Compare commits
102 Commits
| SHA1 | Author | Date |
|---|---|---|
| 578ab52ece | |||
| c8e9c04d19 | |||
| 2fead2a50f | |||
| df1fe15cf6 | |||
| be9adbd5c1 | |||
| 2907276eca | |||
| 59b02c04bf | |||
| 6a3eb417d5 | |||
| c30ccb308d | |||
| 1572845a2f | |||
| caad34cf58 | |||
| bf683dfe52 | |||
| 307befd7e2 | |||
| b58270ba69 | |||
| 0ecfcb5997 | |||
| 0ea69840c6 | |||
| 208de55a17 | |||
| 1a8077b814 | |||
| fa0e2597e6 | |||
| f357def036 | |||
| 792ce891be | |||
| d473c60571 | |||
| b020a36d10 | |||
| d2e8ff9e20 | |||
| 10a899b6bd | |||
| 41718b98f6 | |||
| caf21526a0 | |||
| 5b38d82aa0 | |||
| e303dfb6fd | |||
| 94b98bfa21 | |||
| 87e400e44e | |||
| 8cb2456248 | |||
| 11b15544c5 | |||
| 344adac7a6 | |||
| 2027d17a9d | |||
| ff881608fb | |||
| 8947ee2709 | |||
| 2f1931f9eb | |||
| 8164090257 | |||
| e803e487c3 | |||
| 5b636878fc | |||
| 3bc3f0390e | |||
| 296e10c0c5 | |||
| a63faebcf1 | |||
| 49236a4391 | |||
| 17b3250f0f | |||
| 5d246f4998 | |||
| f913afa98c | |||
| 90f256aeab | |||
| ee10970b05 | |||
| 35c929ed5e | |||
| 60eb4ceaf7 | |||
| 1b15368c47 | |||
| a720cf572f | |||
| ec7a9ad6e4 | |||
| 5e413159e5 | |||
| d4226d2f73 | |||
| a7c10adf4e | |||
| a4f41d94db | |||
| 71e1883ca0 | |||
| 06eb05570a | |||
| a1b83ffd2c | |||
| 649097ffe0 | |||
| 57f1de13b3 | |||
| c5431132d7 | |||
| c66cebee7a | |||
| c105a58f65 | |||
| 545fd2ad76 | |||
| 315f7d7d04 | |||
| 6aedc5e912 | |||
| 3ac398aa49 | |||
| 781c427788 | |||
| 47e66c5812 | |||
| 9933222452 | |||
| 3f5553548b | |||
| c8273616ee | |||
| 8aebc31806 | |||
| 57ef4e32f4 | |||
| c15fb3a8e5 | |||
| cb07256868 | |||
| 5ec13f81a2 | |||
| 394c393998 | |||
| a4ba5831a0 | |||
| ac45214f7d | |||
| 12a1cf4783 | |||
| 7fd21aefd8 | |||
| 3f9063e775 | |||
| 8758cdca10 | |||
| 529b1e7ec7 | |||
| b8bfba8dc6 | |||
| d6ddcdfa6a | |||
| 7380aae601 | |||
| 6a6cd35985 | |||
| 941a493f49 | |||
| 1e275568f1 | |||
| 2a78b4e9a3 | |||
| 8cf8fc27fa | |||
| 68d67f2cbf | |||
| c1754d9e5d | |||
| af9b8c1be3 | |||
| 292fc5c580 | |||
| 11f5e33a90 |
@@ -1,2 +1,6 @@
.dockerignore
.git
build
.gitignore
appveyor.yml
build
circle.yml

.mailmap (5 changes)
@@ -44,6 +44,7 @@ Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@users.noreply.github.com>
Anuj Bahuguna <anujbahuguna.dev@gmail.com>
Anuj Bahuguna <anujbahuguna.dev@gmail.com> <abahuguna@fiberlink.com>
Anusha Ragunathan <anusha.ragunathan@docker.com> <anusha@docker.com>
Ao Li <la9249@163.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Arnaud Porterie <arnaud.porterie@docker.com> <icecrime@gmail.com>
Arthur Gautier <baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>

@@ -394,6 +395,8 @@ Stefan Berger <stefanb@linux.vnet.ibm.com>
Stefan Berger <stefanb@linux.vnet.ibm.com> <stefanb@us.ibm.com>
Stefan J. Wernli <swernli@microsoft.com> <swernli@ntdev.microsoft.com>
Stefan S. <tronicum@user.github.com>
Stefan Scherer <stefan.scherer@docker.com>
Stefan Scherer <stefan.scherer@docker.com> <scherer_stefan@icloud.com>
Stephen Day <stevvooe@gmail.com>
Stephen Day <stevvooe@gmail.com> <stephen.day@docker.com>
Stephen Day <stevvooe@gmail.com> <stevvooe@users.noreply.github.com>

@@ -402,6 +405,8 @@ Steve Richards <steve.richards@docker.com> stevejr <>
Sun Gengze <690388648@qq.com>
Sun Jianbo <wonderflow.sun@gmail.com>
Sun Jianbo <wonderflow.sun@gmail.com> <wonderflow@zju.edu.cn>
Sunny Gogoi <indiasuny000@gmail.com>
Sunny Gogoi <indiasuny000@gmail.com> <me@darkowlzz.space>
Sven Dowideit <SvenDowideit@home.org.au>
Sven Dowideit <SvenDowideit@home.org.au> <sven@t440s.home.gateway>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@docker.com>

AUTHORS (9 changes)
@@ -58,6 +58,7 @@ Anton Polonskiy <anton.polonskiy@gmail.com>
Antonio Murdaca <antonio.murdaca@gmail.com>
Antonis Kalipetis <akalipetis@gmail.com>
Anusha Ragunathan <anusha.ragunathan@docker.com>
Ao Li <la9249@163.com>
Arash Deshmeh <adeshmeh@ca.ibm.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Ashwini Oruganti <ashwini.oruganti@gmail.com>

@@ -158,6 +159,7 @@ David Cramer <davcrame@cisco.com>
David Dooling <dooling@gmail.com>
David Gageot <david@gageot.net>
David Lechner <david@lechnology.com>
David Scott <dave@recoil.org>
David Sheets <dsheets@docker.com>
David Williamson <david.williamson@docker.com>
David Xia <dxia@spotify.com>

@@ -300,6 +302,7 @@ Jim Galasyn <jim.galasyn@docker.com>
Jimmy Leger <jimmy.leger@gmail.com>
Jimmy Song <rootsongjc@gmail.com>
jimmyxian <jimmyxian2004@yahoo.com.cn>
Jintao Zhang <zhangjintao9020@gmail.com>
Joao Fernandes <joao.fernandes@docker.com>
Joe Doliner <jdoliner@pachyderm.io>
Joe Gordon <joe.gordon0@gmail.com>

@@ -471,9 +474,11 @@ Mrunal Patel <mrunalp@gmail.com>
muicoder <muicoder@gmail.com>
Muthukumar R <muthur@gmail.com>
Máximo Cuadros <mcuadros@gmail.com>
Mårten Cassel <marten.cassel@gmail.com>
Nace Oroz <orkica@gmail.com>
Nahum Shalman <nshalman@omniti.com>
Nalin Dahyabhai <nalin@redhat.com>
Nao YONASHIRO <owan.orisano@gmail.com>
Nassim 'Nass' Eddequiouaq <eddequiouaq.nassim@gmail.com>
Natalie Parker <nparker@omnifone.com>
Nate Brennand <nate.brennand@clever.com>

@@ -595,7 +600,7 @@ Spencer Brown <spencer@spencerbrown.org>
squeegels <1674195+squeegels@users.noreply.github.com>
Srini Brahmaroutu <srbrahma@us.ibm.com>
Stefan S. <tronicum@user.github.com>
Stefan Scherer <scherer_stefan@icloud.com>
Stefan Scherer <stefan.scherer@docker.com>
Stefan Weil <sw@weilnetz.de>
Stephane Jeandeaux <stephane.jeandeaux@gmail.com>
Stephen Day <stevvooe@gmail.com>

@@ -605,7 +610,9 @@ Steve Richards <steve.richards@docker.com>
Steven Burgess <steven.a.burgess@hotmail.com>
Subhajit Ghosh <isubuz.g@gmail.com>
Sun Jianbo <wonderflow.sun@gmail.com>
Sune Keller <absukl@almbrand.dk>
Sungwon Han <sungwon.han@navercorp.com>
Sunny Gogoi <indiasuny000@gmail.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sylvain Baubeau <sbaubeau@redhat.com>
Sébastien HOUZÉ <cto@verylastroom.com>

Jenkinsfile (vendored, 3 changes)
@@ -5,8 +5,9 @@ wrappedNode(label: 'linux && x86_64', cleanWorkspace: true) {

  stage "Run end-to-end test suite"
  sh "docker version"
  sh "docker info"
  sh "E2E_UNIQUE_ID=clie2e${BUILD_NUMBER} \
    IMAGE_TAG=clie2e${BUILD_NUMBER} \
    make -f docker.Makefile test-e2e"
    DOCKER_BUILDKIT=1 make -f docker.Makefile test-e2e"
  }
}

@@ -4,7 +4,7 @@ clone_folder: c:\gopath\src\github.com\docker\cli

environment:
  GOPATH: c:\gopath
  GOVERSION: 1.12.4
  GOVERSION: 1.12.8
  DEPVERSION: v0.4.1

install:

circle.yml (62 changes)
@@ -4,35 +4,39 @@ jobs:

  lint:
    working_directory: /work
    docker: [{image: 'docker:18.03-git'}]
    docker: [{image: 'docker:18.09-git'}]
    environment:
      DOCKER_BUILDKIT: 1
    steps:
      - checkout
      - setup_remote_docker:
          version: 18.03.1-ce
          reusable: true
          exclusive: false
          version: 18.09.3
          reusable: true
          exclusive: false
      - run:
          command: docker version
      - run:
          name: "Lint"
          command: |
            docker build -f dockerfiles/Dockerfile.lint --tag cli-linter:$CIRCLE_BUILD_NUM .
            docker build --progress=plain -f dockerfiles/Dockerfile.lint --tag cli-linter:$CIRCLE_BUILD_NUM .
            docker run --rm cli-linter:$CIRCLE_BUILD_NUM

  cross:
    working_directory: /work
    docker: [{image: 'docker:18.03-git'}]
    docker: [{image: 'docker:18.09-git'}]
    environment:
      DOCKER_BUILDKIT: 1
    parallelism: 3
    steps:
      - checkout
      - setup_remote_docker:
          version: 18.03.1-ce
          reusable: true
          exclusive: false
          version: 18.09.3
          reusable: true
          exclusive: false
      - run:
          name: "Cross"
          command: |
            docker build -f dockerfiles/Dockerfile.cross --tag cli-builder:$CIRCLE_BUILD_NUM .
            docker build --progress=plain -f dockerfiles/Dockerfile.cross --tag cli-builder:$CIRCLE_BUILD_NUM .
            name=cross-$CIRCLE_BUILD_NUM-$CIRCLE_NODE_INDEX
            docker run \
              -e CROSS_GROUP=$CIRCLE_NODE_INDEX \

@@ -46,18 +50,20 @@ jobs:

  test:
    working_directory: /work
    docker: [{image: 'docker:18.03-git'}]
    docker: [{image: 'docker:18.09-git'}]
    environment:
      DOCKER_BUILDKIT: 1
    steps:
      - checkout
      - setup_remote_docker:
          version: 18.03.1-ce
          reusable: true
          exclusive: false
          version: 18.09.3
          reusable: true
          exclusive: false
      - run:
          name: "Unit Test with Coverage"
          command: |
            mkdir -p test-results/unit-tests
            docker build -f dockerfiles/Dockerfile.dev --tag cli-builder:$CIRCLE_BUILD_NUM .
            docker build --progress=plain -f dockerfiles/Dockerfile.dev --tag cli-builder:$CIRCLE_BUILD_NUM .
            docker run \
              -e GOTESTSUM_JUNITFILE=/tmp/junit.xml \
              --name \

@@ -77,37 +83,43 @@ jobs:
              echo 'Codecov failed to upload'
      - store_test_results:
          path: test-results
      - store_artifacts:
          path: test-results

  validate:
    working_directory: /work
    docker: [{image: 'docker:18.03-git'}]
    docker: [{image: 'docker:18.09-git'}]
    environment:
      DOCKER_BUILDKIT: 1
    steps:
      - checkout
      - setup_remote_docker:
          version: 18.03.1-ce
          reusable: true
          exclusive: false
          version: 18.09.3
          reusable: true
          exclusive: false
      - run:
          name: "Validate Vendor, Docs, and Code Generation"
          command: |
            rm -f .dockerignore # include .git
            docker build -f dockerfiles/Dockerfile.dev --tag cli-builder-with-git:$CIRCLE_BUILD_NUM .
            docker build --progress=plain -f dockerfiles/Dockerfile.dev --tag cli-builder-with-git:$CIRCLE_BUILD_NUM .
            docker run --rm cli-builder-with-git:$CIRCLE_BUILD_NUM \
              make ci-validate
          no_output_timeout: 15m
  shellcheck:
    working_directory: /work
    docker: [{image: 'docker:18.03-git'}]
    docker: [{image: 'docker:18.09-git'}]
    environment:
      DOCKER_BUILDKIT: 1
    steps:
      - checkout
      - setup_remote_docker:
          version: 18.03.1-ce
          reusable: true
          exclusive: false
          version: 18.09.3
          reusable: true
          exclusive: false
      - run:
          name: "Run shellcheck"
          command: |
            docker build -f dockerfiles/Dockerfile.shellcheck --tag cli-validator:$CIRCLE_BUILD_NUM .
            docker build --progress=plain -f dockerfiles/Dockerfile.shellcheck --tag cli-validator:$CIRCLE_BUILD_NUM .
            docker run --rm cli-validator:$CIRCLE_BUILD_NUM \
              make shellcheck
workflows:
@@ -101,5 +101,6 @@ func main() {
    SchemaVersion: "0.1.0",
    Vendor: "Docker Inc.",
    Version: "testing",
    Experimental: os.Getenv("HELLO_EXPERIMENTAL") != "",
  })
}
@@ -12,9 +12,10 @@ import (
)

type fakeCandidate struct {
  path string
  exec bool
  meta string
  path string
  exec bool
  meta string
  allowExperimental bool
}

func (c *fakeCandidate) Path() string {

@@ -35,9 +36,10 @@ func TestValidateCandidate(t *testing.T) {
  builtinName = NamePrefix + "builtin"
  builtinAlias = NamePrefix + "alias"

  badPrefixPath = "/usr/local/libexec/cli-plugins/wobble"
  badNamePath = "/usr/local/libexec/cli-plugins/docker-123456"
  goodPluginPath = "/usr/local/libexec/cli-plugins/" + goodPluginName
  badPrefixPath = "/usr/local/libexec/cli-plugins/wobble"
  badNamePath = "/usr/local/libexec/cli-plugins/docker-123456"
  goodPluginPath = "/usr/local/libexec/cli-plugins/" + goodPluginName
  metaExperimental = `{"SchemaVersion": "0.1.0", "Vendor": "e2e-testing", "Experimental": true}`
)

fakeroot := &cobra.Command{Use: "docker"}

@@ -49,40 +51,46 @@ func TestValidateCandidate(t *testing.T) {
})

for _, tc := range []struct {
  c *fakeCandidate
  name string
  c *fakeCandidate

  // Either err or invalid may be non-empty, but not both (both can be empty for a good plugin).
  err string
  invalid string
}{
  /* Each failing one of the tests */
  {c: &fakeCandidate{path: ""}, err: "plugin candidate path cannot be empty"},
  {c: &fakeCandidate{path: badPrefixPath}, err: fmt.Sprintf("does not have %q prefix", NamePrefix)},
  {c: &fakeCandidate{path: badNamePath}, invalid: "did not match"},
  {c: &fakeCandidate{path: builtinName}, invalid: `plugin "builtin" duplicates builtin command`},
  {c: &fakeCandidate{path: builtinAlias}, invalid: `plugin "alias" duplicates an alias of builtin command "builtin"`},
  {c: &fakeCandidate{path: goodPluginPath, exec: false}, invalid: fmt.Sprintf("failed to fetch metadata: faked a failure to exec %q", goodPluginPath)},
  {c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `xyzzy`}, invalid: "invalid character"},
  {c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{}`}, invalid: `plugin SchemaVersion "" is not valid`},
  {c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "xyzzy"}`}, invalid: `plugin SchemaVersion "xyzzy" is not valid`},
  {c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0"}`}, invalid: "plugin metadata does not define a vendor"},
  {c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0", "Vendor": ""}`}, invalid: "plugin metadata does not define a vendor"},
  {name: "empty path", c: &fakeCandidate{path: ""}, err: "plugin candidate path cannot be empty"},
  {name: "bad prefix", c: &fakeCandidate{path: badPrefixPath}, err: fmt.Sprintf("does not have %q prefix", NamePrefix)},
  {name: "bad path", c: &fakeCandidate{path: badNamePath}, invalid: "did not match"},
  {name: "builtin command", c: &fakeCandidate{path: builtinName}, invalid: `plugin "builtin" duplicates builtin command`},
  {name: "builtin alias", c: &fakeCandidate{path: builtinAlias}, invalid: `plugin "alias" duplicates an alias of builtin command "builtin"`},
  {name: "fetch failure", c: &fakeCandidate{path: goodPluginPath, exec: false}, invalid: fmt.Sprintf("failed to fetch metadata: faked a failure to exec %q", goodPluginPath)},
  {name: "metadata not json", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `xyzzy`}, invalid: "invalid character"},
  {name: "empty schemaversion", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{}`}, invalid: `plugin SchemaVersion "" is not valid`},
  {name: "invalid schemaversion", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "xyzzy"}`}, invalid: `plugin SchemaVersion "xyzzy" is not valid`},
  {name: "no vendor", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0"}`}, invalid: "plugin metadata does not define a vendor"},
  {name: "empty vendor", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0", "Vendor": ""}`}, invalid: "plugin metadata does not define a vendor"},
  {name: "experimental required", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: metaExperimental}, invalid: "requires experimental CLI"},
  // This one should work
  {c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0", "Vendor": "e2e-testing"}`}},
  {name: "valid", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0", "Vendor": "e2e-testing"}`}},
  {name: "valid + allowing experimental", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0", "Vendor": "e2e-testing"}`, allowExperimental: true}},
  {name: "experimental + allowing experimental", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: metaExperimental, allowExperimental: true}},
} {
  p, err := newPlugin(tc.c, fakeroot)
  if tc.err != "" {
    assert.ErrorContains(t, err, tc.err)
  } else if tc.invalid != "" {
    assert.NilError(t, err)
    assert.Assert(t, cmp.ErrorType(p.Err, reflect.TypeOf(&pluginError{})))
    assert.ErrorContains(t, p.Err, tc.invalid)
  } else {
    assert.NilError(t, err)
    assert.Equal(t, NamePrefix+p.Name, goodPluginName)
    assert.Equal(t, p.SchemaVersion, "0.1.0")
    assert.Equal(t, p.Vendor, "e2e-testing")
  }
  t.Run(tc.name, func(t *testing.T) {
    p, err := newPlugin(tc.c, fakeroot, tc.c.allowExperimental)
    if tc.err != "" {
      assert.ErrorContains(t, err, tc.err)
    } else if tc.invalid != "" {
      assert.NilError(t, err)
      assert.Assert(t, cmp.ErrorType(p.Err, reflect.TypeOf(&pluginError{})))
      assert.ErrorContains(t, p.Err, tc.invalid)
    } else {
      assert.NilError(t, err)
      assert.Equal(t, NamePrefix+p.Name, goodPluginName)
      assert.Equal(t, p.SchemaVersion, "0.1.0")
      assert.Equal(t, p.Vendor, "e2e-testing")
    }
  })
}
}
@@ -1,6 +1,7 @@
package manager

import (
  "fmt"
  "io/ioutil"
  "os"
  "os/exec"

@@ -27,10 +28,23 @@ func (e errPluginNotFound) Error() string {
  return "Error: No such CLI plugin: " + string(e)
}

type errPluginRequireExperimental string

// Note: errPluginRequireExperimental implements notFound so that the plugin
// is skipped when listing the plugins.
func (e errPluginRequireExperimental) NotFound() {}

func (e errPluginRequireExperimental) Error() string {
  return fmt.Sprintf("plugin candidate %q: requires experimental CLI", string(e))
}

type notFound interface{ NotFound() }

// IsNotFound is true if the given error is due to a plugin not being found.
func IsNotFound(err error) bool {
  if e, ok := err.(*pluginError); ok {
    err = e.Cause()
  }
  _, ok := err.(notFound)
  return ok
}
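The `notFound` marker interface above is what lets listing code silently skip experimental plugins on a non-experimental CLI. A minimal, self-contained sketch of the pattern; the names below other than those shown in the hunk are illustrative, not the CLI's actual types:

```go
package main

import "fmt"

// notFound mirrors the marker interface in the hunk above: any error whose
// type has a NotFound() method counts as "plugin not found".
type notFound interface{ NotFound() }

// errRequiresExperimental stands in for errPluginRequireExperimental.
type errRequiresExperimental string

func (e errRequiresExperimental) NotFound() {}
func (e errRequiresExperimental) Error() string {
	return fmt.Sprintf("plugin candidate %q: requires experimental CLI", string(e))
}

// isNotFound mimics IsNotFound without the pluginError unwrapping step.
func isNotFound(err error) bool {
	_, ok := err.(notFound)
	return ok
}

func main() {
	var err error = errRequiresExperimental("docker-example")
	fmt.Println(isNotFound(err)) // true, so listing code can skip the plugin
}
```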
@@ -117,12 +131,14 @@ func ListPlugins(dockerCli command.Cli, rootcmd *cobra.Command) ([]Plugin, error
      continue
    }
    c := &candidate{paths[0]}
    p, err := newPlugin(c, rootcmd)
    p, err := newPlugin(c, rootcmd, dockerCli.ClientInfo().HasExperimental)
    if err != nil {
      return nil, err
    }
    p.ShadowedPaths = paths[1:]
    plugins = append(plugins, p)
    if !IsNotFound(p.Err) {
      p.ShadowedPaths = paths[1:]
      plugins = append(plugins, p)
    }
  }

  return plugins, nil

@@ -159,12 +175,19 @@ func PluginRunCommand(dockerCli command.Cli, name string, rootcmd *cobra.Command
  }

  c := &candidate{path: path}
  plugin, err := newPlugin(c, rootcmd)
  plugin, err := newPlugin(c, rootcmd, dockerCli.ClientInfo().HasExperimental)
  if err != nil {
    return nil, err
  }
  if plugin.Err != nil {
    // TODO: why are we not returning plugin.Err?

    err := plugin.Err.(*pluginError).Cause()
    // if an experimental plugin was invoked directly while experimental mode is off
    // provide a more useful error message than "not found".
    if err, ok := err.(errPluginRequireExperimental); ok {
      return nil, err
    }
    return nil, errPluginNotFound(name)
  }
  cmd := exec.Command(plugin.Path, args...)
@@ -22,4 +22,7 @@ type Metadata struct {
  ShortDescription string `json:",omitempty"`
  // URL is a pointer to the plugin's homepage.
  URL string `json:",omitempty"`
  // Experimental specifies whether the plugin is experimental.
  // Experimental plugins are not displayed on non-experimental CLIs.
  Experimental bool `json:",omitempty"`
}
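For orientation, the JSON a plugin reports with the new field set corresponds to the `metaExperimental` fixture used in the candidate test earlier in this diff. A hedged, standalone sketch; the struct below only mirrors the fields shown in the diff and is not the CLI's own type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// metadata mirrors the fields added in the hunk above (illustrative copy).
type metadata struct {
	SchemaVersion    string `json:",omitempty"`
	Vendor           string `json:",omitempty"`
	Version          string `json:",omitempty"`
	ShortDescription string `json:",omitempty"`
	URL              string `json:",omitempty"`
	Experimental     bool   `json:",omitempty"`
}

func main() {
	out, _ := json.Marshal(metadata{
		SchemaVersion: "0.1.0",
		Vendor:        "e2e-testing",
		Experimental:  true,
	})
	// Prints {"SchemaVersion":"0.1.0","Vendor":"e2e-testing","Experimental":true}
	fmt.Println(string(out))
}
```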
@@ -33,7 +33,9 @@ type Plugin struct {
// is set, and is always a `pluginError`, but the `Plugin` is still
// returned with no error. An error is only returned due to a
// non-recoverable error.
func newPlugin(c Candidate, rootcmd *cobra.Command) (Plugin, error) {
//
// nolint: gocyclo
func newPlugin(c Candidate, rootcmd *cobra.Command, allowExperimental bool) (Plugin, error) {
  path := c.Path()
  if path == "" {
    return Plugin{}, errors.New("plugin candidate path cannot be empty")

@@ -94,7 +96,10 @@ func newPlugin(c Candidate, rootcmd *cobra.Command) (Plugin, error) {
    p.Err = wrapAsPluginError(err, "invalid metadata")
    return p, nil
  }

  if p.Experimental && !allowExperimental {
    p.Err = &pluginError{errPluginRequireExperimental(p.Name)}
    return p, nil
  }
  if p.Metadata.SchemaVersion != "0.1.0" {
    p.Err = NewPluginError("plugin SchemaVersion %q is not valid, must be 0.1.0", p.Metadata.SchemaVersion)
    return p, nil
@@ -114,11 +114,14 @@ func newPluginCommand(dockerCli *command.DockerCli, plugin *cobra.Command, meta
  fullname := manager.NamePrefix + name

  cmd := &cobra.Command{
    Use: fmt.Sprintf("docker [OPTIONS] %s [ARG...]", name),
    Short: fullname + " is a Docker CLI plugin",
    SilenceUsage: true,
    SilenceErrors: true,
    PersistentPreRunE: PersistentPreRunE,
    Use: fmt.Sprintf("docker [OPTIONS] %s [ARG...]", name),
    Short: fullname + " is a Docker CLI plugin",
    SilenceUsage: true,
    SilenceErrors: true,
    PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
      // We can't use this as the hook directly since it is initialised later (in runPlugin)
      return PersistentPreRunE(cmd, args)
    },
    TraverseChildren: true,
    DisableFlagsInUseLine: true,
  }
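The inline comment above is about evaluation order: assigning `PersistentPreRunE` directly copies its value (still nil) when the command is built, while the closure defers the lookup until the hook actually runs, after `runPlugin` has initialised it. A standalone illustration of that difference (plain Go, unrelated to cobra):

```go
package main

import "fmt"

var hook func() string // assigned later, like PersistentPreRunE in runPlugin

func main() {
	direct := hook                             // copies the current (nil) value
	wrapped := func() string { return hook() } // looks hook up when called

	hook = func() string { return "initialised" }

	fmt.Println(direct == nil) // true: the direct copy is still nil
	fmt.Println(wrapped())     // "initialised"
}
```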
@@ -14,7 +14,6 @@ import (
  "github.com/docker/cli/cli/config/configfile"
  dcontext "github.com/docker/cli/cli/context"
  "github.com/docker/cli/cli/context/docker"
  kubcontext "github.com/docker/cli/cli/context/kubernetes"
  "github.com/docker/cli/cli/context/store"
  "github.com/docker/cli/cli/debug"
  cliflags "github.com/docker/cli/cli/flags"

@@ -210,11 +209,11 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...Initialize

  cli.configFile = cliconfig.LoadDefaultConfigFile(cli.err)

  baseContextSore := store.New(cliconfig.ContextStoreDir(), cli.contextStoreConfig)
  baseContextStore := store.New(cliconfig.ContextStoreDir(), cli.contextStoreConfig)
  cli.contextStore = &ContextStoreWithDefault{
    Store: baseContextSore,
    Store: baseContextStore,
    Resolver: func() (*DefaultContext, error) {
      return resolveDefaultContext(opts.Common, cli.ConfigFile(), cli.Err())
      return ResolveDefaultContext(opts.Common, cli.ConfigFile(), cli.contextStoreConfig, cli.Err())
    },
  }
  cli.currentContext, err = resolveContextName(opts.Common, cli.configFile, cli.contextStore)

@@ -259,10 +258,11 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...Initialize

// NewAPIClientFromFlags creates a new APIClient from command line flags
func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) {
  storeConfig := DefaultContextStoreConfig()
  store := &ContextStoreWithDefault{
    Store: store.New(cliconfig.ContextStoreDir(), defaultContextStoreConfig()),
    Store: store.New(cliconfig.ContextStoreDir(), storeConfig),
    Resolver: func() (*DefaultContext, error) {
      return resolveDefaultContext(opts, configFile, ioutil.Discard)
      return ResolveDefaultContext(opts, configFile, storeConfig, ioutil.Discard)
    },
  }
  contextName, err := resolveContextName(opts, configFile, store)

@@ -453,7 +453,7 @@ func NewDockerCli(ops ...DockerCliOption) (*DockerCli, error) {
    WithContentTrustFromEnv(),
    WithContainerizedClient(containerizedengine.NewClient),
  }
  cli.contextStoreConfig = defaultContextStoreConfig()
  cli.contextStoreConfig = DefaultContextStoreConfig()
  ops = append(defaultOps, ops...)
  if err := cli.Apply(ops...); err != nil {
    return nil, err

@@ -526,10 +526,22 @@ func resolveContextName(opts *cliflags.CommonOptions, config *configfile.ConfigF
  return DefaultContextName, nil
}

func defaultContextStoreConfig() store.Config {
var defaultStoreEndpoints = []store.NamedTypeGetter{
  store.EndpointTypeGetter(docker.DockerEndpoint, func() interface{} { return &docker.EndpointMeta{} }),
}

// RegisterDefaultStoreEndpoints registers a new named endpoint
// metadata type with the default context store config, so that
// endpoint will be supported by stores using the config returned by
// DefaultContextStoreConfig.
func RegisterDefaultStoreEndpoints(ep ...store.NamedTypeGetter) {
  defaultStoreEndpoints = append(defaultStoreEndpoints, ep...)
}

// DefaultContextStoreConfig returns a new store.Config with the default set of endpoints configured.
func DefaultContextStoreConfig() store.Config {
  return store.NewConfig(
    func() interface{} { return &DockerContext{} },
    store.EndpointTypeGetter(docker.DockerEndpoint, func() interface{} { return &docker.EndpointMeta{} }),
    store.EndpointTypeGetter(kubcontext.KubernetesEndpoint, func() interface{} { return &kubcontext.EndpointMeta{} }),
    defaultStoreEndpoints...,
  )
}
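The `RegisterDefaultStoreEndpoints` hook added above is consumed later in this diff by the Kubernetes endpoint package. A trimmed sketch of how an endpoint package registers its metadata type at import time; the package, constant, and struct names here are illustrative stand-ins, only the two registration calls come from the diff:

```go
// Sketch: an endpoint package hooks itself into the default context store
// config so DefaultContextStoreConfig() knows how to decode its metadata
// (mirrors the init() added to cli/context/kubernetes below).
package myendpoint

import (
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/context/store"
)

// MyEndpointName and EndpointMeta are hypothetical.
const MyEndpointName = "myendpoint"

type EndpointMeta struct {
	Host string `json:",omitempty"`
}

func init() {
	command.RegisterDefaultStoreEndpoints(
		store.EndpointTypeGetter(MyEndpointName, func() interface{} { return &EndpointMeta{} }),
	)
}
```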
@@ -7,7 +7,6 @@ import (
  "strconv"

  "github.com/docker/cli/cli/context/docker"
  "github.com/docker/cli/cli/context/kubernetes"
  "github.com/docker/cli/cli/context/store"
  "github.com/docker/cli/cli/streams"
  clitypes "github.com/docker/cli/types"

@@ -97,7 +96,7 @@ func WithContainerizedClient(containerizedFn func(string) (clitypes.Containerize
func WithContextEndpointType(endpointName string, endpointType store.TypeGetter) DockerCliOption {
  return func(cli *DockerCli) error {
    switch endpointName {
    case docker.DockerEndpoint, kubernetes.KubernetesEndpoint:
    case docker.DockerEndpoint:
      return fmt.Errorf("cannot change %q endpoint type", endpointName)
    }
    cli.contextStoreConfig.SetEndpoint(endpointName, endpointType)
@@ -6,6 +6,7 @@ import (
  "crypto/x509"
  "fmt"
  "io/ioutil"
  "net/http"
  "os"
  "runtime"
  "testing"

@@ -79,6 +80,24 @@ func TestNewAPIClientFromFlagsWithAPIVersionFromEnv(t *testing.T) {
  assert.Check(t, is.Equal(customVersion, apiclient.ClientVersion()))
}

func TestNewAPIClientFromFlagsWithHttpProxyEnv(t *testing.T) {
  defer env.Patch(t, "HTTP_PROXY", "http://proxy.acme.com:1234")()
  defer env.Patch(t, "DOCKER_HOST", "tcp://docker.acme.com:2376")()

  opts := &flags.CommonOptions{}
  configFile := &configfile.ConfigFile{}
  apiclient, err := NewAPIClientFromFlags(opts, configFile)
  assert.NilError(t, err)
  transport, ok := apiclient.HTTPClient().Transport.(*http.Transport)
  assert.Assert(t, ok)
  assert.Assert(t, transport.Proxy != nil)
  request, err := http.NewRequest(http.MethodGet, "tcp://docker.acme.com:2376", nil)
  assert.NilError(t, err)
  url, err := transport.Proxy(request)
  assert.NilError(t, err)
  assert.Check(t, is.Equal("http://proxy.acme.com:1234", url.String()))
}

type fakeClient struct {
  client.Client
  pingFunc func() (types.Ping, error)
@@ -6,6 +6,7 @@ import (
  "fmt"
  "io/ioutil"
  "path"
  "reflect"
  "regexp"
  "strconv"
  "strings"

@@ -707,6 +708,15 @@ func parseNetworkOpts(copts *containerOptions) (map[string]*networktypes.Endpoin
    if _, ok := endpoints[n.Target]; ok {
      return nil, errdefs.InvalidParameter(errors.Errorf("network %q is specified multiple times", n.Target))
    }

    // For backward compatibility: if no custom options are provided for the network,
    // and only a single network is specified, omit the endpoint-configuration
    // on the client (the daemon will still create it when creating the container)
    if i == 0 && len(copts.netMode.Value()) == 1 {
      if ep == nil || reflect.DeepEqual(*ep, networktypes.EndpointSettings{}) {
        continue
      }
    }
    endpoints[n.Target] = ep
  }
  if hasUserDefined && hasNonUserDefined {
@@ -401,13 +401,13 @@ func TestParseNetworkConfig(t *testing.T) {
  {
    name: "single-network-legacy",
    flags: []string{"--network", "net1"},
    expected: map[string]*networktypes.EndpointSettings{"net1": {}},
    expected: map[string]*networktypes.EndpointSettings{},
    expectedCfg: container.HostConfig{NetworkMode: "net1"},
  },
  {
    name: "single-network-advanced",
    flags: []string{"--network", "name=net1"},
    expected: map[string]*networktypes.EndpointSettings{"net1": {}},
    expected: map[string]*networktypes.EndpointSettings{},
    expectedCfg: container.HostConfig{NetworkMode: "net1"},
  },
  {
@@ -78,13 +78,19 @@ func RunCreate(cli command.Cli, o *CreateOptions) error {
  if err != nil {
    return errors.Wrap(err, "unable to parse default-stack-orchestrator")
  }
  if o.From == "" && o.Docker == nil && o.Kubernetes == nil {
    return createFromExistingContext(s, cli.CurrentContext(), stackOrchestrator, o)
  switch {
  case o.From == "" && o.Docker == nil && o.Kubernetes == nil:
    err = createFromExistingContext(s, cli.CurrentContext(), stackOrchestrator, o)
  case o.From != "":
    err = createFromExistingContext(s, o.From, stackOrchestrator, o)
  default:
    err = createNewContext(o, stackOrchestrator, cli, s)
  }
  if o.From != "" {
    return createFromExistingContext(s, o.From, stackOrchestrator, o)
  if err == nil {
    fmt.Fprintln(cli.Out(), o.Name)
    fmt.Fprintf(cli.Err(), "Successfully created context %q\n", o.Name)
  }
  return createNewContext(o, stackOrchestrator, cli, s)
  return err
}

func createNewContext(o *CreateOptions, stackOrchestrator command.Orchestrator, cli command.Cli, s store.Writer) error {

@@ -127,8 +133,6 @@ func createNewContext(o *CreateOptions, stackOrchestrator command.Orchestrator,
  if err := s.ResetTLSMaterial(o.Name, &contextTLSData); err != nil {
    return err
  }
  fmt.Fprintln(cli.Out(), o.Name)
  fmt.Fprintf(cli.Err(), "Successfully created context %q\n", o.Name)
  return nil
}
@@ -1,6 +1,7 @@
package context

import (
  "fmt"
  "io/ioutil"
  "os"
  "testing"

@@ -131,6 +132,11 @@ func TestCreateInvalids(t *testing.T) {
  }
}

func assertContextCreateLogging(t *testing.T, cli *test.FakeCli, n string) {
  assert.Equal(t, n+"\n", cli.OutBuffer().String())
  assert.Equal(t, fmt.Sprintf("Successfully created context %q\n", n), cli.ErrBuffer().String())
}

func TestCreateOrchestratorSwarm(t *testing.T) {
  cli, cleanup := makeFakeCli(t)
  defer cleanup()

@@ -141,8 +147,7 @@ func TestCreateOrchestratorSwarm(t *testing.T) {
    Docker: map[string]string{},
  })
  assert.NilError(t, err)
  assert.Equal(t, "test\n", cli.OutBuffer().String())
  assert.Equal(t, "Successfully created context \"test\"\n", cli.ErrBuffer().String())
  assertContextCreateLogging(t, cli, "test")
}

func TestCreateOrchestratorEmpty(t *testing.T) {

@@ -154,6 +159,7 @@ func TestCreateOrchestratorEmpty(t *testing.T) {
    Docker: map[string]string{},
  })
  assert.NilError(t, err)
  assertContextCreateLogging(t, cli, "test")
}

func validateTestKubeEndpoint(t *testing.T, s store.Reader, name string) {

@@ -189,6 +195,7 @@ func TestCreateOrchestratorAllKubernetesEndpointFromCurrent(t *testing.T) {
  cli, cleanup := makeFakeCli(t)
  defer cleanup()
  createTestContextWithKube(t, cli)
  assertContextCreateLogging(t, cli, "test")
  validateTestKubeEndpoint(t, cli.ContextStore(), "test")
}

@@ -225,6 +232,7 @@ func TestCreateFromContext(t *testing.T) {
  defer cleanup()
  revert := env.Patch(t, "KUBECONFIG", "./testdata/test-kubeconfig")
  defer revert()
  cli.ResetOutputBuffers()
  assert.NilError(t, RunCreate(cli, &CreateOptions{
    Name: "original",
    Description: "original description",

@@ -236,6 +244,9 @@ func TestCreateFromContext(t *testing.T) {
    },
    DefaultStackOrchestrator: "swarm",
  }))
  assertContextCreateLogging(t, cli, "original")

  cli.ResetOutputBuffers()
  assert.NilError(t, RunCreate(cli, &CreateOptions{
    Name: "dummy",
    Description: "dummy description",

@@ -247,11 +258,13 @@ func TestCreateFromContext(t *testing.T) {
    },
    DefaultStackOrchestrator: "swarm",
  }))
  assertContextCreateLogging(t, cli, "dummy")

  cli.SetCurrentContext("dummy")

  for _, c := range cases {
    t.Run(c.name, func(t *testing.T) {
      cli.ResetOutputBuffers()
      err := RunCreate(cli, &CreateOptions{
        From: "original",
        Name: c.name,

@@ -261,6 +274,7 @@ func TestCreateFromContext(t *testing.T) {
        Kubernetes: c.kubernetes,
      })
      assert.NilError(t, err)
      assertContextCreateLogging(t, cli, c.name)
      newContext, err := cli.ContextStore().GetMetadata(c.name)
      assert.NilError(t, err)
      newContextTyped, err := command.GetDockerContext(newContext)

@@ -308,6 +322,7 @@ func TestCreateFromCurrent(t *testing.T) {
  defer cleanup()
  revert := env.Patch(t, "KUBECONFIG", "./testdata/test-kubeconfig")
  defer revert()
  cli.ResetOutputBuffers()
  assert.NilError(t, RunCreate(cli, &CreateOptions{
    Name: "original",
    Description: "original description",

@@ -319,17 +334,20 @@ func TestCreateFromCurrent(t *testing.T) {
    },
    DefaultStackOrchestrator: "swarm",
  }))
  assertContextCreateLogging(t, cli, "original")

  cli.SetCurrentContext("original")

  for _, c := range cases {
    t.Run(c.name, func(t *testing.T) {
      cli.ResetOutputBuffers()
      err := RunCreate(cli, &CreateOptions{
        Name: c.name,
        Description: c.description,
        DefaultStackOrchestrator: c.orchestrator,
      })
      assert.NilError(t, err)
      assertContextCreateLogging(t, cli, c.name)
      newContext, err := cli.ContextStore().GetMetadata(c.name)
      assert.NilError(t, err)
      newContextTyped, err := command.GetDockerContext(newContext)
@@ -14,7 +14,7 @@ import (
func newImportCommand(dockerCli command.Cli) *cobra.Command {
  cmd := &cobra.Command{
    Use: "import CONTEXT FILE|-",
    Short: "Import a context from a tar file",
    Short: "Import a context from a tar or zip file",
    Args: cli.ExactArgs(2),
    RunE: func(cmd *cobra.Command, args []string) error {
      return RunImport(dockerCli, args[0], args[1])

@@ -28,6 +28,7 @@ func RunImport(dockerCli command.Cli, name string, source string) error {
  if err := checkContextNameForCreation(dockerCli.ContextStore(), name); err != nil {
    return err
  }

  var reader io.Reader
  if source == "-" {
    reader = dockerCli.In()

@@ -43,6 +44,7 @@ func RunImport(dockerCli command.Cli, name string, source string) error {
  if err := store.Import(name, dockerCli.ContextStore(), reader); err != nil {
    return err
  }

  fmt.Fprintln(dockerCli.Out(), name)
  fmt.Fprintf(dockerCli.Err(), "Successfully imported context %q\n", name)
  return nil
@@ -3,15 +3,11 @@ package command

import (
  "fmt"
  "io"
  "os"
  "path/filepath"

  "github.com/docker/cli/cli/config/configfile"
  "github.com/docker/cli/cli/context/docker"
  "github.com/docker/cli/cli/context/kubernetes"
  "github.com/docker/cli/cli/context/store"
  cliflags "github.com/docker/cli/cli/flags"
  "github.com/docker/docker/pkg/homedir"
  "github.com/pkg/errors"
)

@@ -20,7 +16,7 @@ const (
  DefaultContextName = "default"
)

// DefaultContext contains the default context data for all enpoints
// DefaultContext contains the default context data for all endpoints
type DefaultContext struct {
  Meta store.Metadata
  TLS store.ContextTLSData

@@ -35,8 +31,21 @@ type ContextStoreWithDefault struct {
  Resolver DefaultContextResolver
}

// resolveDefaultContext creates a Metadata for the current CLI invocation parameters
func resolveDefaultContext(opts *cliflags.CommonOptions, config *configfile.ConfigFile, stderr io.Writer) (*DefaultContext, error) {
// EndpointDefaultResolver is implemented by any EndpointMeta object
// which wants to be able to populate the store with whatever their default is.
type EndpointDefaultResolver interface {
  // ResolveDefault returns values suitable for storing in store.Metadata.Endpoints
  // and store.ContextTLSData.Endpoints.
  //
  // An error is only returned for something fatal, not simply
  // the lack of a default (e.g. because the config file which
  // would contain it is missing). If there is no default then
  // returns nil, nil, nil.
  ResolveDefault(Orchestrator) (interface{}, *store.EndpointTLSData, error)
}

// ResolveDefaultContext creates a Metadata for the current CLI invocation parameters
func ResolveDefaultContext(opts *cliflags.CommonOptions, config *configfile.ConfigFile, storeconfig store.Config, stderr io.Writer) (*DefaultContext, error) {
  stackOrchestrator, err := GetStackOrchestrator("", "", config.StackOrchestrator, stderr)
  if err != nil {
    return nil, err

@@ -62,20 +71,28 @@ func resolveDefaultContext(opts *cliflags.CommonOptions, config *configfile.Conf
    contextTLSData.Endpoints[docker.DockerEndpoint] = *dockerEP.TLSData.ToStoreTLSData()
  }

  // Default context uses env-based kubeconfig for Kubernetes endpoint configuration
  kubeconfig := os.Getenv("KUBECONFIG")
  if kubeconfig == "" {
    kubeconfig = filepath.Join(homedir.Get(), ".kube/config")
  }
  kubeEP, err := kubernetes.FromKubeConfig(kubeconfig, "", "")
  if (stackOrchestrator == OrchestratorKubernetes || stackOrchestrator == OrchestratorAll) && err != nil {
    return nil, errors.Wrapf(err, "default orchestrator is %s but kubernetes endpoint could not be found", stackOrchestrator)
  }
  if err == nil {
    contextMetadata.Endpoints[kubernetes.KubernetesEndpoint] = kubeEP.EndpointMeta
    if kubeEP.TLSData != nil {
      contextTLSData.Endpoints[kubernetes.KubernetesEndpoint] = *kubeEP.TLSData.ToStoreTLSData()
  if err := storeconfig.ForeachEndpointType(func(n string, get store.TypeGetter) error {
    if n == docker.DockerEndpoint { // handled above
      return nil
    }
    ep := get()
    if i, ok := ep.(EndpointDefaultResolver); ok {
      meta, tls, err := i.ResolveDefault(stackOrchestrator)
      if err != nil {
        return err
      }
      if meta == nil {
        return nil
      }
      contextMetadata.Endpoints[n] = meta
      if tls != nil {
        contextTLSData.Endpoints[n] = *tls
      }
    }
    // Nothing to be done
    return nil
  }); err != nil {
    return nil, err
  }

  return &DefaultContext{Meta: contextMetadata, TLS: contextTLSData}, nil
@@ -8,7 +8,6 @@ import (

  "github.com/docker/cli/cli/config/configfile"
  "github.com/docker/cli/cli/context/docker"
  "github.com/docker/cli/cli/context/kubernetes"
  "github.com/docker/cli/cli/context/store"
  cliflags "github.com/docker/cli/cli/flags"
  "github.com/docker/go-connections/tlsconfig"

@@ -63,22 +62,20 @@ func TestDefaultContextInitializer(t *testing.T) {
  cli, err := NewDockerCli()
  assert.NilError(t, err)
  defer env.Patch(t, "DOCKER_HOST", "ssh://someswarmserver")()
  defer env.Patch(t, "KUBECONFIG", "./testdata/test-kubeconfig")()
  cli.configFile = &configfile.ConfigFile{
    StackOrchestrator: "all",
    StackOrchestrator: "swarm",
  }
  ctx, err := resolveDefaultContext(&cliflags.CommonOptions{
  ctx, err := ResolveDefaultContext(&cliflags.CommonOptions{
    TLS: true,
    TLSOptions: &tlsconfig.Options{
      CAFile: "./testdata/ca.pem",
    },
  }, cli.ConfigFile(), cli.Err())
  }, cli.ConfigFile(), DefaultContextStoreConfig(), cli.Err())
  assert.NilError(t, err)
  assert.Equal(t, "default", ctx.Meta.Name)
  assert.Equal(t, OrchestratorAll, ctx.Meta.Metadata.(DockerContext).StackOrchestrator)
  assert.Equal(t, OrchestratorSwarm, ctx.Meta.Metadata.(DockerContext).StackOrchestrator)
  assert.DeepEqual(t, "ssh://someswarmserver", ctx.Meta.Endpoints[docker.DockerEndpoint].(docker.EndpointMeta).Host)
  golden.Assert(t, string(ctx.TLS.Endpoints[docker.DockerEndpoint].Files["ca.pem"]), "ca.pem")
  assert.DeepEqual(t, "zoinx", ctx.Meta.Endpoints[kubernetes.KubernetesEndpoint].(kubernetes.EndpointMeta).DefaultNamespace)
}

func TestExportDefaultImport(t *testing.T) {
@@ -14,6 +14,7 @@ import (
  "strings"

  "github.com/containerd/console"
  "github.com/containerd/containerd/platforms"
  "github.com/docker/cli/cli"
  "github.com/docker/cli/cli/command"
  "github.com/docker/cli/cli/command/image/build"

@@ -173,7 +174,7 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
    }))
  }

  s.Allow(authprovider.NewDockerAuthProvider())
  s.Allow(authprovider.NewDockerAuthProvider(os.Stderr))
  if len(options.secrets) > 0 {
    sp, err := parseSecretSpecs(options.secrets)
    if err != nil {

@@ -215,6 +216,14 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
    })
  }

  if v := os.Getenv("BUILDKIT_PROGRESS"); v != "" && options.progress == "auto" {
    options.progress = v
  }

  if strings.EqualFold(options.platform, "local") {
    options.platform = platforms.DefaultString()
  }

  eg.Go(func() error {
    defer func() { // make sure the Status ends cleanly on build errors
      s.Close()
@@ -15,6 +15,7 @@ import (
  "github.com/docker/cli/cli/version"
  "github.com/docker/cli/kubernetes"
  "github.com/docker/cli/templates"
  kubeapi "github.com/docker/compose-on-kubernetes/api"
  "github.com/docker/docker/api/types"
  "github.com/pkg/errors"
  "github.com/sirupsen/logrus"

@@ -243,7 +244,7 @@ func getKubernetesVersion(dockerCli command.Cli, kubeConfig string) *kubernetesV
    err error
  )
  if dockerCli.CurrentContext() == "" {
    clientConfig = kubernetes.NewKubernetesConfig(kubeConfig)
    clientConfig = kubeapi.NewKubernetesConfig(kubeConfig)
  } else {
    clientConfig, err = kubecontext.ConfigFromContext(dockerCli.CurrentContext(), dockerCli.ContextStore())
  }
@@ -6,6 +6,7 @@ import (
  "fmt"
  "io/ioutil"
  "os"
  "runtime"

  "github.com/docker/cli/cli"
  "github.com/docker/cli/cli/command"

@@ -69,12 +70,14 @@ func loadPrivKey(streams command.Streams, keyPath string, options keyLoadOptions
}

func getPrivKeyBytesFromPath(keyPath string) ([]byte, error) {
  fileInfo, err := os.Stat(keyPath)
  if err != nil {
    return nil, err
  }
  if fileInfo.Mode()&nonOwnerReadWriteMask != 0 {
    return nil, fmt.Errorf("private key file %s must not be readable or writable by others", keyPath)
  if runtime.GOOS != "windows" {
    fileInfo, err := os.Stat(keyPath)
    if err != nil {
      return nil, err
    }
    if fileInfo.Mode()&nonOwnerReadWriteMask != 0 {
      return nil, fmt.Errorf("private key file %s must not be readable or writable by others", keyPath)
    }
  }

  from, err := os.OpenFile(keyPath, os.O_RDONLY, notary.PrivExecPerms)
@@ -16,6 +16,8 @@ var interpolateTypeCastMapping = map[interp.Path]interp.Cast{
  servicePath("deploy", "replicas"): toInt,
  servicePath("deploy", "update_config", "parallelism"): toInt,
  servicePath("deploy", "update_config", "max_failure_ratio"): toFloat,
  servicePath("deploy", "rollback_config", "parallelism"): toInt,
  servicePath("deploy", "rollback_config", "max_failure_ratio"): toFloat,
  servicePath("deploy", "restart_policy", "max_attempts"): toInt,
  servicePath("ports", interp.PathMatchList, "target"): toInt,
  servicePath("ports", interp.PathMatchList, "published"): toInt,
@@ -479,12 +479,13 @@ func resolveVolumePaths(volumes []types.ServiceVolumeConfig, workingDir string,
  }

  filePath := expandUser(volume.Source, lookupEnv)
  // Check for a Unix absolute path first, to handle a Windows client
  // with a Unix daemon. This handles a Windows client connecting to a
  // Unix daemon. Note that this is not required for Docker for Windows
  // when specifying a local Windows path, because Docker for Windows
  // translates the Windows path into a valid path within the VM.
  if !path.IsAbs(filePath) {
  // Check if source is an absolute path (either Unix or Windows), to
  // handle a Windows client with a Unix daemon or vice-versa.
  //
  // Note that this is not required for Docker for Windows when specifying
  // a local Windows path, because Docker for Windows translates the Windows
  // path into a valid path within the VM.
  if !path.IsAbs(filePath) && !isAbs(filePath) {
    filePath = absPath(workingDir, filePath)
  }
  volume.Source = filePath
@@ -582,7 +582,7 @@ volumes:

func TestLoadWithInterpolationCastFull(t *testing.T) {
  dict, err := ParseYAML([]byte(`
version: "3.4"
version: "3.7"
services:
  web:
    configs:

@@ -599,6 +599,9 @@ services:
      update_config:
        parallelism: $theint
        max_failure_ratio: $thefloat
      rollback_config:
        parallelism: $theint
        max_failure_ratio: $thefloat
      restart_policy:
        max_attempts: $theint
    ports:

@@ -649,7 +652,7 @@ networks:
  assert.NilError(t, err)
  expected := &types.Config{
    Filename: "filename.yml",
    Version: "3.4",
    Version: "3.7",
    Services: []types.ServiceConfig{
      {
        Name: "web",

@@ -675,6 +678,10 @@ networks:
          Parallelism: uint64Ptr(555),
          MaxFailureRatio: 3.14,
        },
        RollbackConfig: &types.UpdateConfig{
          Parallelism: uint64Ptr(555),
          MaxFailureRatio: 3.14,
        },
        RestartPolicy: &types.RestartPolicy{
          MaxAttempts: uint64Ptr(555),
        },

@@ -978,6 +985,84 @@ services:
  assert.Error(t, err, `invalid mount config for type "bind": field Source must not be empty`)
}

func TestLoadBindMountSourceIsWindowsAbsolute(t *testing.T) {
  tests := []struct {
    doc string
    yaml string
    expected types.ServiceVolumeConfig
  }{
    {
      doc: "Z-drive lowercase",
      yaml: `
version: '3.3'

services:
  windows:
    image: mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2019
    volumes:
      - type: bind
        source: z:\
        target: c:\data
`,
      expected: types.ServiceVolumeConfig{Type: "bind", Source: `z:\`, Target: `c:\data`},
    },
    {
      doc: "Z-drive uppercase",
      yaml: `
version: '3.3'

services:
  windows:
    image: mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2019
    volumes:
      - type: bind
        source: Z:\
        target: C:\data
`,
      expected: types.ServiceVolumeConfig{Type: "bind", Source: `Z:\`, Target: `C:\data`},
    },
    {
      doc: "Z-drive subdirectory",
      yaml: `
version: '3.3'

services:
  windows:
    image: mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2019
    volumes:
      - type: bind
        source: Z:\some-dir
        target: C:\data
`,
      expected: types.ServiceVolumeConfig{Type: "bind", Source: `Z:\some-dir`, Target: `C:\data`},
    },
    {
      doc: "forward-slashes",
      yaml: `
version: '3.3'

services:
  app:
    image: app:latest
    volumes:
      - type: bind
        source: /z/some-dir
        target: /c/data
`,
      expected: types.ServiceVolumeConfig{Type: "bind", Source: `/z/some-dir`, Target: `/c/data`},
    },
  }

  for _, tc := range tests {
    t.Run(tc.doc, func(t *testing.T) {
      config, err := loadYAML(tc.yaml)
      assert.NilError(t, err)
      assert.Check(t, is.Len(config.Services[0].Volumes, 1))
      assert.Check(t, is.DeepEqual(tc.expected, config.Services[0].Volumes[0]))
    })
  }
}

func TestLoadBindMountWithSource(t *testing.T) {
  config, err := loadYAML(`
version: "3.5"

cli/compose/loader/windows_path.go (new file, 66 lines)
@@ -0,0 +1,66 @@
package loader

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// https://github.com/golang/go/blob/master/LICENSE

// This file contains utilities to check for Windows absolute paths on Linux.
// The code in this file was largely copied from the Golang filepath package
// https://github.com/golang/go/blob/1d0e94b1e13d5e8a323a63cd1cc1ef95290c9c36/src/path/filepath/path_windows.go#L12-L65

func isSlash(c uint8) bool {
  return c == '\\' || c == '/'
}

// isAbs reports whether the path is a Windows absolute path.
func isAbs(path string) (b bool) {
  l := volumeNameLen(path)
  if l == 0 {
    return false
  }
  path = path[l:]
  if path == "" {
    return false
  }
  return isSlash(path[0])
}

// volumeNameLen returns length of the leading volume name on Windows.
// It returns 0 elsewhere.
// nolint: gocyclo
func volumeNameLen(path string) int {
  if len(path) < 2 {
    return 0
  }
  // with drive letter
  c := path[0]
  if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
    return 2
  }
  // is it UNC? https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
  if l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) &&
    !isSlash(path[2]) && path[2] != '.' {
    // first, leading `\\` and next shouldn't be `\`. its server name.
    for n := 3; n < l-1; n++ {
      // second, next '\' shouldn't be repeated.
      if isSlash(path[n]) {
        n++
        // third, following something characters. its share name.
        if !isSlash(path[n]) {
          if path[n] == '.' {
            break
          }
          for ; n < l; n++ {
            if isSlash(path[n]) {
              break
            }
          }
          return n
        }
        break
      }
    }
  }
  return 0
}

cli/compose/loader/windows_path_test.go (new file, 61 lines)
@@ -0,0 +1,61 @@
package loader

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// https://github.com/golang/go/blob/master/LICENSE

// The code in this file was copied from the Golang filepath package with some
// small modifications to run it on non-Windows platforms.
// https://github.com/golang/go/blob/1d0e94b1e13d5e8a323a63cd1cc1ef95290c9c36/src/path/filepath/path_test.go#L711-L763

import "testing"

type IsAbsTest struct {
  path string
  isAbs bool
}

var isabstests = []IsAbsTest{
  {"", false},
  {"/", true},
  {"/usr/bin/gcc", true},
  {"..", false},
  {"/a/../bb", true},
  {".", false},
  {"./", false},
  {"lala", false},
}

var winisabstests = []IsAbsTest{
  {`C:\`, true},
  {`c\`, false},
  {`c::`, false},
  {`c:`, false},
  {`/`, false},
  {`\`, false},
  {`\Windows`, false},
  {`c:a\b`, false},
  {`c:\a\b`, true},
  {`c:/a/b`, true},
  {`\\host\share\foo`, true},
  {`//host/share/foo/bar`, true},
}

func TestIsAbs(t *testing.T) {
  tests := append(isabstests, winisabstests...)
  // All non-windows tests should fail, because they have no volume letter.
  for _, test := range isabstests {
    tests = append(tests, IsAbsTest{test.path, false})
  }
  // All non-windows test should work as intended if prefixed with volume letter.
  for _, test := range isabstests {
    tests = append(tests, IsAbsTest{"c:" + test.path, test.isAbs})
  }

  for _, test := range winisabstests {
    if r := isAbs(test.path); r != test.isAbs {
      t.Errorf("IsAbs(%q) = %v, want %v", test.path, r, test.isAbs)
    }
  }
}
@@ -104,8 +104,8 @@ func (c *Endpoint) ClientOpts() ([]client.Opt, error) {
      return nil, err
    }
    result = append(result,
      client.WithHost(c.Host),
      withHTTPClient(tlsConfig),
      client.WithHost(c.Host),
    )

  } else {
@ -1,9 +1,15 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/context"
|
||||
"github.com/docker/cli/cli/context/store"
|
||||
"github.com/docker/cli/kubernetes"
|
||||
api "github.com/docker/compose-on-kubernetes/api"
|
||||
"github.com/docker/docker/pkg/homedir"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
)
|
||||
@ -17,6 +23,8 @@ type EndpointMeta struct {
|
||||
Exec *clientcmdapi.ExecConfig `json:",omitempty"`
|
||||
}
|
||||
|
||||
var _ command.EndpointDefaultResolver = &EndpointMeta{}
|
||||
|
||||
// Endpoint is a typed wrapper around a context-store generic endpoint describing
|
||||
// a Kubernetes endpoint, with TLS data
|
||||
type Endpoint struct {
|
||||
@ -24,6 +32,12 @@ type Endpoint struct {
|
||||
TLSData *context.TLSData
|
||||
}
|
||||
|
||||
func init() {
|
||||
command.RegisterDefaultStoreEndpoints(
|
||||
store.EndpointTypeGetter(KubernetesEndpoint, func() interface{} { return &EndpointMeta{} }),
|
||||
)
|
||||
}
|
||||
|
||||
// WithTLSData loads TLS materials for the endpoint
|
||||
func (c *EndpointMeta) WithTLSData(s store.Reader, contextName string) (Endpoint, error) {
|
||||
tlsData, err := context.LoadTLSData(s, contextName, KubernetesEndpoint)
|
||||
@ -61,6 +75,32 @@ func (c *Endpoint) KubernetesConfig() clientcmd.ClientConfig {
|
||||
return clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
|
||||
}
|
||||
|
||||
// ResolveDefault returns endpoint metadata for the default Kubernetes
|
||||
// endpoint, which is derived from the env-based kubeconfig.
|
||||
func (c *EndpointMeta) ResolveDefault(stackOrchestrator command.Orchestrator) (interface{}, *store.EndpointTLSData, error) {
|
||||
kubeconfig := os.Getenv("KUBECONFIG")
|
||||
if kubeconfig == "" {
|
||||
kubeconfig = filepath.Join(homedir.Get(), ".kube/config")
|
||||
}
|
||||
kubeEP, err := FromKubeConfig(kubeconfig, "", "")
|
||||
if err != nil {
|
||||
if stackOrchestrator == command.OrchestratorKubernetes || stackOrchestrator == command.OrchestratorAll {
|
||||
return nil, nil, errors.Wrapf(err, "default orchestrator is %s but unable to resolve kubernetes endpoint", stackOrchestrator)
|
||||
}
|
||||
|
||||
// We deliberately quash the error here; returning nil
// for the first argument is sufficient to indicate we weren't able to
// provide a default
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
var tls *store.EndpointTLSData
|
||||
if kubeEP.TLSData != nil {
|
||||
tls = kubeEP.TLSData.ToStoreTLSData()
|
||||
}
|
||||
return kubeEP.EndpointMeta, tls, nil
|
||||
}
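The lookup above follows the usual kubeconfig convention: `$KUBECONFIG` if set, otherwise `~/.kube/config`. A minimal standalone sketch of the same resolution; the helper name is hypothetical and `os.UserHomeDir` stands in for docker's `homedir` package:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// defaultKubeconfigPath mirrors the lookup order above: $KUBECONFIG if set,
// otherwise ~/.kube/config.
func defaultKubeconfigPath() string {
	if v := os.Getenv("KUBECONFIG"); v != "" {
		return v
	}
	home, err := os.UserHomeDir()
	if err != nil {
		return filepath.Join(".kube", "config") // hypothetical last resort; the real code uses homedir.Get()
	}
	return filepath.Join(home, ".kube", "config")
}

func main() {
	fmt.Println(defaultKubeconfigPath())
}
```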
|
||||
|
||||
// EndpointFromContext extracts kubernetes endpoint info from current context
|
||||
func EndpointFromContext(metadata store.Metadata) *EndpointMeta {
|
||||
ep, ok := metadata.Endpoints[KubernetesEndpoint]
|
||||
@ -91,5 +131,5 @@ func ConfigFromContext(name string, s store.Reader) (clientcmd.ClientConfig, err
|
||||
return ep.KubernetesConfig(), nil
|
||||
}
|
||||
// context has no kubernetes endpoint
|
||||
return kubernetes.NewKubernetesConfig(""), nil
|
||||
return api.NewKubernetesConfig(""), nil
|
||||
}
|
||||
|
||||
25 cli/context/kubernetes/load_test.go Normal file
@ -0,0 +1,25 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/config/configfile"
|
||||
cliflags "github.com/docker/cli/cli/flags"
|
||||
"gotest.tools/assert"
|
||||
"gotest.tools/env"
|
||||
)
|
||||
|
||||
func TestDefaultContextInitializer(t *testing.T) {
|
||||
cli, err := command.NewDockerCli()
|
||||
assert.NilError(t, err)
|
||||
defer env.Patch(t, "KUBECONFIG", "./testdata/test-kubeconfig")()
|
||||
configFile := &configfile.ConfigFile{
|
||||
StackOrchestrator: "all",
|
||||
}
|
||||
ctx, err := command.ResolveDefaultContext(&cliflags.CommonOptions{}, configFile, command.DefaultContextStoreConfig(), cli.Err())
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, "default", ctx.Meta.Name)
|
||||
assert.Equal(t, command.OrchestratorAll, ctx.Meta.Metadata.(command.DockerContext).StackOrchestrator)
|
||||
assert.DeepEqual(t, "zoinx", ctx.Meta.Endpoints[KubernetesEndpoint].(EndpointMeta).DefaultNamespace)
|
||||
}
|
||||
29 cli/context/store/io_utils.go Normal file
@ -0,0 +1,29 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// LimitedReader is a fork of io.LimitedReader to override Read.
|
||||
type LimitedReader struct {
|
||||
R io.Reader
|
||||
N int64 // max bytes remaining
|
||||
}
|
||||
|
||||
// Read is a fork of io.LimitedReader.Read that returns an error once the limit is exceeded.
|
||||
func (l *LimitedReader) Read(p []byte) (n int, err error) {
|
||||
if l.N < 0 {
|
||||
return 0, errors.New("read exceeds the defined limit")
|
||||
}
|
||||
if l.N == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
// cap the read at N+1 bytes; reading one byte past the limit drives N negative so the next Read returns the limit error
|
||||
if int64(len(p)) > l.N+1 {
|
||||
p = p[0 : l.N+1]
|
||||
}
|
||||
n, err = l.R.Read(p)
|
||||
l.N -= int64(n)
|
||||
return n, err
|
||||
}
|
||||
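The `N+1` cap above is what distinguishes a source that sits exactly at the limit (clean `io.EOF`) from one that exceeds it (the limit error on the next `Read`). A sketch of an `Example` test that could live in the same package; the function name and placement are assumptions:

```go
package store

import (
	"fmt"
	"strings"
)

// ExampleLimitedReader shows the N+1 cap in action: the first Read is allowed
// to pull one byte past the limit, which drives N negative and makes the next
// Read fail instead of silently truncating the input.
func ExampleLimitedReader() {
	lr := &LimitedReader{R: strings.NewReader("abcd"), N: 2}
	buf := make([]byte, 8)
	n, _ := lr.Read(buf) // reads 3 bytes (N+1)
	_, err := lr.Read(buf)
	fmt.Println(n, err)
	// Output: 3 read exceeds the defined limit
}
```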
24 cli/context/store/io_utils_test.go Normal file
@ -0,0 +1,24 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"gotest.tools/assert"
|
||||
)
|
||||
|
||||
func TestLimitReaderReadAll(t *testing.T) {
|
||||
r := strings.NewReader("Reader")
|
||||
|
||||
_, err := ioutil.ReadAll(r)
|
||||
assert.NilError(t, err)
|
||||
|
||||
r = strings.NewReader("Test")
|
||||
_, err = ioutil.ReadAll(&LimitedReader{R: r, N: 4})
|
||||
assert.NilError(t, err)
|
||||
|
||||
r = strings.NewReader("Test")
|
||||
_, err = ioutil.ReadAll(&LimitedReader{R: r, N: 2})
|
||||
assert.Error(t, err, "read exceeds the defined limit")
|
||||
}
|
||||
@ -2,12 +2,16 @@ package store
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"bufio"
|
||||
"bytes"
|
||||
_ "crypto/sha256" // ensure ids can be computed
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@ -259,12 +263,44 @@ func Export(name string, s Reader) io.ReadCloser {
|
||||
return reader
|
||||
}
|
||||
|
||||
const (
|
||||
maxAllowedFileSizeToImport int64 = 10 << 20
|
||||
zipType string = "application/zip"
|
||||
)
|
||||
|
||||
func getImportContentType(r *bufio.Reader) (string, error) {
|
||||
head, err := r.Peek(512)
|
||||
if err != nil && err != io.EOF {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return http.DetectContentType(head), nil
|
||||
}
|
||||
|
||||
// Import imports an exported context into a store
|
||||
func Import(name string, s Writer, reader io.Reader) error {
|
||||
tr := tar.NewReader(reader)
|
||||
// Wrap in a buffered reader so the content type can be sniffed without advancing the underlying reader
|
||||
r := bufio.NewReader(reader)
|
||||
|
||||
importContentType, err := getImportContentType(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch importContentType {
|
||||
case zipType:
|
||||
return importZip(name, s, r)
|
||||
default:
|
||||
// Assume it's a TAR (TAR does not have a "magic number")
|
||||
return importTar(name, s, r)
|
||||
}
|
||||
}
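`Peek` on the `bufio.Reader` inspects up to 512 bytes without consuming them, so the same reader can still be handed to `importZip` or `importTar` afterwards. A self-contained sketch of the sniffing step; the payload here is made up for illustration:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// A zip archive starts with the magic bytes "PK\x03\x04"; tar has no
	// magic number, which is why tar is the fallback branch above.
	payload := append([]byte("PK\x03\x04"), make([]byte, 600)...)
	r := bufio.NewReader(bytes.NewReader(payload))

	head, err := r.Peek(512) // does not advance the reader
	if err != nil {
		panic(err)
	}
	fmt.Println(http.DetectContentType(head)) // application/zip
}
```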
|
||||
|
||||
func importTar(name string, s Writer, reader io.Reader) error {
|
||||
tr := tar.NewReader(&LimitedReader{R: reader, N: maxAllowedFileSizeToImport})
|
||||
tlsData := ContextTLSData{
|
||||
Endpoints: map[string]EndpointTLSData{},
|
||||
}
|
||||
var importedMetaFile bool
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
@ -282,37 +318,119 @@ func Import(name string, s Writer, reader io.Reader) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var meta Metadata
|
||||
if err := json.Unmarshal(data, &meta); err != nil {
|
||||
meta, err := parseMetadata(data, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
meta.Name = name
|
||||
if err := s.CreateOrUpdate(meta); err != nil {
|
||||
return err
|
||||
}
|
||||
importedMetaFile = true
|
||||
} else if strings.HasPrefix(hdr.Name, "tls/") {
|
||||
relative := strings.TrimPrefix(hdr.Name, "tls/")
|
||||
parts := strings.SplitN(relative, "/", 2)
|
||||
if len(parts) != 2 {
|
||||
return errors.New("archive format is invalid")
|
||||
}
|
||||
endpointName := parts[0]
|
||||
fileName := parts[1]
|
||||
data, err := ioutil.ReadAll(tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, ok := tlsData.Endpoints[endpointName]; !ok {
|
||||
tlsData.Endpoints[endpointName] = EndpointTLSData{
|
||||
Files: map[string][]byte{},
|
||||
}
|
||||
if err := importEndpointTLS(&tlsData, hdr.Name, data); err != nil {
|
||||
return err
|
||||
}
|
||||
tlsData.Endpoints[endpointName].Files[fileName] = data
|
||||
}
|
||||
}
|
||||
if !importedMetaFile {
|
||||
return errdefs.InvalidParameter(errors.New("invalid context: no metadata found"))
|
||||
}
|
||||
return s.ResetTLSMaterial(name, &tlsData)
|
||||
}
|
||||
|
||||
func importZip(name string, s Writer, reader io.Reader) error {
|
||||
body, err := ioutil.ReadAll(&LimitedReader{R: reader, N: maxAllowedFileSizeToImport})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
zr, err := zip.NewReader(bytes.NewReader(body), int64(len(body)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tlsData := ContextTLSData{
|
||||
Endpoints: map[string]EndpointTLSData{},
|
||||
}
|
||||
|
||||
var importedMetaFile bool
|
||||
for _, zf := range zr.File {
|
||||
fi := zf.FileInfo()
|
||||
if fi.IsDir() {
|
||||
// skip this entry, only taking files into account
|
||||
continue
|
||||
}
|
||||
if zf.Name == metaFile {
|
||||
f, err := zf.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadAll(&LimitedReader{R: f, N: maxAllowedFileSizeToImport})
|
||||
defer f.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
meta, err := parseMetadata(data, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.CreateOrUpdate(meta); err != nil {
|
||||
return err
|
||||
}
|
||||
importedMetaFile = true
|
||||
} else if strings.HasPrefix(zf.Name, "tls/") {
|
||||
f, err := zf.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data, err := ioutil.ReadAll(f)
|
||||
defer f.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = importEndpointTLS(&tlsData, zf.Name, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if !importedMetaFile {
|
||||
return errdefs.InvalidParameter(errors.New("invalid context: no metadata found"))
|
||||
}
|
||||
return s.ResetTLSMaterial(name, &tlsData)
|
||||
}
|
||||
|
||||
func parseMetadata(data []byte, name string) (Metadata, error) {
|
||||
var meta Metadata
|
||||
if err := json.Unmarshal(data, &meta); err != nil {
|
||||
return meta, err
|
||||
}
|
||||
meta.Name = name
|
||||
return meta, nil
|
||||
}
|
||||
|
||||
func importEndpointTLS(tlsData *ContextTLSData, path string, data []byte) error {
|
||||
parts := strings.SplitN(strings.TrimPrefix(path, "tls/"), "/", 2)
|
||||
if len(parts) != 2 {
|
||||
// TLS files must be stored in a two-level directory inside the archive,
// i.e. tls/{endpointName}/{fileName}
|
||||
return errors.New("archive format is invalid")
|
||||
}
|
||||
|
||||
epName := parts[0]
|
||||
fileName := parts[1]
|
||||
if _, ok := tlsData.Endpoints[epName]; !ok {
|
||||
tlsData.Endpoints[epName] = EndpointTLSData{
|
||||
Files: map[string][]byte{},
|
||||
}
|
||||
}
|
||||
tlsData.Endpoints[epName].Files[fileName] = data
|
||||
return nil
|
||||
}
|
||||
|
||||
type setContextName interface {
|
||||
setContext(name string)
|
||||
}
|
||||
|
||||
@ -1,9 +1,16 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"gotest.tools/assert"
|
||||
@ -125,3 +132,127 @@ func TestErrHasCorrectContext(t *testing.T) {
|
||||
assert.ErrorContains(t, err, "no-exists")
|
||||
assert.Check(t, IsErrContextDoesNotExist(err))
|
||||
}
|
||||
|
||||
func TestDetectImportContentType(t *testing.T) {
|
||||
testDir, err := ioutil.TempDir("", t.Name())
|
||||
assert.NilError(t, err)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
r := bufio.NewReader(buf)
|
||||
ct, err := getImportContentType(r)
|
||||
assert.NilError(t, err)
|
||||
assert.Assert(t, zipType != ct)
|
||||
}
|
||||
|
||||
func TestImportTarInvalid(t *testing.T) {
|
||||
testDir, err := ioutil.TempDir("", t.Name())
|
||||
assert.NilError(t, err)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
tf := path.Join(testDir, "test.context")
|
||||
|
||||
f, err := os.Create(tf)
|
||||
defer f.Close()
|
||||
assert.NilError(t, err)
|
||||
|
||||
tw := tar.NewWriter(f)
|
||||
hdr := &tar.Header{
|
||||
Name: "dummy-file",
|
||||
Mode: 0600,
|
||||
Size: int64(len("hello world")),
|
||||
}
|
||||
err = tw.WriteHeader(hdr)
|
||||
assert.NilError(t, err)
|
||||
_, err = tw.Write([]byte("hello world"))
|
||||
assert.NilError(t, err)
|
||||
err = tw.Close()
|
||||
assert.NilError(t, err)
|
||||
|
||||
source, err := os.Open(tf)
|
||||
assert.NilError(t, err)
|
||||
defer source.Close()
|
||||
var r io.Reader = source
|
||||
s := New(testDir, testCfg)
|
||||
err = Import("tarInvalid", s, r)
|
||||
assert.ErrorContains(t, err, "invalid context: no metadata found")
|
||||
}
|
||||
|
||||
func TestImportZip(t *testing.T) {
|
||||
testDir, err := ioutil.TempDir("", t.Name())
|
||||
assert.NilError(t, err)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
zf := path.Join(testDir, "test.zip")
|
||||
|
||||
f, err := os.Create(zf)
|
||||
defer f.Close()
|
||||
assert.NilError(t, err)
|
||||
w := zip.NewWriter(f)
|
||||
|
||||
meta, err := json.Marshal(Metadata{
|
||||
Endpoints: map[string]interface{}{
|
||||
"ep1": endpoint{Foo: "bar"},
|
||||
},
|
||||
Metadata: context{Bar: "baz"},
|
||||
Name: "source",
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
var files = []struct {
|
||||
Name, Body string
|
||||
}{
|
||||
{"meta.json", string(meta)},
|
||||
{path.Join("tls", "docker", "ca.pem"), string([]byte("ca.pem"))},
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
f, err := w.Create(file.Name)
|
||||
assert.NilError(t, err)
|
||||
_, err = f.Write([]byte(file.Body))
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
|
||||
err = w.Close()
|
||||
assert.NilError(t, err)
|
||||
|
||||
source, err := os.Open(zf)
|
||||
assert.NilError(t, err)
|
||||
ct, err := getImportContentType(bufio.NewReader(source))
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, zipType, ct)
|
||||
|
||||
source, _ = os.Open(zf)
|
||||
defer source.Close()
|
||||
var r io.Reader = source
|
||||
s := New(testDir, testCfg)
|
||||
err = Import("zipTest", s, r)
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
|
||||
func TestImportZipInvalid(t *testing.T) {
|
||||
testDir, err := ioutil.TempDir("", t.Name())
|
||||
assert.NilError(t, err)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
zf := path.Join(testDir, "test.zip")
|
||||
|
||||
f, err := os.Create(zf)
|
||||
defer f.Close()
|
||||
assert.NilError(t, err)
|
||||
w := zip.NewWriter(f)
|
||||
|
||||
df, err := w.Create("dummy-file")
|
||||
assert.NilError(t, err)
|
||||
_, err = df.Write([]byte("hello world"))
|
||||
assert.NilError(t, err)
|
||||
err = w.Close()
|
||||
assert.NilError(t, err)
|
||||
|
||||
source, err := os.Open(zf)
|
||||
assert.NilError(t, err)
|
||||
defer source.Close()
|
||||
var r io.Reader = source
|
||||
s := New(testDir, testCfg)
|
||||
err = Import("zipInvalid", s, r)
|
||||
assert.ErrorContains(t, err, "invalid context: no metadata found")
|
||||
}
|
||||
|
||||
@ -30,6 +30,16 @@ func (c Config) SetEndpoint(name string, getter TypeGetter) {
|
||||
c.endpointTypes[name] = getter
|
||||
}
|
||||
|
||||
// ForeachEndpointType calls cb on every endpoint type registered with the Config
|
||||
func (c Config) ForeachEndpointType(cb func(string, TypeGetter) error) error {
|
||||
for n, ep := range c.endpointTypes {
|
||||
if err := cb(n, ep); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewConfig creates a config object
|
||||
func NewConfig(contextType TypeGetter, endpoints ...NamedTypeGetter) Config {
|
||||
res := Config{
|
||||
|
||||
@ -69,16 +69,22 @@ func newDockerCommand(dockerCli *command.DockerCli) *cli.TopLevelCommand {
|
||||
return cli.NewTopLevelCommand(cmd, dockerCli, opts, flags)
|
||||
}
|
||||
|
||||
func setFlagErrorFunc(dockerCli *command.DockerCli, cmd *cobra.Command) {
|
||||
func setFlagErrorFunc(dockerCli command.Cli, cmd *cobra.Command) {
|
||||
// When invoking `docker stack --nonsense`, we need to make sure FlagErrorFunc returns appropriate
// output if the feature is not supported.
// As cli.SetupRootCommand(cmd) above has already set up the FlagErrorFunc, we add a pre-check before the FlagErrorFunc
// is called.
|
||||
flagErrorFunc := cmd.FlagErrorFunc()
|
||||
cmd.SetFlagErrorFunc(func(cmd *cobra.Command, err error) error {
|
||||
if err := pluginmanager.AddPluginCommandStubs(dockerCli, cmd.Root()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := isSupported(cmd, dockerCli); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := hideUnsupportedFeatures(cmd, dockerCli); err != nil {
|
||||
return err
|
||||
}
|
||||
return flagErrorFunc(cmd, err)
|
||||
})
|
||||
}
|
||||
|
||||
@ -2112,8 +2112,8 @@ _docker_container_run_and_create() {
|
||||
return
|
||||
;;
|
||||
--security-opt)
|
||||
COMPREPLY=( $( compgen -W "apparmor= label= no-new-privileges seccomp=" -- "$cur") )
|
||||
if [ "${COMPREPLY[*]}" != "no-new-privileges" ] ; then
|
||||
COMPREPLY=( $( compgen -W "apparmor= label= no-new-privileges seccomp= systempaths=unconfined" -- "$cur") )
|
||||
if [[ ${COMPREPLY[*]} = *= ]] ; then
|
||||
__docker_nospace
|
||||
fi
|
||||
return
|
||||
@ -2342,11 +2342,15 @@ _docker_context_create() {
|
||||
--description|--docker|--kubernetes)
|
||||
return
|
||||
;;
|
||||
--from)
|
||||
__docker_complete_contexts
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--default-stack-orchestrator --description --docker --help --kubernetes" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--default-stack-orchestrator --description --docker --from --help --kubernetes" -- "$cur" ) )
|
||||
;;
|
||||
esac
|
||||
}
|
||||
@ -2617,36 +2621,15 @@ _docker_daemon() {
|
||||
return
|
||||
;;
|
||||
--storage-driver|-s)
|
||||
COMPREPLY=( $( compgen -W "aufs btrfs devicemapper overlay overlay2 vfs zfs" -- "$(echo "$cur" | tr '[:upper:]' '[:lower:]')" ) )
|
||||
COMPREPLY=( $( compgen -W "aufs btrfs overlay2 vfs zfs" -- "$(echo "$cur" | tr '[:upper:]' '[:lower:]')" ) )
|
||||
return
|
||||
;;
|
||||
--storage-opt)
|
||||
local btrfs_options="btrfs.min_space"
|
||||
local devicemapper_options="
|
||||
dm.basesize
|
||||
dm.blkdiscard
|
||||
dm.blocksize
|
||||
dm.directlvm_device
|
||||
dm.fs
|
||||
dm.libdm_log_level
|
||||
dm.loopdatasize
|
||||
dm.loopmetadatasize
|
||||
dm.min_free_space
|
||||
dm.mkfsarg
|
||||
dm.mountopt
|
||||
dm.override_udev_sync_check
|
||||
dm.thinpooldev
|
||||
dm.thinp_autoextend_percent
|
||||
dm.thinp_autoextend_threshold
|
||||
dm.thinp_metapercent
|
||||
dm.thinp_percent
|
||||
dm.use_deferred_deletion
|
||||
dm.use_deferred_removal
|
||||
"
|
||||
local overlay2_options="overlay2.size"
|
||||
local zfs_options="zfs.fsname"
|
||||
|
||||
local all_options="$btrfs_options $devicemapper_options $overlay2_options $zfs_options"
|
||||
local all_options="$btrfs_options $overlay2_options $zfs_options"
|
||||
|
||||
case $(__docker_value_of_option '--storage-driver|-s') in
|
||||
'')
|
||||
@ -2655,9 +2638,6 @@ _docker_daemon() {
|
||||
btrfs)
|
||||
COMPREPLY=( $( compgen -W "$btrfs_options" -S = -- "$cur" ) )
|
||||
;;
|
||||
devicemapper)
|
||||
COMPREPLY=( $( compgen -W "$devicemapper_options" -S = -- "$cur" ) )
|
||||
;;
|
||||
overlay2)
|
||||
COMPREPLY=( $( compgen -W "$overlay2_options" -S = -- "$cur" ) )
|
||||
;;
|
||||
@ -5178,12 +5158,16 @@ _docker_system_events() {
|
||||
__docker_complete_networks --cur "${cur##*=}"
|
||||
return
|
||||
;;
|
||||
node)
|
||||
__docker_complete_nodes --cur "${cur##*=}"
|
||||
return
|
||||
;;
|
||||
scope)
|
||||
COMPREPLY=( $( compgen -W "local swarm" -- "${cur##*=}" ) )
|
||||
return
|
||||
;;
|
||||
type)
|
||||
COMPREPLY=( $( compgen -W "config container daemon image network plugin secret service volume" -- "${cur##*=}" ) )
|
||||
COMPREPLY=( $( compgen -W "config container daemon image network node plugin secret service volume" -- "${cur##*=}" ) )
|
||||
return
|
||||
;;
|
||||
volume)
|
||||
@ -5194,7 +5178,7 @@ _docker_system_events() {
|
||||
|
||||
case "$prev" in
|
||||
--filter|-f)
|
||||
COMPREPLY=( $( compgen -S = -W "container daemon event image label network scope type volume" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -S = -W "container daemon event image label network node scope type volume" -- "$cur" ) )
|
||||
__docker_nospace
|
||||
return
|
||||
;;
|
||||
|
||||
@ -9,6 +9,7 @@
|
||||
# - Felix Riedel
|
||||
# - Steve Durrheimer
|
||||
# - Vincent Bernat
|
||||
# - Rohan Verma
|
||||
#
|
||||
# license:
|
||||
#
|
||||
@ -2784,7 +2785,7 @@ __docker_subcommand() {
|
||||
$opts_help \
|
||||
"($help -p --password)"{-p=,--password=}"[Password]:password: " \
|
||||
"($help)--password-stdin[Read password from stdin]" \
|
||||
"($help -u --user)"{-u=,--user=}"[Username]:username: " \
|
||||
"($help -u --username)"{-u=,--username=}"[Username]:username: " \
|
||||
"($help -)1:server: " && ret=0
|
||||
;;
|
||||
(logout)
|
||||
|
||||
@ -21,38 +21,38 @@ ifeq ($(DOCKER_CLI_GO_BUILD_CACHE),y)
|
||||
DOCKER_CLI_MOUNTS += -v "$(CACHE_VOLUME_NAME):/root/.cache/go-build"
|
||||
endif
|
||||
VERSION = $(shell cat VERSION)
|
||||
ENVVARS = -e VERSION=$(VERSION) -e GITCOMMIT -e PLATFORM -e TESTFLAGS -e TESTDIRS
|
||||
ENVVARS = -e VERSION=$(VERSION) -e GITCOMMIT -e PLATFORM -e TESTFLAGS -e TESTDIRS -e GOOS -e GOARCH -e GOARM
|
||||
|
||||
# build docker image (dockerfiles/Dockerfile.build)
|
||||
.PHONY: build_docker_image
|
||||
build_docker_image:
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.dev | docker build ${DOCKER_BUILD_ARGS} -t $(DEV_DOCKER_IMAGE_NAME) -
|
||||
cat ./dockerfiles/Dockerfile.dev | docker build ${DOCKER_BUILD_ARGS} --build-arg=GO_VERSION -t $(DEV_DOCKER_IMAGE_NAME) -
|
||||
|
||||
# build docker image having the linting tools (dockerfiles/Dockerfile.lint)
|
||||
.PHONY: build_linter_image
|
||||
build_linter_image:
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.lint | docker build ${DOCKER_BUILD_ARGS} -t $(LINTER_IMAGE_NAME) -
|
||||
cat ./dockerfiles/Dockerfile.lint | docker build ${DOCKER_BUILD_ARGS} --build-arg=GO_VERSION -t $(LINTER_IMAGE_NAME) -
|
||||
|
||||
.PHONY: build_cross_image
|
||||
build_cross_image:
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.cross | docker build ${DOCKER_BUILD_ARGS} -t $(CROSS_IMAGE_NAME) -
|
||||
cat ./dockerfiles/Dockerfile.cross | docker build ${DOCKER_BUILD_ARGS} --build-arg=GO_VERSION -t $(CROSS_IMAGE_NAME) -
|
||||
|
||||
.PHONY: build_shell_validate_image
|
||||
build_shell_validate_image:
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.shellcheck | docker build -t $(VALIDATE_IMAGE_NAME) -
|
||||
cat ./dockerfiles/Dockerfile.shellcheck | docker build --build-arg=GO_VERSION -t $(VALIDATE_IMAGE_NAME) -
|
||||
|
||||
.PHONY: build_binary_native_image
|
||||
build_binary_native_image:
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.binary-native | docker build -t $(BINARY_NATIVE_IMAGE_NAME) -
|
||||
cat ./dockerfiles/Dockerfile.binary-native | docker build --build-arg=GO_VERSION -t $(BINARY_NATIVE_IMAGE_NAME) -
|
||||
|
||||
.PHONY: build_e2e_image
|
||||
build_e2e_image:
|
||||
docker build -t $(E2E_IMAGE_NAME) --build-arg VERSION=$(VERSION) --build-arg GITCOMMIT=$(GITCOMMIT) -f ./dockerfiles/Dockerfile.e2e .
|
||||
docker build -t $(E2E_IMAGE_NAME) --build-arg=GO_VERSION --build-arg VERSION=$(VERSION) --build-arg GITCOMMIT=$(GITCOMMIT) -f ./dockerfiles/Dockerfile.e2e .
|
||||
|
||||
DOCKER_RUN_NAME_OPTION := $(if $(DOCKER_CLI_CONTAINER_NAME),--name $(DOCKER_CLI_CONTAINER_NAME),)
|
||||
DOCKER_RUN := docker run --rm $(ENVVARS) $(DOCKER_CLI_MOUNTS) $(DOCKER_RUN_NAME_OPTION)
|
||||
|
||||
@ -1,4 +1,6 @@
|
||||
FROM golang:1.12.4-alpine
|
||||
ARG GO_VERSION=1.12.8
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine
|
||||
|
||||
RUN apk add -U git bash coreutils gcc musl-dev
|
||||
|
||||
|
||||
@ -1,4 +1,6 @@
|
||||
FROM dockercore/golang-cross:1.12.4
|
||||
ARG GO_VERSION=1.12.8
|
||||
|
||||
FROM dockercore/golang-cross:${GO_VERSION}
|
||||
ENV DISABLE_WARN_OUTSIDE_CONTAINER=1
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
COPY . .
|
||||
|
||||
@ -1,4 +1,6 @@
|
||||
FROM golang:1.12.4-alpine
|
||||
ARG GO_VERSION=1.12.8
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine
|
||||
|
||||
RUN apk add -U git make bash coreutils ca-certificates curl
|
||||
|
||||
|
||||
@ -1,6 +1,4 @@
|
||||
ARG GO_VERSION=1.12.4
|
||||
|
||||
FROM docker/containerd-shim-process:a4d1531 AS containerd-shim-process
|
||||
ARG GO_VERSION=1.12.8
|
||||
|
||||
# Use Debian based image as docker-compose requires glibc.
|
||||
FROM golang:${GO_VERSION}
|
||||
@ -9,10 +7,6 @@ RUN apt-get update && apt-get install -y \
|
||||
build-essential \
|
||||
curl \
|
||||
openssl \
|
||||
btrfs-tools \
|
||||
libapparmor-dev \
|
||||
libseccomp-dev \
|
||||
iptables \
|
||||
openssh-client \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
|
||||
@ -1,4 +1,6 @@
|
||||
FROM golang:1.12.4-alpine
|
||||
ARG GO_VERSION=1.12.8
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine
|
||||
|
||||
RUN apk add -U git
|
||||
|
||||
|
||||
@ -19,6 +19,19 @@ The following list of features are deprecated in Engine.
|
||||
To learn more about Docker Engine's deprecation policy,
|
||||
see [Feature Deprecation Policy](https://docs.docker.com/engine/#feature-deprecation-policy).
|
||||
|
||||
### Pushing and pulling with image manifest v2 schema 1
|
||||
|
||||
**Deprecated in Release: v19.03.0**
|
||||
|
||||
**Target For Removal In Release: v19.09.0**
|
||||
|
||||
The image manifest
|
||||
[v2 schema 1](https://github.com/docker/distribution/blob/fda42e5ef908bdba722d435ff1f330d40dfcd56c/docs/spec/manifest-v2-1.md)
|
||||
format is deprecated in favor of the
|
||||
[v2 schema 2](https://github.com/docker/distribution/blob/fda42e5ef908bdba722d435ff1f330d40dfcd56c/docs/spec/manifest-v2-2.md) format.
|
||||
|
||||
If the registry you are using still supports v2 schema 1, urge their administrators to move to v2 schema 2.
|
||||
|
||||
### Legacy "overlay" storage driver
|
||||
|
||||
**Deprecated in Release: v18.09.0**
|
||||
@ -50,6 +63,24 @@ Now that support for `overlay2` is added to all supported distros (as they are
|
||||
either on kernel 4.x, or have support for multiple lowerdirs backported), there
|
||||
is no reason to continue maintenance of the `devicemapper` storage driver.
|
||||
|
||||
### AuFS storage driver
|
||||
|
||||
**Deprecated in Release: v19.03.0**
|
||||
|
||||
The `aufs` storage driver is deprecated in favor of `overlay2`, and will
|
||||
be removed in a future release. Users of the `aufs` storage driver are
|
||||
recommended to migrate to a different storage driver, such as `overlay2`, which
|
||||
is now the default storage driver.
|
||||
|
||||
The `aufs` storage driver facilitates running Docker on distros that have no
|
||||
support for OverlayFS, such as Ubuntu 14.04 LTS, which originally shipped with
|
||||
a 3.14 kernel.
|
||||
|
||||
Now that Ubuntu 14.04 is no longer a supported distro for Docker, and `overlay2`
|
||||
is available to all supported distros (as they are either on kernel 4.x, or have
|
||||
support for multiple lowerdirs backported), there is no reason to continue
|
||||
maintenance of the `aufs` storage driver.
|
||||
|
||||
### Reserved namespaces in engine labels
|
||||
|
||||
**Deprecated in Release: v18.06.0**
|
||||
|
||||
@ -812,7 +812,7 @@ Defaults to 20G.
|
||||
|
||||
###### Example
|
||||
|
||||
```PowerShell
|
||||
```powershell
|
||||
C:\> dockerd --storage-opt size=40G
|
||||
```
|
||||
|
||||
@ -827,7 +827,7 @@ deployments).
|
||||
|
||||
###### Example
|
||||
|
||||
```PowerShell
|
||||
```powershell
|
||||
C:\> dockerd --storage-opt lcow.globalmode=false
|
||||
```
|
||||
|
||||
@ -838,7 +838,7 @@ used for booting a utility VM. Defaults to `%ProgramFiles%\Linux Containers`.
|
||||
|
||||
###### Example
|
||||
|
||||
```PowerShell
|
||||
```powershell
|
||||
C:\> dockerd --storage-opt lcow.kirdpath=c:\path\to\files
|
||||
```
|
||||
|
||||
@ -849,7 +849,7 @@ Defaults to `bootx64.efi`.
|
||||
|
||||
###### Example
|
||||
|
||||
```PowerShell
|
||||
```powershell
|
||||
C:\> dockerd --storage-opt lcow.kernel=kernel.efi
|
||||
```
|
||||
|
||||
@ -860,7 +860,7 @@ Defaults to `initrd.img`.
|
||||
|
||||
###### Example
|
||||
|
||||
```PowerShell
|
||||
```powershell
|
||||
C:\> dockerd --storage-opt lcow.initrd=myinitrd.img
|
||||
```
|
||||
|
||||
@ -872,7 +872,7 @@ are kernel specific.
|
||||
|
||||
###### Example
|
||||
|
||||
```PowerShell
|
||||
```powershell
|
||||
C:\> dockerd --storage-opt "lcow.bootparameters='option=value'"
|
||||
```
|
||||
|
||||
@ -883,7 +883,7 @@ and initrd booting. Defaults to `uvm.vhdx` under `lcow.kirdpath`.
|
||||
|
||||
###### Example
|
||||
|
||||
```PowerShell
|
||||
```powershell
|
||||
C:\> dockerd --storage-opt lcow.vhdx=custom.vhdx
|
||||
```
|
||||
|
||||
@ -894,7 +894,7 @@ to 300.
|
||||
|
||||
###### Example
|
||||
|
||||
```PowerShell
|
||||
```powershell
|
||||
C:\> dockerd --storage-opt lcow.timeout=240
|
||||
```
|
||||
|
||||
@ -905,7 +905,7 @@ containers. Defaults to 20. Cannot be less than 20.
|
||||
|
||||
###### Example
|
||||
|
||||
```PowerShell
|
||||
```powershell
|
||||
C:\> dockerd --storage-opt lcow.sandboxsize=40
|
||||
```
|
||||
|
||||
|
||||
@ -31,7 +31,12 @@ Options:
|
||||
## Description
|
||||
|
||||
Use `docker events` to get real-time events from the server. These events differ
|
||||
per Docker object type.
|
||||
per Docker object type. Different event types have different scopes. Local
|
||||
scoped events are only seen on the node they take place on, and swarm scoped
|
||||
events are seen on all managers.
|
||||
|
||||
Only the last 1000 log events are returned. You can use filters to further limit
|
||||
the number of events returned.
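The same scope filter can also be expressed against the Go SDK; a minimal sketch, assuming the standard `github.com/docker/docker/client` package and a daemon reachable through the usual environment variables:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	f := filters.NewArgs()
	f.Add("scope", "swarm") // swarm scoped events are delivered on every manager

	msgs, errs := cli.Events(context.Background(), types.EventsOptions{Filters: f})
	for {
		select {
		case m := <-msgs:
			fmt.Println(m.Type, m.Action, m.Actor.ID)
		case err := <-errs:
			panic(err)
		}
	}
}
```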
|
||||
|
||||
### Object types
|
||||
|
||||
@ -160,6 +165,9 @@ that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
|
||||
seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
|
||||
fraction of a second no more than nine digits long.
|
||||
|
||||
Only the last 1000 log events are returned. You can use filters to further limit
|
||||
the number of events returned.
|
||||
|
||||
#### Filtering
|
||||
|
||||
The filtering flag (`-f` or `--filter`) format is of "key=value". If you would
|
||||
|
||||
@ -605,6 +605,33 @@ PS C:\> docker run --device=class/86E0D1E0-8089-11D0-9CE4-08003E301F73 mcr.micro
|
||||
> Windows containers. This option fails if the container isolation is `hyperv`
|
||||
> or when running Linux Containers on Windows (LCOW).
|
||||
|
||||
### Access an NVIDIA GPU
|
||||
|
||||
The `--gpus` flag allows you to access NVIDIA GPU resources. First you need to
|
||||
install [nvidia-container-runtime](https://nvidia.github.io/nvidia-container-runtime/).
|
||||
Visit [Specify a container's resources](https://docs.docker.com/config/containers/resource_constraints/)
|
||||
for more information.
|
||||
|
||||
To use `--gpus`, specify which GPUs (or all) to use. If no value is provided, all
|
||||
available GPUs are used. The example below exposes all available GPUs.
|
||||
|
||||
```bash
|
||||
$ docker run -it --rm --gpus all ubuntu nvidia-smi
|
||||
```
|
||||
|
||||
Use the `device` option to specify GPUs. The example below exposes a specific
|
||||
GPU.
|
||||
|
||||
```bash
|
||||
$ docker run -it --rm --gpus device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a ubuntu nvidia-smi
|
||||
```
|
||||
|
||||
The example below exposes the first and third GPUs.
|
||||
|
||||
```bash
|
||||
$ docker run -it --rm --gpus device=0,2 nvidia-smi
|
||||
```
|
||||
|
||||
### Restart policies (--restart)
|
||||
|
||||
Use Docker's `--restart` to specify a container's *restart policy*. A restart
|
||||
@ -753,7 +780,7 @@ operating system older than Windows 10 1809 with `--isolation process` will fail
|
||||
On Windows server, assuming the default configuration, these commands are equivalent
|
||||
and result in `process` isolation:
|
||||
|
||||
```PowerShell
|
||||
```powershell
|
||||
PS C:\> docker run -d microsoft/nanoserver powershell echo process
|
||||
PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo process
|
||||
PS C:\> docker run -d --isolation process microsoft/nanoserver powershell echo process
|
||||
@ -763,7 +790,7 @@ If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`,
|
||||
are running against a Windows client-based daemon, these commands are equivalent and
|
||||
result in `hyperv` isolation:
|
||||
|
||||
```PowerShell
|
||||
```powershell
|
||||
PS C:\> docker run -d microsoft/nanoserver powershell echo hyperv
|
||||
PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo hyperv
|
||||
PS C:\> docker run -d --isolation hyperv microsoft/nanoserver powershell echo hyperv
|
||||
|
||||
@ -1433,7 +1433,7 @@ today=Wednesday
|
||||
HOME=/root
|
||||
```
|
||||
|
||||
```PowerShell
|
||||
```powershell
|
||||
PS C:\> docker run --rm -e "foo=bar" microsoft/nanoserver cmd /s /c set
|
||||
ALLUSERSPROFILE=C:\ProgramData
|
||||
APPDATA=C:\Users\ContainerAdministrator\AppData\Roaming
|
||||
|
||||
@ -74,18 +74,36 @@ func TestGlobalHelp(t *testing.T) {
|
||||
assert.Assert(t, is.Equal(badmetacount, 1))
|
||||
|
||||
// Running with `--help` should produce the same.
|
||||
res2 := icmd.RunCmd(run("--help"))
|
||||
res2.Assert(t, icmd.Expected{
|
||||
ExitCode: 0,
|
||||
t.Run("help_flag", func(t *testing.T) {
|
||||
res2 := icmd.RunCmd(run("--help"))
|
||||
res2.Assert(t, icmd.Expected{
|
||||
ExitCode: 0,
|
||||
})
|
||||
assert.Assert(t, is.Equal(res2.Stdout(), res.Stdout()))
|
||||
assert.Assert(t, is.Equal(res2.Stderr(), ""))
|
||||
})
|
||||
assert.Assert(t, is.Equal(res2.Stdout(), res.Stdout()))
|
||||
assert.Assert(t, is.Equal(res2.Stderr(), ""))
|
||||
|
||||
// Running just `docker` (with neither `help` nor `--help`) should produce the same thing, except on Stderr.
|
||||
res2 = icmd.RunCmd(run())
|
||||
res2.Assert(t, icmd.Expected{
|
||||
ExitCode: 0,
|
||||
t.Run("bare", func(t *testing.T) {
|
||||
res2 := icmd.RunCmd(run())
|
||||
res2.Assert(t, icmd.Expected{
|
||||
ExitCode: 0,
|
||||
})
|
||||
assert.Assert(t, is.Equal(res2.Stdout(), ""))
|
||||
assert.Assert(t, is.Equal(res2.Stderr(), res.Stdout()))
|
||||
})
|
||||
|
||||
t.Run("badopt", func(t *testing.T) {
|
||||
// Running `docker --badopt` should also produce the
|
||||
// same thing, give or take the leading error message
|
||||
// and a trailing carriage return (due to main() using
|
||||
// Println in the error case).
|
||||
res2 := icmd.RunCmd(run("--badopt"))
|
||||
res2.Assert(t, icmd.Expected{
|
||||
ExitCode: 125,
|
||||
})
|
||||
assert.Assert(t, is.Equal(res2.Stdout(), ""))
|
||||
exp := "unknown flag: --badopt\nSee 'docker --help'.\n" + res.Stdout() + "\n"
|
||||
assert.Assert(t, is.Equal(res2.Stderr(), exp))
|
||||
})
|
||||
assert.Assert(t, is.Equal(res2.Stdout(), ""))
|
||||
assert.Assert(t, is.Equal(res2.Stderr(), res.Stdout()))
|
||||
}
|
||||
|
||||
36 e2e/cli-plugins/plugins/nopersistentprerun/main.go Normal file
@ -0,0 +1,36 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/cli/cli-plugins/manager"
|
||||
"github.com/docker/cli/cli-plugins/plugin"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func main() {
|
||||
plugin.Run(func(dockerCli command.Cli) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "nopersistentprerun",
|
||||
Short: "Testing without PersistentPreRun hooks",
|
||||
// PersistentPreRunE is deliberately not specified; we need to test that it works in the absence of an explicit call
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
cli := dockerCli.Client()
|
||||
ping, err := cli.Ping(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(ping.APIVersion)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
},
|
||||
manager.Metadata{
|
||||
SchemaVersion: "0.1.0",
|
||||
Vendor: "Docker Inc.",
|
||||
Version: "testing",
|
||||
})
|
||||
}
|
||||
@ -213,15 +213,24 @@ func TestGoodSubcommandHelp(t *testing.T) {
|
||||
}
|
||||
|
||||
// TestCliInitialized tests the code paths which ensure that the Cli
|
||||
// object is initialized even if the plugin uses PersistentRunE
|
||||
// object is initialized whether the plugin uses PersistentRunE or not
|
||||
func TestCliInitialized(t *testing.T) {
|
||||
run, _, cleanup := prepare(t)
|
||||
defer cleanup()
|
||||
|
||||
res := icmd.RunCmd(run("helloworld", "--pre-run", "apiversion"))
|
||||
res.Assert(t, icmd.Success)
|
||||
assert.Assert(t, res.Stdout() != "")
|
||||
assert.Assert(t, is.Equal(res.Stderr(), "Plugin PersistentPreRunE called"))
|
||||
var apiversion string
|
||||
t.Run("withhook", func(t *testing.T) {
|
||||
res := icmd.RunCmd(run("helloworld", "--pre-run", "apiversion"))
|
||||
res.Assert(t, icmd.Success)
|
||||
assert.Assert(t, res.Stdout() != "")
|
||||
apiversion = res.Stdout()
|
||||
assert.Assert(t, is.Equal(res.Stderr(), "Plugin PersistentPreRunE called"))
|
||||
})
|
||||
t.Run("withouthook", func(t *testing.T) {
|
||||
res := icmd.RunCmd(run("nopersistentprerun"))
|
||||
res.Assert(t, icmd.Success)
|
||||
assert.Assert(t, is.Equal(res.Stdout(), apiversion))
|
||||
})
|
||||
}
|
||||
|
||||
// TestPluginErrorCode tests when the plugin return with a given exit status.
|
||||
|
||||
@ -5,9 +5,11 @@ services:
|
||||
image: 'registry:2'
|
||||
|
||||
engine:
|
||||
image: 'docker:${TEST_ENGINE_VERSION:-edge-dind}'
|
||||
image: 'docker:${TEST_ENGINE_VERSION:-stable-dind}'
|
||||
privileged: true
|
||||
command: ['--insecure-registry=registry:5000']
|
||||
environment:
|
||||
- DOCKER_TLS_CERTDIR=
|
||||
|
||||
notary-server:
|
||||
build:
|
||||
|
||||
21 e2e/context/context_test.go Normal file
@ -0,0 +1,21 @@
|
||||
package context
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"gotest.tools/golden"
|
||||
"gotest.tools/icmd"
|
||||
)
|
||||
|
||||
func TestContextList(t *testing.T) {
|
||||
cmd := icmd.Command("docker", "context", "ls")
|
||||
cmd.Env = append(cmd.Env,
|
||||
"DOCKER_CONFIG=./testdata/test-dockerconfig",
|
||||
"KUBECONFIG=./testdata/test-kubeconfig",
|
||||
)
|
||||
result := icmd.RunCmd(cmd).Assert(t, icmd.Expected{
|
||||
Err: icmd.None,
|
||||
ExitCode: 0,
|
||||
})
|
||||
golden.Assert(t, result.Stdout(), "context-ls.golden")
|
||||
}
|
||||
17 e2e/context/main_test.go Normal file
@ -0,0 +1,17 @@
|
||||
package context
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/internal/test/environment"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
if err := environment.Setup(); err != nil {
|
||||
fmt.Println(err.Error())
|
||||
os.Exit(3)
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
3 e2e/context/testdata/context-ls.golden vendored Normal file
@ -0,0 +1,3 @@
|
||||
NAME DESCRIPTION DOCKER ENDPOINT KUBERNETES ENDPOINT ORCHESTRATOR
|
||||
default * Current DOCKER_HOST based configuration unix:///var/run/docker.sock https://someserver (zoinx) swarm
|
||||
remote my remote cluster ssh://someserver https://someserver (default) kubernetes
|
||||
7 e2e/context/testdata/test-dockerconfig/config.json vendored Normal file
@ -0,0 +1,7 @@
|
||||
{
|
||||
"auths": {},
|
||||
"HttpHeaders": {
|
||||
"User-Agent": "Docker-Client/19.09.0-dev (linux)"
|
||||
},
|
||||
"credsStore": "secretservice"
|
||||
}
|
||||
@ -0,0 +1 @@
|
||||
{"Name":"remote","Metadata":{"Description":"my remote cluster","StackOrchestrator":"kubernetes"},"Endpoints":{"docker":{"Host":"ssh://someserver","SkipTLSVerify":false},"kubernetes":{"Host":"https://someserver","SkipTLSVerify":false,"DefaultNamespace":"default","Exec":{"command":"heptio-authenticator-aws","args":["token","-i","eks-cf"],"env":null,"apiVersion":"client.authentication.k8s.io/v1alpha1"}}}}
|
||||
20 e2e/context/testdata/test-kubeconfig vendored Normal file
@ -0,0 +1,20 @@
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: dGhlLWNh
|
||||
server: https://someserver
|
||||
name: test-cluster
|
||||
contexts:
|
||||
- context:
|
||||
cluster: test-cluster
|
||||
user: test-user
|
||||
namespace: zoinx
|
||||
name: test
|
||||
current-context: test
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: test-user
|
||||
user:
|
||||
client-certificate-data: dGhlLWNlcnQ=
|
||||
client-key-data: dGhlLWtleQ==
|
||||
@ -169,6 +169,12 @@ func (c *FakeCli) ErrBuffer() *bytes.Buffer {
|
||||
return c.err
|
||||
}
|
||||
|
||||
// ResetOutputBuffers resets the .OutBuffer() and .ErrBuffer() back to empty
|
||||
func (c *FakeCli) ResetOutputBuffers() {
|
||||
c.outBuffer.Reset()
|
||||
c.err.Reset()
|
||||
}
|
||||
|
||||
// SetNotaryClient sets the internal getter for retrieving a NotaryClient
|
||||
func (c *FakeCli) SetNotaryClient(notaryClientFunc NotaryClientFuncType) {
|
||||
c.notaryClientFunc = notaryClientFunc
|
||||
|
||||
@ -1,8 +0,0 @@
|
||||
package kubernetes
|
||||
|
||||
import api "github.com/docker/compose-on-kubernetes/api"
|
||||
|
||||
// NewKubernetesConfig resolves the path to the desired Kubernetes configuration file based on
|
||||
// the KUBECONFIG environment variable and command line flags.
|
||||
// Deprecated: Use github.com/docker/compose-on-kubernetes/api.NewKubernetesConfig instead
|
||||
var NewKubernetesConfig = api.NewKubernetesConfig
|
||||
@ -53,8 +53,8 @@ func TestParseHost(t *testing.T) {
|
||||
func TestParseDockerDaemonHost(t *testing.T) {
|
||||
invalids := map[string]string{
|
||||
|
||||
"tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d",
|
||||
"tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path",
|
||||
"tcp:a.b.c.d": "",
|
||||
"tcp:a.b.c.d/path": "",
|
||||
"udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1",
|
||||
"udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375",
|
||||
"tcp://unix:///run/docker.sock": "Invalid proto, expected tcp: unix:///run/docker.sock",
|
||||
@ -83,7 +83,7 @@ func TestParseDockerDaemonHost(t *testing.T) {
|
||||
"localhost:5555/path": "tcp://localhost:5555/path",
|
||||
}
|
||||
for invalidAddr, expectedError := range invalids {
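// an empty expected error means any parse failure is accepted; the message itself is not checked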
|
||||
if addr, err := parseDockerDaemonHost(invalidAddr); err == nil || err.Error() != expectedError {
|
||||
if addr, err := parseDockerDaemonHost(invalidAddr); err == nil || expectedError != "" && err.Error() != expectedError {
|
||||
t.Errorf("tcp %v address expected error %q return, got %q and addr %v", invalidAddr, expectedError, err, addr)
|
||||
}
|
||||
}
|
||||
@ -99,8 +99,8 @@ func TestParseTCP(t *testing.T) {
|
||||
defaultHTTPHost = "tcp://127.0.0.1:2376"
|
||||
)
|
||||
invalids := map[string]string{
|
||||
"tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d",
|
||||
"tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path",
|
||||
"tcp:a.b.c.d": "",
|
||||
"tcp:a.b.c.d/path": "",
|
||||
"udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1",
|
||||
"udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375",
|
||||
}
|
||||
@ -125,7 +125,7 @@ func TestParseTCP(t *testing.T) {
|
||||
"localhost:5555/path": "tcp://localhost:5555/path",
|
||||
}
|
||||
for invalidAddr, expectedError := range invalids {
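// an empty expected error means any parse failure is accepted; the message itself is not checked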
|
||||
if addr, err := ParseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError {
|
||||
if addr, err := ParseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || expectedError != "" && err.Error() != expectedError {
|
||||
t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr)
|
||||
}
|
||||
}
|
||||
|
||||
@ -22,5 +22,18 @@ export LDFLAGS="\
|
||||
|
||||
GOOS="${GOOS:-$(go env GOHOSTOS)}"
|
||||
GOARCH="${GOARCH:-$(go env GOHOSTARCH)}"
|
||||
export TARGET="build/docker-$GOOS-$GOARCH"
|
||||
if [ "${GOARCH}" = "arm" ]; then
|
||||
GOARM="${GOARM:-$(go env GOHOSTARM)}"
|
||||
fi
|
||||
|
||||
TARGET="build/docker-$GOOS-$GOARCH"
|
||||
if [ "${GOARCH}" = "arm" ] && [ -n "${GOARM}" ]; then
|
||||
TARGET="${TARGET}-v${GOARM}"
|
||||
fi
|
||||
|
||||
if [ "${GOOS}" = "windows" ]; then
|
||||
TARGET="${TARGET}.exe"
|
||||
fi
|
||||
export TARGET
|
||||
|
||||
export SOURCE="github.com/docker/cli/cmd/docker"
|
||||
|
||||
27 vendor.conf
@ -3,19 +3,20 @@ github.com/agl/ed25519 5312a61534124124185d41f09206
|
||||
github.com/asaskevich/govalidator f9ffefc3facfbe0caee3fea233cbb6e8208f4541
|
||||
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
|
||||
github.com/beorn7/perks e7f67b54abbeac9c40a31de0f81159e4cafebd6a
|
||||
github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
|
||||
github.com/containerd/containerd ceba56893a76f22cf0126c46d835c80fb3833408
|
||||
github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
|
||||
github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f
|
||||
github.com/containerd/containerd 3a3f0aac8819165839a41fee77a4f4ac8b103097
|
||||
github.com/containerd/continuity aaeac12a7ffcd198ae25440a9dff125c2e2703a7
|
||||
github.com/containerd/fifo a9fb20d87448d386e6d50b1f2e1fa70dcf0de43c
|
||||
github.com/containerd/ttrpc f02858b1457c5ca3aaec3a0803eb0d59f96e41d6
|
||||
github.com/containerd/typeurl 2a93cfde8c20b23de8eb84a5adbc234ddf7a9e8d
|
||||
github.com/coreos/etcd d57e8b8d97adfc4a6c224fe116714bf1a1f3beb9 # v3.3.12
|
||||
github.com/cpuguy83/go-md2man 20f5889cbdc3c73dbd2862796665e7c465ade7d1 # v1.0.8
|
||||
github.com/davecgh/go-spew 8991bc29aa16c548c550c7ff78260e27b9ab7c73 # v1.1.1
|
||||
github.com/dgrijalva/jwt-go a2c85815a77d0f951e33ba4db5ae93629a1530af
|
||||
github.com/docker/compose-on-kubernetes 7a68f5c914c7e06d7a08dc71608f41811c91f0bc # v0.4.21
|
||||
github.com/docker/compose-on-kubernetes cc4914dfd1b6684a9750a59f3613fc0a95291824 # v0.4.23
|
||||
github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580
|
||||
github.com/docker/docker ac48309ac4024b5bfe21a126b1a23a3e93521d75
|
||||
github.com/docker/docker-credential-helpers 5241b46610f2491efdf9d1c85f1ddf5b02f6d962
|
||||
github.com/docker/docker a004854097417a591c3f6a3aeaab75efae3c5814 https://github.com/docker/engine.git # 19.03 branch
|
||||
github.com/docker/docker-credential-helpers 54f0238b6bf101fc3ad3b34114cb5520beb562f5 # v0.6.3
|
||||
github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06 # Contains a customized version of canonical/json and is used by Notary. The package is periodically rebased on current Go versions.
|
||||
github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0
|
||||
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
||||
@ -23,11 +24,11 @@ github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a
|
||||
github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0
|
||||
github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
|
||||
github.com/docker/licensing 9781369abdb5281cdc07a2a446c6df01347ec793
|
||||
github.com/docker/swarmkit 59163bf75df38489d4a10392265d27156dc473c5
|
||||
github.com/docker/swarmkit 48eb1828ce81be20b25d647f6ca8f33d599f705c
|
||||
github.com/evanphx/json-patch 72bf35d0ff611848c1dc9df0f976c81192392fa5 # v4.1.0
|
||||
github.com/gofrs/flock 7f43ea2e6a643ad441fc12d0ecc0d3388b300c53 # v0.7.0
|
||||
github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
|
||||
github.com/gogo/protobuf 4cbf7e384e768b4e01799441fdf2a706a5635ae7 # v1.2.0
|
||||
github.com/gogo/googleapis d31c731455cb061f42baff3bda55bad0118b126b # v1.2.0
|
||||
github.com/gogo/protobuf ba06b47c162d49f2af050fb4c75bcbc86a159d5c # v1.2.1
|
||||
github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998
|
||||
github.com/golang/protobuf aa810b61a9c79d51363740d207bb46cf8e620ed5 # v1.2.0
|
||||
github.com/google/go-cmp 3af367b6b30c263d47e8895973edcca9a49cf029 # v0.2.0
|
||||
@ -51,13 +52,13 @@ github.com/Microsoft/go-winio 84b4ab48a50763fe7b3abcef38e5
|
||||
github.com/Microsoft/hcsshim 672e52e9209d1e53718c1b6a7d68cc9272654ab5
|
||||
github.com/miekg/pkcs11 6120d95c0e9576ccf4a78ba40855809dca31a9ed
|
||||
github.com/mitchellh/mapstructure f15292f7a699fcc1a38a80977f80a046874ba8ac
|
||||
github.com/moby/buildkit 8818c67cff663befa7b70f21454e340f71616581
|
||||
github.com/moby/buildkit f238f1efb04f00bf0cc147141fda9ddb55c8bc49
|
||||
github.com/modern-go/concurrent bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 # 1.0.3
|
||||
github.com/modern-go/reflect2 4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd # 1.0.1
|
||||
github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b
|
||||
github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf # v1.0.0-rc1
|
||||
github.com/opencontainers/image-spec d60099175f88c47cd379c4738d158884749ed235 # v1.0.1
|
||||
github.com/opencontainers/runc 029124da7af7360afa781a0234d1b083550f797c # v1.0.0-rc7-6-g029124da
|
||||
github.com/opencontainers/runc 425e105d5a03fabd737a126ad93d62a9eeede87f # v1.0.0-rc8
|
||||
github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db
|
||||
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
|
||||
github.com/pkg/errors ba968bfe8b2f7e042a574c888954fccecfa385b4 # v0.8.1
|
||||
@ -77,7 +78,7 @@ github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e
|
||||
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
|
||||
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
|
||||
github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d
|
||||
golang.org/x/crypto 38d8ce5564a5b71b2e3a00553993f1b9a7ae852f
|
||||
golang.org/x/crypto 88737f569e3a9c7ab309cdc09a07fe7fc87233c3
|
||||
golang.org/x/net eb5bcb51f2a31c7d5141d810b70815c05d9c9146
|
||||
golang.org/x/oauth2 ef147856a6ddbb60760db74283d2424e98c87bff
|
||||
golang.org/x/sync e225da77a7e68af35c70ccbf71af2b83e6acac3c
|
||||
@ -85,7 +86,7 @@ golang.org/x/sys 4b34438f7a67ee5f45cc6132e2ba
|
||||
golang.org/x/text f21a4dfb5e38f5895301dc265a8def02365cc3d0 # v0.3.0
|
||||
golang.org/x/time fbb02b2291d28baffd63558aa44b4b56f178d650
|
||||
google.golang.org/genproto 02b4e95473316948020af0b7a4f0f22c73929b0e
|
||||
google.golang.org/grpc 7a6a684ca69eb4cae85ad0a484f2e531598c047b # v1.12.2
|
||||
google.golang.org/grpc 25c4f928eaa6d96443009bd842389fb4fa48664e # v1.20.1
|
||||
gopkg.in/inf.v0 d2d2541c53f18d2a059457998ce2876cc8e67cbf # v0.9.1
|
||||
gopkg.in/yaml.v2 5420a8b6744d3b0345ab293f6fcba19c978f1183 # v2.2.1
|
||||
gotest.tools 1083505acf35a0bd8a696b26837e1fb3187a7a83 # v2.3.0
|
||||
|
||||
18 vendor/github.com/containerd/console/LICENSE generated vendored
@ -1,6 +1,7 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
@ -175,24 +176,13 @@
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
Copyright The containerd Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
10 vendor/github.com/containerd/console/README.md generated vendored
@ -15,3 +15,13 @@ if err := current.SetRaw(); err != nil {
|
||||
ws, err := current.Size()
|
||||
current.Resize(ws)
|
||||
```
|
||||
|
||||
## Project details
|
||||
|
||||
console is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
|
||||
As a containerd sub-project, you will find the:
|
||||
* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
|
||||
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
|
||||
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
|
||||
|
||||
information in our [`containerd/project`](https://github.com/containerd/project) repository.
|
||||
|
||||
1055 vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go generated vendored
File diff suppressed because it is too large
1553 vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go generated vendored
File diff suppressed because it is too large
444 vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go generated vendored
@ -1,36 +1,20 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: github.com/containerd/containerd/api/services/diff/v1/diff.proto
|
||||
|
||||
/*
|
||||
Package diff is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
github.com/containerd/containerd/api/services/diff/v1/diff.proto
|
||||
|
||||
It has these top-level messages:
|
||||
ApplyRequest
|
||||
ApplyResponse
|
||||
DiffRequest
|
||||
DiffResponse
|
||||
*/
|
||||
package diff
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
|
||||
import containerd_types "github.com/containerd/containerd/api/types"
|
||||
import containerd_types1 "github.com/containerd/containerd/api/types"
|
||||
|
||||
import context "golang.org/x/net/context"
|
||||
import grpc "google.golang.org/grpc"
|
||||
|
||||
import strings "strings"
|
||||
import reflect "reflect"
|
||||
import sortkeys "github.com/gogo/protobuf/sortkeys"
|
||||
|
||||
import io "io"
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
types "github.com/containerd/containerd/api/types"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
|
||||
grpc "google.golang.org/grpc"
|
||||
io "io"
|
||||
math "math"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
@ -45,32 +29,94 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type ApplyRequest struct {
|
||||
// Diff is the descriptor of the diff to be extracted
|
||||
Diff *containerd_types1.Descriptor `protobuf:"bytes,1,opt,name=diff" json:"diff,omitempty"`
|
||||
Mounts []*containerd_types.Mount `protobuf:"bytes,2,rep,name=mounts" json:"mounts,omitempty"`
|
||||
Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"`
|
||||
Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ApplyRequest) Reset() { *m = ApplyRequest{} }
|
||||
func (*ApplyRequest) ProtoMessage() {}
|
||||
func (*ApplyRequest) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{0} }
|
||||
func (m *ApplyRequest) Reset() { *m = ApplyRequest{} }
|
||||
func (*ApplyRequest) ProtoMessage() {}
|
||||
func (*ApplyRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_3b36a99e6faaa935, []int{0}
|
||||
}
|
||||
func (m *ApplyRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ApplyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ApplyRequest.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ApplyRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ApplyRequest.Merge(m, src)
|
||||
}
|
||||
func (m *ApplyRequest) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ApplyRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ApplyRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ApplyRequest proto.InternalMessageInfo
|
||||
|
||||
type ApplyResponse struct {
|
||||
// Applied is the descriptor for the object which was applied.
|
||||
// If the input was a compressed blob then the result will be
|
||||
// the descriptor for the uncompressed blob.
|
||||
Applied *containerd_types1.Descriptor `protobuf:"bytes,1,opt,name=applied" json:"applied,omitempty"`
|
||||
Applied *types.Descriptor `protobuf:"bytes,1,opt,name=applied,proto3" json:"applied,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ApplyResponse) Reset() { *m = ApplyResponse{} }
|
||||
func (*ApplyResponse) ProtoMessage() {}
|
||||
func (*ApplyResponse) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{1} }
|
||||
func (m *ApplyResponse) Reset() { *m = ApplyResponse{} }
|
||||
func (*ApplyResponse) ProtoMessage() {}
|
||||
func (*ApplyResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_3b36a99e6faaa935, []int{1}
|
||||
}
|
||||
func (m *ApplyResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ApplyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ApplyResponse.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ApplyResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ApplyResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ApplyResponse) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ApplyResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ApplyResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ApplyResponse proto.InternalMessageInfo
|
||||
|
||||
type DiffRequest struct {
|
||||
// Left are the mounts which represent the older copy
|
||||
// in which is the base of the computed changes.
|
||||
Left []*containerd_types.Mount `protobuf:"bytes,1,rep,name=left" json:"left,omitempty"`
|
||||
Left []*types.Mount `protobuf:"bytes,1,rep,name=left,proto3" json:"left,omitempty"`
|
||||
// Right are the mounts which represents the newer copy
|
||||
// in which changes from the left were made into.
|
||||
Right []*containerd_types.Mount `protobuf:"bytes,2,rep,name=right" json:"right,omitempty"`
|
||||
Right []*types.Mount `protobuf:"bytes,2,rep,name=right,proto3" json:"right,omitempty"`
|
||||
// MediaType is the media type descriptor for the created diff
|
||||
// object
|
||||
MediaType string `protobuf:"bytes,3,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
|
||||
@ -79,29 +125,129 @@ type DiffRequest struct {
|
||||
Ref string `protobuf:"bytes,4,opt,name=ref,proto3" json:"ref,omitempty"`
|
||||
// Labels are the labels to apply to the generated content
|
||||
// on content store commit.
|
||||
Labels map[string]string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *DiffRequest) Reset() { *m = DiffRequest{} }
|
||||
func (*DiffRequest) ProtoMessage() {}
|
||||
func (*DiffRequest) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{2} }
|
||||
func (m *DiffRequest) Reset() { *m = DiffRequest{} }
|
||||
func (*DiffRequest) ProtoMessage() {}
|
||||
func (*DiffRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_3b36a99e6faaa935, []int{2}
|
||||
}
|
||||
func (m *DiffRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *DiffRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_DiffRequest.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *DiffRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DiffRequest.Merge(m, src)
|
||||
}
|
||||
func (m *DiffRequest) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *DiffRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DiffRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DiffRequest proto.InternalMessageInfo
|
||||
|
||||
type DiffResponse struct {
|
||||
// Diff is the descriptor of the diff which can be applied
|
||||
Diff *containerd_types1.Descriptor `protobuf:"bytes,3,opt,name=diff" json:"diff,omitempty"`
|
||||
Diff *types.Descriptor `protobuf:"bytes,3,opt,name=diff,proto3" json:"diff,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *DiffResponse) Reset() { *m = DiffResponse{} }
|
||||
func (*DiffResponse) ProtoMessage() {}
|
||||
func (*DiffResponse) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{3} }
|
||||
func (m *DiffResponse) Reset() { *m = DiffResponse{} }
|
||||
func (*DiffResponse) ProtoMessage() {}
|
||||
func (*DiffResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_3b36a99e6faaa935, []int{3}
|
||||
}
|
||||
func (m *DiffResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *DiffResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_DiffResponse.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *DiffResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DiffResponse.Merge(m, src)
|
||||
}
|
||||
func (m *DiffResponse) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *DiffResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DiffResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DiffResponse proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest")
|
||||
proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse")
|
||||
proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest")
|
||||
proto.RegisterMapType((map[string]string)(nil), "containerd.services.diff.v1.DiffRequest.LabelsEntry")
|
||||
proto.RegisterType((*DiffResponse)(nil), "containerd.services.diff.v1.DiffResponse")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/services/diff/v1/diff.proto", fileDescriptor_3b36a99e6faaa935)
|
||||
}
|
||||
|
||||
var fileDescriptor_3b36a99e6faaa935 = []byte{
|
||||
// 457 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30,
|
||||
0x14, 0xaf, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40,
|
||||
0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x01, 0x81, 0x04, 0x4a,
|
||||
0x9b, 0x97, 0xce, 0x22, 0x8d, 0xbd, 0xd8, 0xad, 0x94, 0x1b, 0xdf, 0x85, 0x8f, 0xc2, 0x65, 0x47,
|
||||
0x8e, 0x1c, 0x69, 0x3f, 0x09, 0xb2, 0x93, 0x40, 0x24, 0xa4, 0x12, 0x76, 0xca, 0xcb, 0xf3, 0xef,
|
||||
0x9f, 0xfd, 0x6c, 0x38, 0x5d, 0x70, 0x7d, 0xb9, 0x9a, 0xb1, 0xb9, 0x58, 0xfa, 0x73, 0x91, 0xea,
|
||||
0x90, 0xa7, 0x98, 0x45, 0xf5, 0x32, 0x94, 0xdc, 0x57, 0x98, 0xad, 0xf9, 0x1c, 0x95, 0x1f, 0xf1,
|
||||
0x38, 0xf6, 0xd7, 0x87, 0xf6, 0xcb, 0x64, 0x26, 0xb4, 0xa0, 0xf7, 0xfe, 0x60, 0x59, 0x85, 0x63,
|
||||
0x76, 0x7d, 0x7d, 0xe8, 0xee, 0x2f, 0xc4, 0x42, 0x58, 0x9c, 0x6f, 0xaa, 0x82, 0xe2, 0x1e, 0x35,
|
||||
0x32, 0xd5, 0xb9, 0x44, 0xe5, 0x2f, 0xc5, 0x2a, 0xd5, 0x25, 0xef, 0xe4, 0x3f, 0x78, 0x11, 0xaa,
|
||||
0x79, 0xc6, 0xa5, 0x16, 0x59, 0x41, 0x1e, 0x5f, 0xc1, 0xde, 0x4b, 0x29, 0x93, 0x3c, 0xc0, 0xab,
|
||||
0x15, 0x2a, 0x4d, 0x9f, 0x40, 0xd7, 0xa4, 0x74, 0xc8, 0x88, 0x4c, 0x86, 0xd3, 0xfb, 0xac, 0xb6,
|
||||
0x0d, 0xab, 0xc0, 0xce, 0x7e, 0x2b, 0x04, 0x16, 0x49, 0x7d, 0xe8, 0xdb, 0x34, 0xca, 0x69, 0x8f,
|
||||
0x3a, 0x93, 0xe1, 0xf4, 0xee, 0xdf, 0x9c, 0xb7, 0x66, 0x3d, 0x28, 0x61, 0xe3, 0x37, 0x70, 0xbb,
|
||||
0xb4, 0x54, 0x52, 0xa4, 0x0a, 0xe9, 0x11, 0xdc, 0x0a, 0xa5, 0x4c, 0x38, 0x46, 0x8d, 0x6c, 0x2b,
|
||||
0xf0, 0xf8, 0x6b, 0x1b, 0x86, 0x67, 0x3c, 0x8e, 0xab, 0xec, 0x8f, 0xa0, 0x9b, 0x60, 0xac, 0x1d,
|
||||
0xb2, 0x3b, 0x87, 0x05, 0xd1, 0xc7, 0xd0, 0xcb, 0xf8, 0xe2, 0x52, 0xff, 0x2b, 0x75, 0x81, 0xa2,
|
||||
0x0f, 0x00, 0x96, 0x18, 0xf1, 0xf0, 0x93, 0x59, 0x73, 0x3a, 0x23, 0x32, 0x19, 0x04, 0x03, 0xdb,
|
||||
0xb9, 0xc8, 0x25, 0xd2, 0x3b, 0xd0, 0xc9, 0x30, 0x76, 0xba, 0xb6, 0x6f, 0x4a, 0x7a, 0x0e, 0xfd,
|
||||
0x24, 0x9c, 0x61, 0xa2, 0x9c, 0x9e, 0x35, 0x78, 0xc6, 0x76, 0xdc, 0x08, 0x56, 0xdb, 0x06, 0x3b,
|
||||
0xb7, 0xb4, 0xd7, 0xa9, 0xce, 0xf2, 0xa0, 0xd4, 0x70, 0x5f, 0xc0, 0xb0, 0xd6, 0x36, 0x76, 0x9f,
|
||||
0x31, 0xb7, 0xa7, 0x35, 0x08, 0x4c, 0x49, 0xf7, 0xa1, 0xb7, 0x0e, 0x93, 0x15, 0x3a, 0x6d, 0xdb,
|
||||
0x2b, 0x7e, 0x8e, 0xdb, 0xcf, 0xc9, 0xf8, 0x14, 0xf6, 0x0a, 0xf5, 0xf2, 0xb4, 0xab, 0x09, 0x77,
|
||||
0x9a, 0x4e, 0x78, 0xfa, 0x8d, 0x40, 0xd7, 0x48, 0xd0, 0x8f, 0xd0, 0xb3, 0x93, 0xa3, 0x07, 0x3b,
|
||||
0x37, 0x53, 0xbf, 0x50, 0xee, 0xc3, 0x26, 0xd0, 0x32, 0xda, 0x87, 0xd2, 0x67, 0xd2, 0xf4, 0xac,
|
||||
0xdc, 0x83, 0x06, 0xc8, 0x42, 0xfc, 0xd5, 0xc5, 0xf5, 0xc6, 0x6b, 0xfd, 0xd8, 0x78, 0xad, 0x2f,
|
||||
0x5b, 0x8f, 0x5c, 0x6f, 0x3d, 0xf2, 0x7d, 0xeb, 0x91, 0x9f, 0x5b, 0x8f, 0xbc, 0x3f, 0xbe, 0xd1,
|
||||
0x6b, 0x3f, 0x31, 0xdf, 0x77, 0xad, 0x59, 0xdf, 0x3e, 0xa4, 0xa7, 0xbf, 0x02, 0x00, 0x00, 0xff,
|
||||
0xff, 0x61, 0xd1, 0x6e, 0x9e, 0x34, 0x04, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
@ -110,8 +256,9 @@ var _ grpc.ClientConn
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for Diff service
|
||||
|
||||
// DiffClient is the client API for Diff service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type DiffClient interface {
|
||||
// Apply applies the content associated with the provided digests onto
|
||||
// the provided mounts. Archive content will be extracted and
|
||||
@ -132,7 +279,7 @@ func NewDiffClient(cc *grpc.ClientConn) DiffClient {
|
||||
|
||||
func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) {
|
||||
out := new(ApplyResponse)
|
||||
err := grpc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Apply", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Apply", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -141,15 +288,14 @@ func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.C
|
||||
|
||||
func (c *diffClient) Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) {
|
||||
out := new(DiffResponse)
|
||||
err := grpc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Diff", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Diff", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Server API for Diff service
|
||||
|
||||
// DiffServer is the server API for Diff service.
|
||||
type DiffServer interface {
|
||||
// Apply applies the content associated with the provided digests onto
|
||||
// the provided mounts. Archive content will be extracted and
|
||||
@ -254,6 +400,9 @@ func (m *ApplyRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
i += n
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -282,6 +431,9 @@ func (m *ApplyResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
i += n2
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -353,6 +505,9 @@ func (m *DiffRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
i += copy(dAtA[i:], v)
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -381,6 +536,9 @@ func (m *DiffResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
i += n3
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -394,6 +552,9 @@ func encodeVarintDiff(dAtA []byte, offset int, v uint64) int {
|
||||
return offset + 1
|
||||
}
|
||||
func (m *ApplyRequest) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Diff != nil {
|
||||
@ -406,20 +567,32 @@ func (m *ApplyRequest) Size() (n int) {
|
||||
n += 1 + l + sovDiff(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ApplyResponse) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Applied != nil {
|
||||
l = m.Applied.Size()
|
||||
n += 1 + l + sovDiff(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *DiffRequest) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Left) > 0 {
|
||||
@ -450,16 +623,25 @@ func (m *DiffRequest) Size() (n int) {
|
||||
n += mapEntrySize + 1 + sovDiff(uint64(mapEntrySize))
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *DiffResponse) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Diff != nil {
|
||||
l = m.Diff.Size()
|
||||
n += 1 + l + sovDiff(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@ -481,8 +663,9 @@ func (this *ApplyRequest) String() string {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&ApplyRequest{`,
|
||||
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
|
||||
`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "containerd_types.Mount", 1) + `,`,
|
||||
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`,
|
||||
`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -492,7 +675,8 @@ func (this *ApplyResponse) String() string {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&ApplyResponse{`,
|
||||
`Applied:` + strings.Replace(fmt.Sprintf("%v", this.Applied), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
|
||||
`Applied:` + strings.Replace(fmt.Sprintf("%v", this.Applied), "Descriptor", "types.Descriptor", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -505,18 +689,19 @@ func (this *DiffRequest) String() string {
|
||||
for k, _ := range this.Labels {
|
||||
keysForLabels = append(keysForLabels, k)
|
||||
}
|
||||
sortkeys.Strings(keysForLabels)
|
||||
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
|
||||
mapStringForLabels := "map[string]string{"
|
||||
for _, k := range keysForLabels {
|
||||
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
|
||||
}
|
||||
mapStringForLabels += "}"
|
||||
s := strings.Join([]string{`&DiffRequest{`,
|
||||
`Left:` + strings.Replace(fmt.Sprintf("%v", this.Left), "Mount", "containerd_types.Mount", 1) + `,`,
|
||||
`Right:` + strings.Replace(fmt.Sprintf("%v", this.Right), "Mount", "containerd_types.Mount", 1) + `,`,
|
||||
`Left:` + strings.Replace(fmt.Sprintf("%v", this.Left), "Mount", "types.Mount", 1) + `,`,
|
||||
`Right:` + strings.Replace(fmt.Sprintf("%v", this.Right), "Mount", "types.Mount", 1) + `,`,
|
||||
`MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
|
||||
`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
|
||||
`Labels:` + mapStringForLabels + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -526,7 +711,8 @@ func (this *DiffResponse) String() string {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&DiffResponse{`,
|
||||
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
|
||||
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -554,7 +740,7 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -582,7 +768,7 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -591,11 +777,14 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Diff == nil {
|
||||
m.Diff = &containerd_types1.Descriptor{}
|
||||
m.Diff = &types.Descriptor{}
|
||||
}
|
||||
if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
@ -615,7 +804,7 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -624,10 +813,13 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Mounts = append(m.Mounts, &containerd_types.Mount{})
|
||||
m.Mounts = append(m.Mounts, &types.Mount{})
|
||||
if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -641,9 +833,13 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -668,7 +864,7 @@ func (m *ApplyResponse) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -696,7 +892,7 @@ func (m *ApplyResponse) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -705,11 +901,14 @@ func (m *ApplyResponse) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Applied == nil {
|
||||
m.Applied = &containerd_types1.Descriptor{}
|
||||
m.Applied = &types.Descriptor{}
|
||||
}
|
||||
if err := m.Applied.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
@ -724,9 +923,13 @@ func (m *ApplyResponse) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -751,7 +954,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -779,7 +982,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -788,10 +991,13 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Left = append(m.Left, &containerd_types.Mount{})
|
||||
m.Left = append(m.Left, &types.Mount{})
|
||||
if err := m.Left[len(m.Left)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -810,7 +1016,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -819,10 +1025,13 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Right = append(m.Right, &containerd_types.Mount{})
|
||||
m.Right = append(m.Right, &types.Mount{})
|
||||
if err := m.Right[len(m.Right)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -841,7 +1050,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -851,6 +1060,9 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -870,7 +1082,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -880,6 +1092,9 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -899,7 +1114,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -908,6 +1123,9 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -928,7 +1146,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -945,7 +1163,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
stringLenmapkey |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -955,6 +1173,9 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -971,7 +1192,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||
stringLenmapvalue |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -981,6 +1202,9 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||
if postStringIndexmapvalue < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postStringIndexmapvalue > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -1012,9 +1236,13 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -1039,7 +1267,7 @@ func (m *DiffResponse) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1067,7 +1295,7 @@ func (m *DiffResponse) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1076,11 +1304,14 @@ func (m *DiffResponse) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Diff == nil {
|
||||
m.Diff = &containerd_types1.Descriptor{}
|
||||
m.Diff = &types.Descriptor{}
|
||||
}
|
||||
if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
@ -1095,9 +1326,13 @@ func (m *DiffResponse) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -1161,10 +1396,13 @@ func skipDiff(dAtA []byte) (n int, err error) {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthDiff
|
||||
}
|
||||
iNdEx += length
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthDiff
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
@ -1193,6 +1431,9 @@ func skipDiff(dAtA []byte) (n int, err error) {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthDiff
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
@ -1211,40 +1452,3 @@ var (
|
||||
ErrInvalidLengthDiff = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowDiff = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/services/diff/v1/diff.proto", fileDescriptorDiff)
|
||||
}
|
||||
|
||||
var fileDescriptorDiff = []byte{
|
||||
// 457 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30,
|
||||
0x14, 0xaf, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40,
|
||||
0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x01, 0x81, 0x04, 0x4a,
|
||||
0x9b, 0x97, 0xce, 0x22, 0x8d, 0xbd, 0xd8, 0xad, 0x94, 0x1b, 0xdf, 0x85, 0x8f, 0xc2, 0x65, 0x47,
|
||||
0x8e, 0x1c, 0x69, 0x3f, 0x09, 0xb2, 0x93, 0x40, 0x24, 0xa4, 0x12, 0x76, 0xca, 0xcb, 0xf3, 0xef,
|
||||
0x9f, 0xfd, 0x6c, 0x38, 0x5d, 0x70, 0x7d, 0xb9, 0x9a, 0xb1, 0xb9, 0x58, 0xfa, 0x73, 0x91, 0xea,
|
||||
0x90, 0xa7, 0x98, 0x45, 0xf5, 0x32, 0x94, 0xdc, 0x57, 0x98, 0xad, 0xf9, 0x1c, 0x95, 0x1f, 0xf1,
|
||||
0x38, 0xf6, 0xd7, 0x87, 0xf6, 0xcb, 0x64, 0x26, 0xb4, 0xa0, 0xf7, 0xfe, 0x60, 0x59, 0x85, 0x63,
|
||||
0x76, 0x7d, 0x7d, 0xe8, 0xee, 0x2f, 0xc4, 0x42, 0x58, 0x9c, 0x6f, 0xaa, 0x82, 0xe2, 0x1e, 0x35,
|
||||
0x32, 0xd5, 0xb9, 0x44, 0xe5, 0x2f, 0xc5, 0x2a, 0xd5, 0x25, 0xef, 0xe4, 0x3f, 0x78, 0x11, 0xaa,
|
||||
0x79, 0xc6, 0xa5, 0x16, 0x59, 0x41, 0x1e, 0x5f, 0xc1, 0xde, 0x4b, 0x29, 0x93, 0x3c, 0xc0, 0xab,
|
||||
0x15, 0x2a, 0x4d, 0x9f, 0x40, 0xd7, 0xa4, 0x74, 0xc8, 0x88, 0x4c, 0x86, 0xd3, 0xfb, 0xac, 0xb6,
|
||||
0x0d, 0xab, 0xc0, 0xce, 0x7e, 0x2b, 0x04, 0x16, 0x49, 0x7d, 0xe8, 0xdb, 0x34, 0xca, 0x69, 0x8f,
|
||||
0x3a, 0x93, 0xe1, 0xf4, 0xee, 0xdf, 0x9c, 0xb7, 0x66, 0x3d, 0x28, 0x61, 0xe3, 0x37, 0x70, 0xbb,
|
||||
0xb4, 0x54, 0x52, 0xa4, 0x0a, 0xe9, 0x11, 0xdc, 0x0a, 0xa5, 0x4c, 0x38, 0x46, 0x8d, 0x6c, 0x2b,
|
||||
0xf0, 0xf8, 0x6b, 0x1b, 0x86, 0x67, 0x3c, 0x8e, 0xab, 0xec, 0x8f, 0xa0, 0x9b, 0x60, 0xac, 0x1d,
|
||||
0xb2, 0x3b, 0x87, 0x05, 0xd1, 0xc7, 0xd0, 0xcb, 0xf8, 0xe2, 0x52, 0xff, 0x2b, 0x75, 0x81, 0xa2,
|
||||
0x0f, 0x00, 0x96, 0x18, 0xf1, 0xf0, 0x93, 0x59, 0x73, 0x3a, 0x23, 0x32, 0x19, 0x04, 0x03, 0xdb,
|
||||
0xb9, 0xc8, 0x25, 0xd2, 0x3b, 0xd0, 0xc9, 0x30, 0x76, 0xba, 0xb6, 0x6f, 0x4a, 0x7a, 0x0e, 0xfd,
|
||||
0x24, 0x9c, 0x61, 0xa2, 0x9c, 0x9e, 0x35, 0x78, 0xc6, 0x76, 0xdc, 0x08, 0x56, 0xdb, 0x06, 0x3b,
|
||||
0xb7, 0xb4, 0xd7, 0xa9, 0xce, 0xf2, 0xa0, 0xd4, 0x70, 0x5f, 0xc0, 0xb0, 0xd6, 0x36, 0x76, 0x9f,
|
||||
0x31, 0xb7, 0xa7, 0x35, 0x08, 0x4c, 0x49, 0xf7, 0xa1, 0xb7, 0x0e, 0x93, 0x15, 0x3a, 0x6d, 0xdb,
|
||||
0x2b, 0x7e, 0x8e, 0xdb, 0xcf, 0xc9, 0xf8, 0x14, 0xf6, 0x0a, 0xf5, 0xf2, 0xb4, 0xab, 0x09, 0x77,
|
||||
0x9a, 0x4e, 0x78, 0xfa, 0x8d, 0x40, 0xd7, 0x48, 0xd0, 0x8f, 0xd0, 0xb3, 0x93, 0xa3, 0x07, 0x3b,
|
||||
0x37, 0x53, 0xbf, 0x50, 0xee, 0xc3, 0x26, 0xd0, 0x32, 0xda, 0x87, 0xd2, 0x67, 0xd2, 0xf4, 0xac,
|
||||
0xdc, 0x83, 0x06, 0xc8, 0x42, 0xfc, 0xd5, 0xc5, 0xf5, 0xc6, 0x6b, 0xfd, 0xd8, 0x78, 0xad, 0x2f,
|
||||
0x5b, 0x8f, 0x5c, 0x6f, 0x3d, 0xf2, 0x7d, 0xeb, 0x91, 0x9f, 0x5b, 0x8f, 0xbc, 0x3f, 0xbe, 0xd1,
|
||||
0x6b, 0x3f, 0x31, 0xdf, 0x77, 0xad, 0x59, 0xdf, 0x3e, 0xa4, 0xa7, 0xbf, 0x02, 0x00, 0x00, 0xff,
|
||||
0xff, 0x61, 0xd1, 0x6e, 0x9e, 0x34, 0x04, 0x00, 0x00,
|
||||
}
|
||||
|
||||
451 vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go (generated, vendored)
@ -1,43 +1,22 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/services/events/v1/events.proto

/*
	Package events is a generated protocol buffer package.

	It is generated from these files:
		github.com/containerd/containerd/api/services/events/v1/events.proto

	It has these top-level messages:
		PublishRequest
		ForwardRequest
		SubscribeRequest
		Envelope
*/
package events

import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"

// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
import google_protobuf1 "github.com/gogo/protobuf/types"
import google_protobuf2 "github.com/gogo/protobuf/types"
import _ "github.com/gogo/protobuf/types"

import time "time"

import typeurl "github.com/containerd/typeurl"

import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"

import types "github.com/gogo/protobuf/types"

import strings "strings"
import reflect "reflect"

import io "io"
import (
	context "context"
	fmt "fmt"
	github_com_containerd_typeurl "github.com/containerd/typeurl"
	proto "github.com/gogo/protobuf/proto"
	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
	types "github.com/gogo/protobuf/types"
	grpc "google.golang.org/grpc"
	io "io"
	math "math"
	reflect "reflect"
	strings "strings"
	time "time"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -52,40 +31,164 @@ var _ = time.Kitchen
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type PublishRequest struct {
|
||||
Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"`
|
||||
Event *google_protobuf1.Any `protobuf:"bytes,2,opt,name=event" json:"event,omitempty"`
|
||||
Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"`
|
||||
Event *types.Any `protobuf:"bytes,2,opt,name=event,proto3" json:"event,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *PublishRequest) Reset() { *m = PublishRequest{} }
|
||||
func (*PublishRequest) ProtoMessage() {}
|
||||
func (*PublishRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{0} }
|
||||
func (m *PublishRequest) Reset() { *m = PublishRequest{} }
|
||||
func (*PublishRequest) ProtoMessage() {}
|
||||
func (*PublishRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_43fcd20dc1642376, []int{0}
|
||||
}
|
||||
func (m *PublishRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *PublishRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_PublishRequest.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *PublishRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_PublishRequest.Merge(m, src)
|
||||
}
|
||||
func (m *PublishRequest) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *PublishRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_PublishRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_PublishRequest proto.InternalMessageInfo
|
||||
|
||||
type ForwardRequest struct {
|
||||
Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope" json:"envelope,omitempty"`
|
||||
Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ForwardRequest) Reset() { *m = ForwardRequest{} }
|
||||
func (*ForwardRequest) ProtoMessage() {}
|
||||
func (*ForwardRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{1} }
|
||||
func (m *ForwardRequest) Reset() { *m = ForwardRequest{} }
|
||||
func (*ForwardRequest) ProtoMessage() {}
|
||||
func (*ForwardRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_43fcd20dc1642376, []int{1}
|
||||
}
|
||||
func (m *ForwardRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ForwardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ForwardRequest.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ForwardRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ForwardRequest.Merge(m, src)
|
||||
}
|
||||
func (m *ForwardRequest) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ForwardRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ForwardRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ForwardRequest proto.InternalMessageInfo
|
||||
|
||||
type SubscribeRequest struct {
|
||||
Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
|
||||
Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SubscribeRequest) Reset() { *m = SubscribeRequest{} }
|
||||
func (*SubscribeRequest) ProtoMessage() {}
|
||||
func (*SubscribeRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{2} }
|
||||
func (m *SubscribeRequest) Reset() { *m = SubscribeRequest{} }
|
||||
func (*SubscribeRequest) ProtoMessage() {}
|
||||
func (*SubscribeRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_43fcd20dc1642376, []int{2}
|
||||
}
|
||||
func (m *SubscribeRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *SubscribeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_SubscribeRequest.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *SubscribeRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SubscribeRequest.Merge(m, src)
|
||||
}
|
||||
func (m *SubscribeRequest) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *SubscribeRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SubscribeRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SubscribeRequest proto.InternalMessageInfo
|
||||
|
||||
type Envelope struct {
|
||||
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,stdtime" json:"timestamp"`
|
||||
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"`
|
||||
Event *google_protobuf1.Any `protobuf:"bytes,4,opt,name=event" json:"event,omitempty"`
|
||||
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"`
|
||||
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"`
|
||||
Event *types.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Envelope) Reset() { *m = Envelope{} }
|
||||
func (*Envelope) ProtoMessage() {}
|
||||
func (*Envelope) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{3} }
|
||||
func (m *Envelope) Reset() { *m = Envelope{} }
|
||||
func (*Envelope) ProtoMessage() {}
|
||||
func (*Envelope) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_43fcd20dc1642376, []int{3}
|
||||
}
|
||||
func (m *Envelope) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Envelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Envelope.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Envelope) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Envelope.Merge(m, src)
|
||||
}
|
||||
func (m *Envelope) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Envelope) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Envelope.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Envelope proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*PublishRequest)(nil), "containerd.services.events.v1.PublishRequest")
|
||||
@ -94,6 +197,44 @@ func init() {
|
||||
proto.RegisterType((*Envelope)(nil), "containerd.services.events.v1.Envelope")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/events.proto", fileDescriptor_43fcd20dc1642376)
|
||||
}
|
||||
|
||||
var fileDescriptor_43fcd20dc1642376 = []byte{
|
||||
// 466 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcd, 0x8e, 0xd3, 0x30,
|
||||
0x14, 0x85, 0xeb, 0xf9, 0x6d, 0x3c, 0xd2, 0x08, 0x45, 0x15, 0x2a, 0x01, 0xd2, 0xaa, 0x1b, 0x2a,
|
||||
0x04, 0x0e, 0x53, 0x76, 0x20, 0x21, 0x28, 0x94, 0xf5, 0x28, 0x80, 0x54, 0xb1, 0x4b, 0xd2, 0xdb,
|
||||
0xd4, 0x52, 0x62, 0x9b, 0xd8, 0x09, 0x9a, 0xdd, 0x3c, 0x02, 0x1b, 0xde, 0x84, 0x0d, 0x6f, 0xd0,
|
||||
0x25, 0x4b, 0x56, 0xc0, 0xf4, 0x49, 0x50, 0x13, 0xbb, 0x61, 0x3a, 0x40, 0x10, 0xbb, 0x6b, 0xdf,
|
||||
0xe3, 0xcf, 0xb9, 0xe7, 0x38, 0xf8, 0x45, 0x4c, 0xd5, 0x22, 0x0f, 0x49, 0xc4, 0x53, 0x2f, 0xe2,
|
||||
0x4c, 0x05, 0x94, 0x41, 0x36, 0xfb, 0xb5, 0x0c, 0x04, 0xf5, 0x24, 0x64, 0x05, 0x8d, 0x40, 0x7a,
|
||||
0x50, 0x00, 0x53, 0xd2, 0x2b, 0x4e, 0x74, 0x45, 0x44, 0xc6, 0x15, 0xb7, 0x6f, 0xd7, 0x7a, 0x62,
|
||||
0xb4, 0x44, 0x2b, 0x8a, 0x13, 0xe7, 0x69, 0xe3, 0x25, 0x25, 0x26, 0xcc, 0xe7, 0x9e, 0x48, 0xf2,
|
||||
0x98, 0x32, 0x6f, 0x4e, 0x21, 0x99, 0x89, 0x40, 0x2d, 0xaa, 0x0b, 0x9c, 0x4e, 0xcc, 0x63, 0x5e,
|
||||
0x96, 0xde, 0xba, 0xd2, 0xbb, 0x37, 0x62, 0xce, 0xe3, 0x04, 0xea, 0xd3, 0x01, 0x3b, 0xd3, 0xad,
|
||||
0x9b, 0xdb, 0x2d, 0x48, 0x85, 0x32, 0xcd, 0xde, 0x76, 0x53, 0xd1, 0x14, 0xa4, 0x0a, 0x52, 0x51,
|
||||
0x09, 0x06, 0x3e, 0x3e, 0x3e, 0xcd, 0xc3, 0x84, 0xca, 0x85, 0x0f, 0xef, 0x72, 0x90, 0xca, 0xee,
|
||||
0xe0, 0x7d, 0xc5, 0x05, 0x8d, 0xba, 0xa8, 0x8f, 0x86, 0x96, 0x5f, 0x2d, 0xec, 0xbb, 0x78, 0xbf,
|
||||
0x9c, 0xb2, 0xbb, 0xd3, 0x47, 0xc3, 0xa3, 0x51, 0x87, 0x54, 0x60, 0x62, 0xc0, 0xe4, 0x19, 0x3b,
|
||||
0xf3, 0x2b, 0xc9, 0xe0, 0x0d, 0x3e, 0x7e, 0xc9, 0xb3, 0xf7, 0x41, 0x36, 0x33, 0xcc, 0xe7, 0xb8,
|
||||
0x0d, 0xac, 0x80, 0x84, 0x0b, 0x28, 0xb1, 0x47, 0xa3, 0x3b, 0xe4, 0xaf, 0x46, 0x92, 0x89, 0x96,
|
||||
0xfb, 0x9b, 0x83, 0x83, 0x7b, 0xf8, 0xda, 0xab, 0x3c, 0x94, 0x51, 0x46, 0x43, 0x30, 0xe0, 0x2e,
|
||||
0x3e, 0x9c, 0xd3, 0x44, 0x41, 0x26, 0xbb, 0xa8, 0xbf, 0x3b, 0xb4, 0x7c, 0xb3, 0x1c, 0x7c, 0x42,
|
||||
0xb8, 0x6d, 0x20, 0xf6, 0x18, 0x5b, 0x9b, 0xc1, 0xf5, 0x07, 0x38, 0x57, 0x26, 0x78, 0x6d, 0x14,
|
||||
0xe3, 0xf6, 0xf2, 0x5b, 0xaf, 0xf5, 0xe1, 0x7b, 0x0f, 0xf9, 0xf5, 0x31, 0xfb, 0x16, 0xb6, 0x58,
|
||||
0x90, 0x82, 0x14, 0x41, 0x04, 0xa5, 0x0b, 0x96, 0x5f, 0x6f, 0xd4, 0xae, 0xed, 0xfe, 0xd6, 0xb5,
|
||||
0xbd, 0x46, 0xd7, 0x1e, 0xed, 0x9d, 0x7f, 0xee, 0xa1, 0xd1, 0xc7, 0x1d, 0x7c, 0x30, 0x29, 0x5d,
|
||||
0xb0, 0x4f, 0xf1, 0xa1, 0x8e, 0xc6, 0xbe, 0xdf, 0xe0, 0xd6, 0xe5, 0x08, 0x9d, 0xeb, 0x57, 0xee,
|
||||
0x99, 0xac, 0xdf, 0xc4, 0x9a, 0xa8, 0x83, 0x69, 0x24, 0x5e, 0x0e, 0xf0, 0x8f, 0xc4, 0x18, 0x5b,
|
||||
0x9b, 0x4c, 0x6c, 0xaf, 0x81, 0xb9, 0x9d, 0x9e, 0xf3, 0xaf, 0x8f, 0xe0, 0x01, 0x1a, 0x4f, 0x97,
|
||||
0x17, 0x6e, 0xeb, 0xeb, 0x85, 0xdb, 0x3a, 0x5f, 0xb9, 0x68, 0xb9, 0x72, 0xd1, 0x97, 0x95, 0x8b,
|
||||
0x7e, 0xac, 0x5c, 0xf4, 0xf6, 0xc9, 0x7f, 0xfe, 0xd7, 0x8f, 0xab, 0x6a, 0xda, 0x9a, 0xa2, 0xf0,
|
||||
0xa0, 0x1c, 0xeb, 0xe1, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe6, 0xbf, 0x19, 0xa6, 0x24, 0x04,
|
||||
0x00, 0x00,
|
||||
}
|
||||
|
||||
// Field returns the value for the given fieldpath as a string, if defined.
|
||||
// If the value is not defined, the second value will be false.
|
||||
func (m *Envelope) Field(fieldpath []string) (string, bool) {
|
||||
@ -108,7 +249,7 @@ func (m *Envelope) Field(fieldpath []string) (string, bool) {
|
||||
case "topic":
|
||||
return string(m.Topic), len(m.Topic) > 0
|
||||
case "event":
|
||||
decoded, err := typeurl.UnmarshalAny(m.Event)
|
||||
decoded, err := github_com_containerd_typeurl.UnmarshalAny(m.Event)
|
||||
if err != nil {
|
||||
return "", false
|
||||
}
|
||||
@ -130,20 +271,21 @@ var _ grpc.ClientConn
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for Events service
|
||||
|
||||
// EventsClient is the client API for Events service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type EventsClient interface {
|
||||
// Publish an event to a topic.
|
||||
//
|
||||
// The event will be packed into a timestamp envelope with the namespace
|
||||
// introspected from the context. The envelope will then be dispatched.
|
||||
Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
|
||||
Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*types.Empty, error)
|
||||
// Forward sends an event that has already been packaged into an envelope
|
||||
// with a timestamp and namespace.
|
||||
//
|
||||
// This is useful if earlier timestamping is required or when forwarding on
|
||||
// behalf of another component, namespace or publisher.
|
||||
Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
|
||||
Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*types.Empty, error)
|
||||
// Subscribe to a stream of events, possibly returning only that match any
|
||||
// of the provided filters.
|
||||
//
|
||||
@ -162,18 +304,18 @@ func NewEventsClient(cc *grpc.ClientConn) EventsClient {
|
||||
return &eventsClient{cc}
|
||||
}
|
||||
|
||||
func (c *eventsClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
|
||||
out := new(google_protobuf2.Empty)
|
||||
err := grpc.Invoke(ctx, "/containerd.services.events.v1.Events/Publish", in, out, c.cc, opts...)
|
||||
func (c *eventsClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*types.Empty, error) {
|
||||
out := new(types.Empty)
|
||||
err := c.cc.Invoke(ctx, "/containerd.services.events.v1.Events/Publish", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *eventsClient) Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
|
||||
out := new(google_protobuf2.Empty)
|
||||
err := grpc.Invoke(ctx, "/containerd.services.events.v1.Events/Forward", in, out, c.cc, opts...)
|
||||
func (c *eventsClient) Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*types.Empty, error) {
|
||||
out := new(types.Empty)
|
||||
err := c.cc.Invoke(ctx, "/containerd.services.events.v1.Events/Forward", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -181,7 +323,7 @@ func (c *eventsClient) Forward(ctx context.Context, in *ForwardRequest, opts ...
|
||||
}
|
||||
|
||||
func (c *eventsClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_Events_serviceDesc.Streams[0], c.cc, "/containerd.services.events.v1.Events/Subscribe", opts...)
|
||||
stream, err := c.cc.NewStream(ctx, &_Events_serviceDesc.Streams[0], "/containerd.services.events.v1.Events/Subscribe", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -212,20 +354,19 @@ func (x *eventsSubscribeClient) Recv() (*Envelope, error) {
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Server API for Events service
|
||||
|
||||
// EventsServer is the server API for Events service.
|
||||
type EventsServer interface {
|
||||
// Publish an event to a topic.
|
||||
//
|
||||
// The event will be packed into a timestamp envelope with the namespace
|
||||
// introspected from the context. The envelope will then be dispatched.
|
||||
Publish(context.Context, *PublishRequest) (*google_protobuf2.Empty, error)
|
||||
Publish(context.Context, *PublishRequest) (*types.Empty, error)
|
||||
// Forward sends an event that has already been packaged into an envelope
|
||||
// with a timestamp and namespace.
|
||||
//
|
||||
// This is useful if earlier timestamping is required or when forwarding on
|
||||
// behalf of another component, namespace or publisher.
|
||||
Forward(context.Context, *ForwardRequest) (*google_protobuf2.Empty, error)
|
||||
Forward(context.Context, *ForwardRequest) (*types.Empty, error)
|
||||
// Subscribe to a stream of events, possibly returning only that match any
|
||||
// of the provided filters.
|
||||
//
|
||||
@ -351,6 +492,9 @@ func (m *PublishRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
i += n1
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -379,6 +523,9 @@ func (m *ForwardRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
i += n2
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -412,6 +559,9 @@ func (m *SubscribeRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -432,8 +582,8 @@ func (m *Envelope) MarshalTo(dAtA []byte) (int, error) {
|
||||
_ = l
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintEvents(dAtA, i, uint64(types.SizeOfStdTime(m.Timestamp)))
|
||||
n3, err := types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
|
||||
i = encodeVarintEvents(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
|
||||
n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@ -460,6 +610,9 @@ func (m *Envelope) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
i += n4
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -473,6 +626,9 @@ func encodeVarintEvents(dAtA []byte, offset int, v uint64) int {
|
||||
return offset + 1
|
||||
}
|
||||
func (m *PublishRequest) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Topic)
|
||||
@ -483,20 +639,32 @@ func (m *PublishRequest) Size() (n int) {
|
||||
l = m.Event.Size()
|
||||
n += 1 + l + sovEvents(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ForwardRequest) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Envelope != nil {
|
||||
l = m.Envelope.Size()
|
||||
n += 1 + l + sovEvents(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *SubscribeRequest) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Filters) > 0 {
|
||||
@ -505,13 +673,19 @@ func (m *SubscribeRequest) Size() (n int) {
|
||||
n += 1 + l + sovEvents(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *Envelope) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = types.SizeOfStdTime(m.Timestamp)
|
||||
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
|
||||
n += 1 + l + sovEvents(uint64(l))
|
||||
l = len(m.Namespace)
|
||||
if l > 0 {
|
||||
@ -525,6 +699,9 @@ func (m *Envelope) Size() (n int) {
|
||||
l = m.Event.Size()
|
||||
n += 1 + l + sovEvents(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@ -547,7 +724,8 @@ func (this *PublishRequest) String() string {
|
||||
}
|
||||
s := strings.Join([]string{`&PublishRequest{`,
|
||||
`Topic:` + fmt.Sprintf("%v", this.Topic) + `,`,
|
||||
`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "google_protobuf1.Any", 1) + `,`,
|
||||
`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "types.Any", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -558,6 +736,7 @@ func (this *ForwardRequest) String() string {
|
||||
}
|
||||
s := strings.Join([]string{`&ForwardRequest{`,
|
||||
`Envelope:` + strings.Replace(fmt.Sprintf("%v", this.Envelope), "Envelope", "Envelope", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -568,6 +747,7 @@ func (this *SubscribeRequest) String() string {
|
||||
}
|
||||
s := strings.Join([]string{`&SubscribeRequest{`,
|
||||
`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -577,10 +757,11 @@ func (this *Envelope) String() string {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&Envelope{`,
|
||||
`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||
`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
|
||||
`Topic:` + fmt.Sprintf("%v", this.Topic) + `,`,
|
||||
`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "google_protobuf1.Any", 1) + `,`,
|
||||
`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "types.Any", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -608,7 +789,7 @@ func (m *PublishRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -636,7 +817,7 @@ func (m *PublishRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -646,6 +827,9 @@ func (m *PublishRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -665,7 +849,7 @@ func (m *PublishRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -674,11 +858,14 @@ func (m *PublishRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Event == nil {
|
||||
m.Event = &google_protobuf1.Any{}
|
||||
m.Event = &types.Any{}
|
||||
}
|
||||
if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
@ -693,9 +880,13 @@ func (m *PublishRequest) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -720,7 +911,7 @@ func (m *ForwardRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -748,7 +939,7 @@ func (m *ForwardRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -757,6 +948,9 @@ func (m *ForwardRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -776,9 +970,13 @@ func (m *ForwardRequest) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -803,7 +1001,7 @@ func (m *SubscribeRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -831,7 +1029,7 @@ func (m *SubscribeRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -841,6 +1039,9 @@ func (m *SubscribeRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -855,9 +1056,13 @@ func (m *SubscribeRequest) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -882,7 +1087,7 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -910,7 +1115,7 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -919,10 +1124,13 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
|
||||
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
@ -940,7 +1148,7 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -950,6 +1158,9 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -969,7 +1180,7 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -979,6 +1190,9 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -998,7 +1212,7 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1007,11 +1221,14 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Event == nil {
|
||||
m.Event = &google_protobuf1.Any{}
|
||||
m.Event = &types.Any{}
|
||||
}
|
||||
if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
@ -1026,9 +1243,13 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -1092,10 +1313,13 @@ func skipEvents(dAtA []byte) (n int, err error) {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthEvents
|
||||
}
|
||||
iNdEx += length
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthEvents
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
@ -1124,6 +1348,9 @@ func skipEvents(dAtA []byte) (n int, err error) {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthEvents
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
@ -1142,41 +1369,3 @@ var (
|
||||
ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/events.proto", fileDescriptorEvents)
|
||||
}
|
||||
|
||||
var fileDescriptorEvents = []byte{
|
||||
// 466 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcd, 0x8e, 0xd3, 0x30,
|
||||
0x14, 0x85, 0xeb, 0xf9, 0x6d, 0x3c, 0xd2, 0x08, 0x45, 0x15, 0x2a, 0x01, 0xd2, 0xaa, 0x1b, 0x2a,
|
||||
0x04, 0x0e, 0x53, 0x76, 0x20, 0x21, 0x28, 0x94, 0xf5, 0x28, 0x80, 0x54, 0xb1, 0x4b, 0xd2, 0xdb,
|
||||
0xd4, 0x52, 0x62, 0x9b, 0xd8, 0x09, 0x9a, 0xdd, 0x3c, 0x02, 0x1b, 0xde, 0x84, 0x0d, 0x6f, 0xd0,
|
||||
0x25, 0x4b, 0x56, 0xc0, 0xf4, 0x49, 0x50, 0x13, 0xbb, 0x61, 0x3a, 0x40, 0x10, 0xbb, 0x6b, 0xdf,
|
||||
0xe3, 0xcf, 0xb9, 0xe7, 0x38, 0xf8, 0x45, 0x4c, 0xd5, 0x22, 0x0f, 0x49, 0xc4, 0x53, 0x2f, 0xe2,
|
||||
0x4c, 0x05, 0x94, 0x41, 0x36, 0xfb, 0xb5, 0x0c, 0x04, 0xf5, 0x24, 0x64, 0x05, 0x8d, 0x40, 0x7a,
|
||||
0x50, 0x00, 0x53, 0xd2, 0x2b, 0x4e, 0x74, 0x45, 0x44, 0xc6, 0x15, 0xb7, 0x6f, 0xd7, 0x7a, 0x62,
|
||||
0xb4, 0x44, 0x2b, 0x8a, 0x13, 0xe7, 0x69, 0xe3, 0x25, 0x25, 0x26, 0xcc, 0xe7, 0x9e, 0x48, 0xf2,
|
||||
0x98, 0x32, 0x6f, 0x4e, 0x21, 0x99, 0x89, 0x40, 0x2d, 0xaa, 0x0b, 0x9c, 0x4e, 0xcc, 0x63, 0x5e,
|
||||
0x96, 0xde, 0xba, 0xd2, 0xbb, 0x37, 0x62, 0xce, 0xe3, 0x04, 0xea, 0xd3, 0x01, 0x3b, 0xd3, 0xad,
|
||||
0x9b, 0xdb, 0x2d, 0x48, 0x85, 0x32, 0xcd, 0xde, 0x76, 0x53, 0xd1, 0x14, 0xa4, 0x0a, 0x52, 0x51,
|
||||
0x09, 0x06, 0x3e, 0x3e, 0x3e, 0xcd, 0xc3, 0x84, 0xca, 0x85, 0x0f, 0xef, 0x72, 0x90, 0xca, 0xee,
|
||||
0xe0, 0x7d, 0xc5, 0x05, 0x8d, 0xba, 0xa8, 0x8f, 0x86, 0x96, 0x5f, 0x2d, 0xec, 0xbb, 0x78, 0xbf,
|
||||
0x9c, 0xb2, 0xbb, 0xd3, 0x47, 0xc3, 0xa3, 0x51, 0x87, 0x54, 0x60, 0x62, 0xc0, 0xe4, 0x19, 0x3b,
|
||||
0xf3, 0x2b, 0xc9, 0xe0, 0x0d, 0x3e, 0x7e, 0xc9, 0xb3, 0xf7, 0x41, 0x36, 0x33, 0xcc, 0xe7, 0xb8,
|
||||
0x0d, 0xac, 0x80, 0x84, 0x0b, 0x28, 0xb1, 0x47, 0xa3, 0x3b, 0xe4, 0xaf, 0x46, 0x92, 0x89, 0x96,
|
||||
0xfb, 0x9b, 0x83, 0x83, 0x7b, 0xf8, 0xda, 0xab, 0x3c, 0x94, 0x51, 0x46, 0x43, 0x30, 0xe0, 0x2e,
|
||||
0x3e, 0x9c, 0xd3, 0x44, 0x41, 0x26, 0xbb, 0xa8, 0xbf, 0x3b, 0xb4, 0x7c, 0xb3, 0x1c, 0x7c, 0x42,
|
||||
0xb8, 0x6d, 0x20, 0xf6, 0x18, 0x5b, 0x9b, 0xc1, 0xf5, 0x07, 0x38, 0x57, 0x26, 0x78, 0x6d, 0x14,
|
||||
0xe3, 0xf6, 0xf2, 0x5b, 0xaf, 0xf5, 0xe1, 0x7b, 0x0f, 0xf9, 0xf5, 0x31, 0xfb, 0x16, 0xb6, 0x58,
|
||||
0x90, 0x82, 0x14, 0x41, 0x04, 0xa5, 0x0b, 0x96, 0x5f, 0x6f, 0xd4, 0xae, 0xed, 0xfe, 0xd6, 0xb5,
|
||||
0xbd, 0x46, 0xd7, 0x1e, 0xed, 0x9d, 0x7f, 0xee, 0xa1, 0xd1, 0xc7, 0x1d, 0x7c, 0x30, 0x29, 0x5d,
|
||||
0xb0, 0x4f, 0xf1, 0xa1, 0x8e, 0xc6, 0xbe, 0xdf, 0xe0, 0xd6, 0xe5, 0x08, 0x9d, 0xeb, 0x57, 0xee,
|
||||
0x99, 0xac, 0xdf, 0xc4, 0x9a, 0xa8, 0x83, 0x69, 0x24, 0x5e, 0x0e, 0xf0, 0x8f, 0xc4, 0x18, 0x5b,
|
||||
0x9b, 0x4c, 0x6c, 0xaf, 0x81, 0xb9, 0x9d, 0x9e, 0xf3, 0xaf, 0x8f, 0xe0, 0x01, 0x1a, 0x4f, 0x97,
|
||||
0x17, 0x6e, 0xeb, 0xeb, 0x85, 0xdb, 0x3a, 0x5f, 0xb9, 0x68, 0xb9, 0x72, 0xd1, 0x97, 0x95, 0x8b,
|
||||
0x7e, 0xac, 0x5c, 0xf4, 0xf6, 0xc9, 0x7f, 0xfe, 0xd7, 0x8f, 0xab, 0x6a, 0xda, 0x9a, 0xa2, 0xf0,
|
||||
0xa0, 0x1c, 0xeb, 0xe1, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe6, 0xbf, 0x19, 0xa6, 0x24, 0x04,
|
||||
0x00, 0x00,
|
||||
}
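The marshalling code above length-prefixes strings and embedded messages with protobuf base-128 varints through the generated encodeVarintEvents and sovEvents helpers. The following is a minimal, self-contained Go sketch of that encoding for reference; it is an illustrative re-implementation, not code taken from the generated file.

package main

import "fmt"

// putUvarint appends v to buf using protobuf's base-128 varint encoding:
// seven payload bits per byte, with the high bit set on every byte except
// the last. This mirrors what the generated encodeVarintEvents helper does,
// but is a simplified sketch rather than the generated code.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 0x80 {
		buf = append(buf, byte(v)|0x80)
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarintSize reports how many bytes putUvarint would emit; the generated
// sovEvents helper computes the same quantity when sizing length prefixes.
func uvarintSize(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

func main() {
	b := putUvarint(nil, 300)
	fmt.Printf("varint(300) = %x, size = %d\n", b, uvarintSize(300)) // ac02, 2
}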
865 vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go (generated, vendored): File diff suppressed because it is too large
377 vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go (generated, vendored)
@ -1,35 +1,21 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/services/introspection/v1/introspection.proto

/*
Package introspection is a generated protocol buffer package.

It is generated from these files:
github.com/containerd/containerd/api/services/introspection/v1/introspection.proto

It has these top-level messages:
Plugin
PluginsRequest
PluginsResponse
*/
package introspection

import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import containerd_types "github.com/containerd/containerd/api/types"
import google_rpc "github.com/gogo/googleapis/google/rpc"

// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"

import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"

import strings "strings"
import reflect "reflect"
import sortkeys "github.com/gogo/protobuf/sortkeys"

import io "io"
import (
context "context"
fmt "fmt"
types "github.com/containerd/containerd/api/types"
rpc "github.com/gogo/googleapis/google/rpc"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
grpc "google.golang.org/grpc"
io "io"
math "math"
reflect "reflect"
strings "strings"
)

// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
@ -51,7 +37,7 @@ type Plugin struct {
|
||||
// ID identifies the plugin uniquely in the system.
|
||||
ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
// Requires lists the plugin types required by this plugin.
|
||||
Requires []string `protobuf:"bytes,3,rep,name=requires" json:"requires,omitempty"`
|
||||
Requires []string `protobuf:"bytes,3,rep,name=requires,proto3" json:"requires,omitempty"`
|
||||
// Platforms enumerates the platforms this plugin will support.
|
||||
//
|
||||
// If values are provided here, the plugin will only be operable under the
|
||||
@ -61,30 +47,61 @@ type Plugin struct {
|
||||
//
|
||||
// If the plugin prefers certain platforms over others, they should be
|
||||
// listed from most to least preferred.
|
||||
Platforms []containerd_types.Platform `protobuf:"bytes,4,rep,name=platforms" json:"platforms"`
|
||||
Platforms []types.Platform `protobuf:"bytes,4,rep,name=platforms,proto3" json:"platforms"`
|
||||
// Exports allows plugins to provide values about state or configuration to
|
||||
// interested parties.
|
||||
//
|
||||
// One example is exposing the configured path of a snapshotter plugin.
|
||||
Exports map[string]string `protobuf:"bytes,5,rep,name=exports" json:"exports,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
Exports map[string]string `protobuf:"bytes,5,rep,name=exports,proto3" json:"exports,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// Capabilities allows plugins to communicate feature switches to allow
|
||||
// clients to detect features that may not be on by default or may be
|
||||
// different from version to version.
|
||||
//
|
||||
// Use this sparingly.
|
||||
Capabilities []string `protobuf:"bytes,6,rep,name=capabilities" json:"capabilities,omitempty"`
|
||||
Capabilities []string `protobuf:"bytes,6,rep,name=capabilities,proto3" json:"capabilities,omitempty"`
|
||||
// InitErr will be set if the plugin fails initialization.
|
||||
//
|
||||
// This means the plugin may have been registered but a non-terminal error
|
||||
// was encountered during initialization.
|
||||
//
|
||||
// Plugins that have this value set cannot be used.
|
||||
InitErr *google_rpc.Status `protobuf:"bytes,7,opt,name=init_err,json=initErr" json:"init_err,omitempty"`
|
||||
InitErr *rpc.Status `protobuf:"bytes,7,opt,name=init_err,json=initErr,proto3" json:"init_err,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Plugin) Reset() { *m = Plugin{} }
|
||||
func (*Plugin) ProtoMessage() {}
|
||||
func (*Plugin) Descriptor() ([]byte, []int) { return fileDescriptorIntrospection, []int{0} }
|
||||
func (m *Plugin) Reset() { *m = Plugin{} }
|
||||
func (*Plugin) ProtoMessage() {}
|
||||
func (*Plugin) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_1a14fda866f10715, []int{0}
|
||||
}
|
||||
func (m *Plugin) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Plugin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Plugin.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Plugin) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Plugin.Merge(m, src)
|
||||
}
|
||||
func (m *Plugin) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Plugin) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Plugin.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Plugin proto.InternalMessageInfo
|
||||
|
||||
type PluginsRequest struct {
|
||||
// Filters contains one or more filters using the syntax defined in the
|
||||
@ -97,27 +114,129 @@ type PluginsRequest struct {
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
//
|
||||
// If filters is zero-length or nil, all items will be returned.
|
||||
Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
|
||||
Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *PluginsRequest) Reset() { *m = PluginsRequest{} }
|
||||
func (*PluginsRequest) ProtoMessage() {}
|
||||
func (*PluginsRequest) Descriptor() ([]byte, []int) { return fileDescriptorIntrospection, []int{1} }
|
||||
func (m *PluginsRequest) Reset() { *m = PluginsRequest{} }
|
||||
func (*PluginsRequest) ProtoMessage() {}
|
||||
func (*PluginsRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_1a14fda866f10715, []int{1}
|
||||
}
|
||||
func (m *PluginsRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *PluginsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_PluginsRequest.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *PluginsRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_PluginsRequest.Merge(m, src)
|
||||
}
|
||||
func (m *PluginsRequest) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *PluginsRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_PluginsRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_PluginsRequest proto.InternalMessageInfo
|
||||
|
||||
type PluginsResponse struct {
|
||||
Plugins []Plugin `protobuf:"bytes,1,rep,name=plugins" json:"plugins"`
|
||||
Plugins []Plugin `protobuf:"bytes,1,rep,name=plugins,proto3" json:"plugins"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *PluginsResponse) Reset() { *m = PluginsResponse{} }
|
||||
func (*PluginsResponse) ProtoMessage() {}
|
||||
func (*PluginsResponse) Descriptor() ([]byte, []int) { return fileDescriptorIntrospection, []int{2} }
|
||||
func (m *PluginsResponse) Reset() { *m = PluginsResponse{} }
|
||||
func (*PluginsResponse) ProtoMessage() {}
|
||||
func (*PluginsResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_1a14fda866f10715, []int{2}
|
||||
}
|
||||
func (m *PluginsResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *PluginsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_PluginsResponse.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *PluginsResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_PluginsResponse.Merge(m, src)
|
||||
}
|
||||
func (m *PluginsResponse) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *PluginsResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_PluginsResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_PluginsResponse proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Plugin)(nil), "containerd.services.introspection.v1.Plugin")
|
||||
proto.RegisterMapType((map[string]string)(nil), "containerd.services.introspection.v1.Plugin.ExportsEntry")
|
||||
proto.RegisterType((*PluginsRequest)(nil), "containerd.services.introspection.v1.PluginsRequest")
|
||||
proto.RegisterType((*PluginsResponse)(nil), "containerd.services.introspection.v1.PluginsResponse")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", fileDescriptor_1a14fda866f10715)
|
||||
}
|
||||
|
||||
var fileDescriptor_1a14fda866f10715 = []byte{
|
||||
// 487 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x40,
|
||||
0x10, 0xcd, 0x3a, 0x69, 0xdc, 0x4c, 0xca, 0x87, 0x56, 0x15, 0x58, 0x3e, 0xb8, 0x51, 0xc4, 0x21,
|
||||
0x42, 0xb0, 0x56, 0x03, 0x48, 0xb4, 0x48, 0x1c, 0x22, 0x72, 0xa8, 0xd4, 0x43, 0xe5, 0x5e, 0x10,
|
||||
0x97, 0xca, 0x71, 0x36, 0x66, 0x85, 0xeb, 0xdd, 0xee, 0xae, 0x2d, 0x72, 0xe3, 0xc6, 0x5f, 0xcb,
|
||||
0x91, 0x23, 0xa7, 0x8a, 0xfa, 0x37, 0xf0, 0x03, 0x90, 0xbd, 0x76, 0x9b, 0xdc, 0x12, 0x71, 0x9b,
|
||||
0x79, 0x7e, 0x6f, 0xe6, 0xcd, 0x93, 0x17, 0x82, 0x98, 0xe9, 0xaf, 0xd9, 0x8c, 0x44, 0xfc, 0xda,
|
||||
0x8f, 0x78, 0xaa, 0x43, 0x96, 0x52, 0x39, 0x5f, 0x2f, 0x43, 0xc1, 0x7c, 0x45, 0x65, 0xce, 0x22,
|
||||
0xaa, 0x7c, 0x96, 0x6a, 0xc9, 0x95, 0xa0, 0x91, 0x66, 0x3c, 0xf5, 0xf3, 0xe3, 0x4d, 0x80, 0x08,
|
||||
0xc9, 0x35, 0xc7, 0x2f, 0x1e, 0xd4, 0xa4, 0x51, 0x92, 0x4d, 0x62, 0x7e, 0xec, 0x9e, 0x6c, 0xb5,
|
||||
0x59, 0x2f, 0x05, 0x55, 0xbe, 0x48, 0x42, 0xbd, 0xe0, 0xf2, 0xda, 0x2c, 0x70, 0x9f, 0xc7, 0x9c,
|
||||
0xc7, 0x09, 0xf5, 0xa5, 0x88, 0x7c, 0xa5, 0x43, 0x9d, 0xa9, 0xfa, 0xc3, 0x61, 0xcc, 0x63, 0x5e,
|
||||
0x95, 0x7e, 0x59, 0x19, 0x74, 0xf8, 0xd7, 0x82, 0xee, 0x45, 0x92, 0xc5, 0x2c, 0xc5, 0x18, 0x3a,
|
||||
0xe5, 0x44, 0x07, 0x0d, 0xd0, 0xa8, 0x17, 0x54, 0x35, 0x7e, 0x06, 0x16, 0x9b, 0x3b, 0x56, 0x89,
|
||||
0x4c, 0xba, 0xc5, 0xed, 0x91, 0x75, 0xf6, 0x29, 0xb0, 0xd8, 0x1c, 0xbb, 0xb0, 0x2f, 0xe9, 0x4d,
|
||||
0xc6, 0x24, 0x55, 0x4e, 0x7b, 0xd0, 0x1e, 0xf5, 0x82, 0xfb, 0x1e, 0x7f, 0x84, 0x5e, 0xe3, 0x49,
|
||||
0x39, 0x9d, 0x41, 0x7b, 0xd4, 0x1f, 0xbb, 0x64, 0xed, 0xec, 0xca, 0x36, 0xb9, 0xa8, 0x29, 0x93,
|
||||
0xce, 0xea, 0xf6, 0xa8, 0x15, 0x3c, 0x48, 0xf0, 0x25, 0xd8, 0xf4, 0xbb, 0xe0, 0x52, 0x2b, 0x67,
|
||||
0xaf, 0x52, 0x9f, 0x90, 0x6d, 0x42, 0x23, 0xe6, 0x0c, 0x32, 0x35, 0xda, 0x69, 0xaa, 0xe5, 0x32,
|
||||
0x68, 0x26, 0xe1, 0x21, 0x1c, 0x44, 0xa1, 0x08, 0x67, 0x2c, 0x61, 0x9a, 0x51, 0xe5, 0x74, 0x2b,
|
||||
0xd3, 0x1b, 0x18, 0x7e, 0x0d, 0xfb, 0x2c, 0x65, 0xfa, 0x8a, 0x4a, 0xe9, 0xd8, 0x03, 0x34, 0xea,
|
||||
0x8f, 0x31, 0x31, 0x69, 0x12, 0x29, 0x22, 0x72, 0x59, 0xa5, 0x19, 0xd8, 0x25, 0x67, 0x2a, 0xa5,
|
||||
0x7b, 0x0a, 0x07, 0xeb, 0xbb, 0xf0, 0x53, 0x68, 0x7f, 0xa3, 0xcb, 0x3a, 0xbe, 0xb2, 0xc4, 0x87,
|
||||
0xb0, 0x97, 0x87, 0x49, 0x46, 0x4d, 0x80, 0x81, 0x69, 0x4e, 0xad, 0xf7, 0x68, 0xf8, 0x12, 0x1e,
|
||||
0x1b, 0xbb, 0x2a, 0xa0, 0x37, 0x19, 0x55, 0x1a, 0x3b, 0x60, 0x2f, 0x58, 0xa2, 0xa9, 0x54, 0x0e,
|
||||
0xaa, 0xbc, 0x35, 0xed, 0xf0, 0x0a, 0x9e, 0xdc, 0x73, 0x95, 0xe0, 0xa9, 0xa2, 0xf8, 0x1c, 0x6c,
|
||||
0x61, 0xa0, 0x8a, 0xdc, 0x1f, 0xbf, 0xda, 0x25, 0xa2, 0x3a, 0xf2, 0x66, 0xc4, 0xf8, 0x27, 0x82,
|
||||
0x47, 0x67, 0xeb, 0x54, 0x9c, 0x83, 0x5d, 0xaf, 0xc4, 0x6f, 0x77, 0x99, 0xdc, 0x5c, 0xe3, 0xbe,
|
||||
0xdb, 0x51, 0x65, 0xee, 0x9a, 0x2c, 0x56, 0x77, 0x5e, 0xeb, 0xf7, 0x9d, 0xd7, 0xfa, 0x51, 0x78,
|
||||
0x68, 0x55, 0x78, 0xe8, 0x57, 0xe1, 0xa1, 0x3f, 0x85, 0x87, 0xbe, 0x9c, 0xff, 0xdf, 0x5b, 0xfc,
|
||||
0xb0, 0x01, 0x7c, 0xb6, 0x66, 0xdd, 0xea, 0xf7, 0x7f, 0xf3, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe6,
|
||||
0x72, 0xde, 0x35, 0xe4, 0x03, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
@ -126,8 +245,9 @@ var _ grpc.ClientConn
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for Introspection service
|
||||
|
||||
// IntrospectionClient is the client API for Introspection service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type IntrospectionClient interface {
|
||||
// Plugins returns a list of plugins in containerd.
|
||||
//
|
||||
@ -146,15 +266,14 @@ func NewIntrospectionClient(cc *grpc.ClientConn) IntrospectionClient {
|
||||
|
||||
func (c *introspectionClient) Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) {
|
||||
out := new(PluginsResponse)
|
||||
err := grpc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Plugins", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Plugins", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Server API for Introspection service
|
||||
|
||||
// IntrospectionServer is the server API for Introspection service.
|
||||
type IntrospectionServer interface {
|
||||
// Plugins returns a list of plugins in containerd.
|
||||
//
|
||||
@ -294,6 +413,9 @@ func (m *Plugin) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
i += n1
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -327,6 +449,9 @@ func (m *PluginsRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -357,6 +482,9 @@ func (m *PluginsResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||
i += n
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -370,6 +498,9 @@ func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int {
|
||||
return offset + 1
|
||||
}
|
||||
func (m *Plugin) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Type)
|
||||
@ -410,10 +541,16 @@ func (m *Plugin) Size() (n int) {
|
||||
l = m.InitErr.Size()
|
||||
n += 1 + l + sovIntrospection(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *PluginsRequest) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Filters) > 0 {
|
||||
@ -422,10 +559,16 @@ func (m *PluginsRequest) Size() (n int) {
|
||||
n += 1 + l + sovIntrospection(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *PluginsResponse) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Plugins) > 0 {
|
||||
@ -434,6 +577,9 @@ func (m *PluginsResponse) Size() (n int) {
|
||||
n += 1 + l + sovIntrospection(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@ -458,7 +604,7 @@ func (this *Plugin) String() string {
|
||||
for k, _ := range this.Exports {
|
||||
keysForExports = append(keysForExports, k)
|
||||
}
|
||||
sortkeys.Strings(keysForExports)
|
||||
github_com_gogo_protobuf_sortkeys.Strings(keysForExports)
|
||||
mapStringForExports := "map[string]string{"
|
||||
for _, k := range keysForExports {
|
||||
mapStringForExports += fmt.Sprintf("%v: %v,", k, this.Exports[k])
|
||||
@ -468,10 +614,11 @@ func (this *Plugin) String() string {
|
||||
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
|
||||
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
|
||||
`Requires:` + fmt.Sprintf("%v", this.Requires) + `,`,
|
||||
`Platforms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Platforms), "Platform", "containerd_types.Platform", 1), `&`, ``, 1) + `,`,
|
||||
`Platforms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Platforms), "Platform", "types.Platform", 1), `&`, ``, 1) + `,`,
|
||||
`Exports:` + mapStringForExports + `,`,
|
||||
`Capabilities:` + fmt.Sprintf("%v", this.Capabilities) + `,`,
|
||||
`InitErr:` + strings.Replace(fmt.Sprintf("%v", this.InitErr), "Status", "google_rpc.Status", 1) + `,`,
|
||||
`InitErr:` + strings.Replace(fmt.Sprintf("%v", this.InitErr), "Status", "rpc.Status", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -482,6 +629,7 @@ func (this *PluginsRequest) String() string {
|
||||
}
|
||||
s := strings.Join([]string{`&PluginsRequest{`,
|
||||
`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -492,6 +640,7 @@ func (this *PluginsResponse) String() string {
|
||||
}
|
||||
s := strings.Join([]string{`&PluginsResponse{`,
|
||||
`Plugins:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Plugins), "Plugin", "Plugin", 1), `&`, ``, 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -519,7 +668,7 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -547,7 +696,7 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -557,6 +706,9 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -576,7 +728,7 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -586,6 +738,9 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -605,7 +760,7 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -615,6 +770,9 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -634,7 +792,7 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -643,10 +801,13 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Platforms = append(m.Platforms, containerd_types.Platform{})
|
||||
m.Platforms = append(m.Platforms, types.Platform{})
|
||||
if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -665,7 +826,7 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -674,6 +835,9 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -694,7 +858,7 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -711,7 +875,7 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
stringLenmapkey |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -721,6 +885,9 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -737,7 +904,7 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||
stringLenmapvalue |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -747,6 +914,9 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||
if postStringIndexmapvalue < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if postStringIndexmapvalue > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -783,7 +953,7 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -793,6 +963,9 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -812,7 +985,7 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -821,11 +994,14 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.InitErr == nil {
|
||||
m.InitErr = &google_rpc.Status{}
|
||||
m.InitErr = &rpc.Status{}
|
||||
}
|
||||
if err := m.InitErr.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
@ -840,9 +1016,13 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -867,7 +1047,7 @@ func (m *PluginsRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -895,7 +1075,7 @@ func (m *PluginsRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -905,6 +1085,9 @@ func (m *PluginsRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -919,9 +1102,13 @@ func (m *PluginsRequest) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -946,7 +1133,7 @@ func (m *PluginsResponse) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -974,7 +1161,7 @@ func (m *PluginsResponse) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -983,6 +1170,9 @@ func (m *PluginsResponse) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -1000,9 +1190,13 @@ func (m *PluginsResponse) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -1066,10 +1260,13 @@ func skipIntrospection(dAtA []byte) (n int, err error) {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthIntrospection
|
||||
}
|
||||
iNdEx += length
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthIntrospection
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
@ -1098,6 +1295,9 @@ func skipIntrospection(dAtA []byte) (n int, err error) {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthIntrospection
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
@ -1116,42 +1316,3 @@ var (
|
||||
ErrInvalidLengthIntrospection = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowIntrospection = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", fileDescriptorIntrospection)
|
||||
}
|
||||
|
||||
var fileDescriptorIntrospection = []byte{
|
||||
// 487 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x40,
|
||||
0x10, 0xcd, 0x3a, 0x69, 0xdc, 0x4c, 0xca, 0x87, 0x56, 0x15, 0x58, 0x3e, 0xb8, 0x51, 0xc4, 0x21,
|
||||
0x42, 0xb0, 0x56, 0x03, 0x48, 0xb4, 0x48, 0x1c, 0x22, 0x72, 0xa8, 0xd4, 0x43, 0xe5, 0x5e, 0x10,
|
||||
0x97, 0xca, 0x71, 0x36, 0x66, 0x85, 0xeb, 0xdd, 0xee, 0xae, 0x2d, 0x72, 0xe3, 0xc6, 0x5f, 0xcb,
|
||||
0x91, 0x23, 0xa7, 0x8a, 0xfa, 0x37, 0xf0, 0x03, 0x90, 0xbd, 0x76, 0x9b, 0xdc, 0x12, 0x71, 0x9b,
|
||||
0x79, 0x7e, 0x6f, 0xe6, 0xcd, 0x93, 0x17, 0x82, 0x98, 0xe9, 0xaf, 0xd9, 0x8c, 0x44, 0xfc, 0xda,
|
||||
0x8f, 0x78, 0xaa, 0x43, 0x96, 0x52, 0x39, 0x5f, 0x2f, 0x43, 0xc1, 0x7c, 0x45, 0x65, 0xce, 0x22,
|
||||
0xaa, 0x7c, 0x96, 0x6a, 0xc9, 0x95, 0xa0, 0x91, 0x66, 0x3c, 0xf5, 0xf3, 0xe3, 0x4d, 0x80, 0x08,
|
||||
0xc9, 0x35, 0xc7, 0x2f, 0x1e, 0xd4, 0xa4, 0x51, 0x92, 0x4d, 0x62, 0x7e, 0xec, 0x9e, 0x6c, 0xb5,
|
||||
0x59, 0x2f, 0x05, 0x55, 0xbe, 0x48, 0x42, 0xbd, 0xe0, 0xf2, 0xda, 0x2c, 0x70, 0x9f, 0xc7, 0x9c,
|
||||
0xc7, 0x09, 0xf5, 0xa5, 0x88, 0x7c, 0xa5, 0x43, 0x9d, 0xa9, 0xfa, 0xc3, 0x61, 0xcc, 0x63, 0x5e,
|
||||
0x95, 0x7e, 0x59, 0x19, 0x74, 0xf8, 0xd7, 0x82, 0xee, 0x45, 0x92, 0xc5, 0x2c, 0xc5, 0x18, 0x3a,
|
||||
0xe5, 0x44, 0x07, 0x0d, 0xd0, 0xa8, 0x17, 0x54, 0x35, 0x7e, 0x06, 0x16, 0x9b, 0x3b, 0x56, 0x89,
|
||||
0x4c, 0xba, 0xc5, 0xed, 0x91, 0x75, 0xf6, 0x29, 0xb0, 0xd8, 0x1c, 0xbb, 0xb0, 0x2f, 0xe9, 0x4d,
|
||||
0xc6, 0x24, 0x55, 0x4e, 0x7b, 0xd0, 0x1e, 0xf5, 0x82, 0xfb, 0x1e, 0x7f, 0x84, 0x5e, 0xe3, 0x49,
|
||||
0x39, 0x9d, 0x41, 0x7b, 0xd4, 0x1f, 0xbb, 0x64, 0xed, 0xec, 0xca, 0x36, 0xb9, 0xa8, 0x29, 0x93,
|
||||
0xce, 0xea, 0xf6, 0xa8, 0x15, 0x3c, 0x48, 0xf0, 0x25, 0xd8, 0xf4, 0xbb, 0xe0, 0x52, 0x2b, 0x67,
|
||||
0xaf, 0x52, 0x9f, 0x90, 0x6d, 0x42, 0x23, 0xe6, 0x0c, 0x32, 0x35, 0xda, 0x69, 0xaa, 0xe5, 0x32,
|
||||
0x68, 0x26, 0xe1, 0x21, 0x1c, 0x44, 0xa1, 0x08, 0x67, 0x2c, 0x61, 0x9a, 0x51, 0xe5, 0x74, 0x2b,
|
||||
0xd3, 0x1b, 0x18, 0x7e, 0x0d, 0xfb, 0x2c, 0x65, 0xfa, 0x8a, 0x4a, 0xe9, 0xd8, 0x03, 0x34, 0xea,
|
||||
0x8f, 0x31, 0x31, 0x69, 0x12, 0x29, 0x22, 0x72, 0x59, 0xa5, 0x19, 0xd8, 0x25, 0x67, 0x2a, 0xa5,
|
||||
0x7b, 0x0a, 0x07, 0xeb, 0xbb, 0xf0, 0x53, 0x68, 0x7f, 0xa3, 0xcb, 0x3a, 0xbe, 0xb2, 0xc4, 0x87,
|
||||
0xb0, 0x97, 0x87, 0x49, 0x46, 0x4d, 0x80, 0x81, 0x69, 0x4e, 0xad, 0xf7, 0x68, 0xf8, 0x12, 0x1e,
|
||||
0x1b, 0xbb, 0x2a, 0xa0, 0x37, 0x19, 0x55, 0x1a, 0x3b, 0x60, 0x2f, 0x58, 0xa2, 0xa9, 0x54, 0x0e,
|
||||
0xaa, 0xbc, 0x35, 0xed, 0xf0, 0x0a, 0x9e, 0xdc, 0x73, 0x95, 0xe0, 0xa9, 0xa2, 0xf8, 0x1c, 0x6c,
|
||||
0x61, 0xa0, 0x8a, 0xdc, 0x1f, 0xbf, 0xda, 0x25, 0xa2, 0x3a, 0xf2, 0x66, 0xc4, 0xf8, 0x27, 0x82,
|
||||
0x47, 0x67, 0xeb, 0x54, 0x9c, 0x83, 0x5d, 0xaf, 0xc4, 0x6f, 0x77, 0x99, 0xdc, 0x5c, 0xe3, 0xbe,
|
||||
0xdb, 0x51, 0x65, 0xee, 0x9a, 0x2c, 0x56, 0x77, 0x5e, 0xeb, 0xf7, 0x9d, 0xd7, 0xfa, 0x51, 0x78,
|
||||
0x68, 0x55, 0x78, 0xe8, 0x57, 0xe1, 0xa1, 0x3f, 0x85, 0x87, 0xbe, 0x9c, 0xff, 0xdf, 0x5b, 0xfc,
|
||||
0xb0, 0x01, 0x7c, 0xb6, 0x66, 0xdd, 0xea, 0xf7, 0x7f, 0xf3, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe6,
|
||||
0x72, 0xde, 0x35, 0xe4, 0x03, 0x00, 0x00,
|
||||
}
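The introspection diff above documents that the entries in PluginsRequest.Filters are OR'ed together and that a zero-length list returns every plugin. Below is a minimal, hedged sketch of calling the regenerated client; the socket path, dial options, and the example filter expression are assumptions for illustration, while IntrospectionClient, PluginsRequest, and PluginsResponse are the generated types shown in this diff.

package main

import (
	"context"
	"fmt"
	"log"

	introspection "github.com/containerd/containerd/api/services/introspection/v1"
	"google.golang.org/grpc"
)

func main() {
	// Assumption: containerd's default gRPC socket location.
	conn, err := grpc.Dial("unix:///run/containerd/containerd.sock", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := introspection.NewIntrospectionClient(conn)

	// An empty Filters slice returns all plugins; multiple entries are OR'ed.
	// The filter expression below is only an example.
	resp, err := client.Plugins(context.Background(), &introspection.PluginsRequest{
		Filters: []string{`type=="io.containerd.grpc.v1"`},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range resp.Plugins {
		fmt.Printf("%s/%s init error: %v\n", p.Type, p.ID, p.InitErr)
	}
}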
583 vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go (generated, vendored): File diff suppressed because it is too large
797 vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go (generated, vendored): File diff suppressed because it is too large
1490 vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go (generated, vendored): File diff suppressed because it is too large
2224 vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go (generated, vendored): File diff suppressed because it is too large
177 vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go (generated, vendored)
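The hunks that follow regenerate version.pb.go in the same way. For orientation, here is a minimal, hedged sketch of how the regenerated VersionClient is used; the socket path and dial options are again assumptions, while NewVersionClient, the types.Empty request, and the Version and Revision response fields all appear in the diff below.

package main

import (
	"context"
	"fmt"
	"log"

	version "github.com/containerd/containerd/api/services/version/v1"
	types "github.com/gogo/protobuf/types"
	"google.golang.org/grpc"
)

func main() {
	// Assumption: containerd's default gRPC socket location.
	conn, err := grpc.Dial("unix:///run/containerd/containerd.sock", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := version.NewVersionClient(conn)

	// Version takes an empty request and returns the daemon version and git revision.
	resp, err := client.Version(context.Background(), &types.Empty{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Version, resp.Revision)
}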
@ -1,31 +1,19 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/services/version/v1/version.proto

/*
Package version is a generated protocol buffer package.

It is generated from these files:
github.com/containerd/containerd/api/services/version/v1/version.proto

It has these top-level messages:
VersionResponse
*/
package version

import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/gogo/protobuf/types"

// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"

import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"

import strings "strings"
import reflect "reflect"

import io "io"
import (
context "context"
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
types "github.com/gogo/protobuf/types"
grpc "google.golang.org/grpc"
io "io"
math "math"
reflect "reflect"
strings "strings"
)

// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
@ -39,18 +27,73 @@ var _ = math.Inf
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type VersionResponse struct {
|
||||
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
|
||||
Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
|
||||
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
|
||||
Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *VersionResponse) Reset() { *m = VersionResponse{} }
|
||||
func (*VersionResponse) ProtoMessage() {}
|
||||
func (*VersionResponse) Descriptor() ([]byte, []int) { return fileDescriptorVersion, []int{0} }
|
||||
func (m *VersionResponse) Reset() { *m = VersionResponse{} }
|
||||
func (*VersionResponse) ProtoMessage() {}
|
||||
func (*VersionResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_128109001e578ffe, []int{0}
|
||||
}
|
||||
func (m *VersionResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *VersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_VersionResponse.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *VersionResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_VersionResponse.Merge(m, src)
|
||||
}
|
||||
func (m *VersionResponse) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *VersionResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_VersionResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_VersionResponse proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*VersionResponse)(nil), "containerd.services.version.v1.VersionResponse")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/services/version/v1/version.proto", fileDescriptor_128109001e578ffe)
|
||||
}
|
||||
|
||||
var fileDescriptor_128109001e578ffe = []byte{
|
||||
// 243 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
|
||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
|
||||
0x97, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0xe9, 0x97, 0x19, 0xc2, 0x98, 0x7a, 0x05, 0x45, 0xf9,
|
||||
0x25, 0xf9, 0x42, 0x72, 0x08, 0x1d, 0x7a, 0x30, 0xd5, 0x7a, 0x30, 0x25, 0x65, 0x86, 0x52, 0xd2,
|
||||
0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa, 0x60, 0xd5, 0x49, 0xa5, 0x69, 0xfa, 0xa9, 0xb9, 0x05,
|
||||
0x25, 0x95, 0x10, 0xcd, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0xa6, 0x3e, 0x88, 0x05, 0x11,
|
||||
0x55, 0x72, 0xe7, 0xe2, 0x0f, 0x83, 0x18, 0x10, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a,
|
||||
0x24, 0xc1, 0xc5, 0x0e, 0x35, 0x53, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc6, 0x15, 0x92,
|
||||
0xe2, 0xe2, 0x28, 0x4a, 0x2d, 0xcb, 0x04, 0x4b, 0x31, 0x81, 0xa5, 0xe0, 0x7c, 0xa3, 0x58, 0x2e,
|
||||
0x76, 0xa8, 0x41, 0x42, 0x41, 0x08, 0xa6, 0x98, 0x1e, 0xc4, 0x49, 0x7a, 0x30, 0x27, 0xe9, 0xb9,
|
||||
0x82, 0x9c, 0x24, 0xa5, 0xaf, 0x87, 0xdf, 0x2b, 0x7a, 0x68, 0x8e, 0x72, 0x8a, 0x3a, 0xf1, 0x50,
|
||||
0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78,
|
||||
0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x46, 0x30,
|
||||
0x26, 0xb1, 0x81, 0x9d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x95, 0x0d, 0x52, 0x23, 0xa9,
|
||||
0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
@ -59,10 +102,11 @@ var _ grpc.ClientConn
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for Version service
|
||||
|
||||
// VersionClient is the client API for Version service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type VersionClient interface {
|
||||
Version(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*VersionResponse, error)
|
||||
Version(ctx context.Context, in *types.Empty, opts ...grpc.CallOption) (*VersionResponse, error)
|
||||
}
|
||||
|
||||
type versionClient struct {
|
||||
@ -73,19 +117,18 @@ func NewVersionClient(cc *grpc.ClientConn) VersionClient {
|
||||
return &versionClient{cc}
|
||||
}
|
||||
|
[Remaining hunks of the Version service stubs (containerd.services.version.v1.Version): the client method, the VersionServer interface, and _Version_Version_Handler switch from google_protobuf.Empty to types.Empty, and grpc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, c.cc, opts...) becomes c.cc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, opts...); MarshalTo, Size, and String gain XXX_unrecognized handling; Unmarshal rewrites the varint decodes as uint64(b&0x7F) << shift and adds postIndex < 0 and (iNdEx + skippy) < 0 bounds checks; skipVersion moves iNdEx += length after the negative-length check and adds an iNdEx < 0 check; the trailing func init()/var fileDescriptorVersion block (a 243-byte gzipped FileDescriptorProto) is removed from the end of the file.]
333 vendor/github.com/containerd/containerd/api/types/descriptor.pb.go (generated, vendored)
[Hunks for descriptor.pb.go: the old package comment block (listing descriptor.proto, metrics.proto, mount.proto, platform.proto and the Descriptor, Metric, Mount, Platform messages) and the one-per-line imports are replaced by a grouped import block that adds github.com/gogo/protobuf/sortkeys; the Descriptor struct gains an Annotations map[string]string field plus XXX_NoUnkeyedLiteral, XXX_unrecognized, and XXX_sizecache fields, along with new XXX_Unmarshal, XXX_Marshal, XXX_Merge, XXX_Size, and XXX_DiscardUnknown methods backed by xxx_messageInfo_Descriptor; Descriptor() and init() now reference fileDescriptor_37f958df3707db9e (a 311-byte gzipped FileDescriptorProto) and also register the Descriptor.AnnotationsEntry map type; MarshalTo, Size, and String gain annotation-map and XXX_unrecognized handling; Unmarshal rewrites the varint decodes as uint64(b&0x7F) << shift, adds a case 5 that decodes the Annotations map, and adds postIndex < 0 and (iNdEx + skippy) < 0 bounds checks; skipDescriptor is hardened the same way; the trailing func init()/var fileDescriptorDescriptor block (a 234-byte descriptor) is removed from the end of the file.]
1 vendor/github.com/containerd/containerd/api/types/descriptor.proto (generated, vendored)
@@ -15,4 +15,5 @@ message Descriptor {
 	string media_type = 1;
 	string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
 	int64 size = 3;
+	map<string, string> annotations = 5;
 }
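The regenerated Go type exposes the new proto field as Annotations map[string]string on containerd's Descriptor. A minimal sketch of how a caller might populate it, assuming the vendored package is imported as github.com/containerd/containerd/api/types; the media type, digest, size, and annotation key/value here are illustrative, not taken from the diff:

package main

import (
	"fmt"

	types "github.com/containerd/containerd/api/types"
)

func main() {
	// Annotations corresponds to the map<string, string> field added as field 5
	// in descriptor.proto above.
	desc := types.Descriptor{
		MediaType: "application/vnd.oci.image.manifest.v1+json",
		Digest:    "sha256:0000000000000000000000000000000000000000000000000000000000000000",
		Size_:     1024,
		Annotations: map[string]string{
			"org.example.annotation": "value", // illustrative annotation
		},
	}
	// String() is the generated gogo/protobuf stringer shown in the diff.
	fmt.Println(desc.String())
}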
180 vendor/github.com/containerd/containerd/api/types/metrics.pb.go (generated, vendored)
[Hunks for metrics.pb.go: the one-per-line imports (google_protobuf1/types1 aliases of github.com/gogo/protobuf/types) are replaced by a grouped import block using the github_com_gogo_protobuf_types and types aliases; the Metric struct's Timestamp and Data tags gain proto3, Data changes from *google_protobuf1.Any to *types.Any, and the XXX_NoUnkeyedLiteral, XXX_unrecognized, and XXX_sizecache fields plus the XXX_Unmarshal, XXX_Marshal, XXX_Merge, XXX_Size, and XXX_DiscardUnknown methods are added; Descriptor() and init() now reference fileDescriptor_8d594d87edf6e6bc (a 258-byte gzipped FileDescriptorProto); MarshalTo, Size, and Unmarshal switch the StdTime helpers from the types1 alias to github_com_gogo_protobuf_types and gain XXX_unrecognized handling plus postIndex < 0 and (iNdEx + skippy) < 0 bounds checks; String prints types.Timestamp and types.Any instead of the google_protobuf aliases; skipMetrics is hardened the same way; the trailing func init()/var fileDescriptorMetrics block is removed from the end of the file.]
150 vendor/github.com/containerd/containerd/api/types/mount.pb.go (generated, vendored)
[Hunks for mount.pb.go: the one-per-line imports are replaced by a grouped import block; the Mount struct's Options tag gains proto3 and the struct gains XXX_NoUnkeyedLiteral, XXX_unrecognized, and XXX_sizecache fields plus the XXX_Unmarshal, XXX_Marshal, XXX_Merge, XXX_Size, and XXX_DiscardUnknown methods; Descriptor() and init() now reference fileDescriptor_920196890d4a7b9f (a 202-byte gzipped FileDescriptorProto); MarshalTo, Size, and String gain XXX_unrecognized handling; Unmarshal rewrites the varint decodes as uint64(b&0x7F) << shift and adds postIndex < 0 and (iNdEx + skippy) < 0 bounds checks; skipMount is hardened the same way; the trailing func init()/var fileDescriptorMount block is removed from the end of the file.]
149 vendor/github.com/containerd/containerd/api/types/platform.pb.go (generated, vendored)
[Hunks for platform.pb.go: the one-per-line imports are replaced by a grouped import block; the Platform struct gains XXX_NoUnkeyedLiteral, XXX_unrecognized, and XXX_sizecache fields plus the XXX_Unmarshal, XXX_Marshal, XXX_Merge, XXX_Size, and XXX_DiscardUnknown methods; Descriptor() and init() now reference fileDescriptor_24ba7a4b83e2367e (a 205-byte gzipped FileDescriptorProto); MarshalTo, Size, and String gain XXX_unrecognized handling; Unmarshal rewrites the varint decodes as uint64(b&0x7F) << shift and adds postIndex < 0 and (iNdEx + skippy) < 0 bounds checks; skipPlatform is hardened the same way; the trailing func init()/var fileDescriptorPlatform block is removed from the end of the file.]
329 vendor/github.com/containerd/containerd/api/types/task/task.pb.go generated vendored
@ -1,34 +1,19 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: github.com/containerd/containerd/api/types/task/task.proto
|
||||
|
||||
/*
|
||||
Package task is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
github.com/containerd/containerd/api/types/task/task.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Process
|
||||
ProcessInfo
|
||||
*/
|
||||
package task
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
|
||||
import _ "github.com/gogo/protobuf/types"
|
||||
import google_protobuf2 "github.com/gogo/protobuf/types"
|
||||
|
||||
import time "time"
|
||||
|
||||
import types "github.com/gogo/protobuf/types"
|
||||
|
||||
import strings "strings"
|
||||
import reflect "reflect"
|
||||
|
||||
import io "io"
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
|
||||
types "github.com/gogo/protobuf/types"
|
||||
io "io"
|
||||
math "math"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
time "time"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
@ -61,6 +46,7 @@ var Status_name = map[int32]string{
|
||||
4: "PAUSED",
|
||||
5: "PAUSING",
|
||||
}
|
||||
|
||||
var Status_value = map[string]int32{
|
||||
"UNKNOWN": 0,
|
||||
"CREATED": 1,
|
||||
@ -73,24 +59,58 @@ var Status_value = map[string]int32{
|
||||
func (x Status) String() string {
|
||||
return proto.EnumName(Status_name, int32(x))
|
||||
}
|
||||
func (Status) EnumDescriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
|
||||
|
||||
type Process struct {
|
||||
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
|
||||
ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
|
||||
Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
|
||||
Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
|
||||
Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
|
||||
Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
|
||||
Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
|
||||
ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
|
||||
ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
|
||||
func (Status) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_391ef18c8ab0dc16, []int{0}
|
||||
}
|
||||
|
||||
func (m *Process) Reset() { *m = Process{} }
|
||||
func (*Process) ProtoMessage() {}
|
||||
func (*Process) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
|
||||
type Process struct {
|
||||
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
|
||||
ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
|
||||
Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
|
||||
Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
|
||||
Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
|
||||
Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
|
||||
Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
|
||||
ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
|
||||
ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Process) Reset() { *m = Process{} }
|
||||
func (*Process) ProtoMessage() {}
|
||||
func (*Process) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_391ef18c8ab0dc16, []int{0}
|
||||
}
|
||||
func (m *Process) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Process) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Process.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Process) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Process.Merge(m, src)
|
||||
}
|
||||
func (m *Process) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Process) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Process.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Process proto.InternalMessageInfo
|
||||
|
||||
type ProcessInfo struct {
|
||||
// PID is the process ID.
|
||||
@ -98,18 +118,93 @@ type ProcessInfo struct {
|
||||
// Info contains additional process information.
|
||||
//
|
||||
// Info varies by platform.
|
||||
Info *google_protobuf2.Any `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"`
|
||||
Info *types.Any `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ProcessInfo) Reset() { *m = ProcessInfo{} }
|
||||
func (*ProcessInfo) ProtoMessage() {}
|
||||
func (*ProcessInfo) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{1} }
|
||||
func (m *ProcessInfo) Reset() { *m = ProcessInfo{} }
|
||||
func (*ProcessInfo) ProtoMessage() {}
|
||||
func (*ProcessInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_391ef18c8ab0dc16, []int{1}
|
||||
}
|
||||
func (m *ProcessInfo) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ProcessInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ProcessInfo.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ProcessInfo) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ProcessInfo.Merge(m, src)
|
||||
}
|
||||
func (m *ProcessInfo) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ProcessInfo) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ProcessInfo.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ProcessInfo proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("containerd.v1.types.Status", Status_name, Status_value)
|
||||
proto.RegisterType((*Process)(nil), "containerd.v1.types.Process")
|
||||
proto.RegisterType((*ProcessInfo)(nil), "containerd.v1.types.ProcessInfo")
|
||||
proto.RegisterEnum("containerd.v1.types.Status", Status_name, Status_value)
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/types/task/task.proto", fileDescriptor_391ef18c8ab0dc16)
|
||||
}
|
||||
|
||||
var fileDescriptor_391ef18c8ab0dc16 = []byte{
|
||||
// 545 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xd3, 0x40,
|
||||
0x18, 0xc6, 0x7d, 0x6e, 0xeb, 0xa6, 0xe7, 0xb6, 0x18, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, 0x64,
|
||||
0x31, 0xd8, 0x22, 0xdd, 0xd8, 0xf2, 0x4f, 0xc8, 0x42, 0x72, 0x23, 0x27, 0x11, 0x6c, 0x91, 0x13,
|
||||
0x5f, 0xcc, 0xa9, 0xcd, 0x9d, 0x65, 0x9f, 0x81, 0x6c, 0x8c, 0xa8, 0x13, 0x5f, 0xa0, 0x13, 0x7c,
|
||||
0x0a, 0x3e, 0x41, 0x46, 0x26, 0xc4, 0x14, 0xa8, 0x3f, 0x09, 0x3a, 0xdb, 0x49, 0x23, 0x60, 0x39,
|
||||
0xbd, 0xef, 0xf3, 0x7b, 0xee, 0xbd, 0xf7, 0x1e, 0xf8, 0x22, 0xc6, 0xec, 0x6d, 0x3e, 0x75, 0x66,
|
||||
0x74, 0xe1, 0xce, 0x28, 0x61, 0x21, 0x26, 0x28, 0x8d, 0x76, 0xcb, 0x30, 0xc1, 0x2e, 0x5b, 0x26,
|
||||
0x28, 0x73, 0x59, 0x98, 0x5d, 0x95, 0x87, 0x93, 0xa4, 0x94, 0x51, 0xf5, 0xd1, 0xbd, 0xcb, 0x79,
|
||||
0xf7, 0xdc, 0x29, 0x4d, 0x7a, 0x33, 0xa6, 0x31, 0x2d, 0xb9, 0xcb, 0xab, 0xca, 0xaa, 0x9b, 0x31,
|
||||
0xa5, 0xf1, 0x35, 0x72, 0xcb, 0x6e, 0x9a, 0xcf, 0x5d, 0x86, 0x17, 0x28, 0x63, 0xe1, 0x22, 0xa9,
|
||||
0x0d, 0x8f, 0xff, 0x36, 0x84, 0x64, 0x59, 0xa1, 0xf3, 0x42, 0x84, 0x87, 0x83, 0x94, 0xce, 0x50,
|
||||
0x96, 0xa9, 0x2d, 0x78, 0xbc, 0x7d, 0x74, 0x82, 0x23, 0x0d, 0x58, 0xc0, 0x3e, 0xea, 0x3c, 0x28,
|
||||
0xd6, 0xa6, 0xdc, 0xdd, 0xe8, 0x5e, 0x2f, 0x90, 0xb7, 0x26, 0x2f, 0x52, 0xcf, 0xa0, 0x88, 0x23,
|
||||
0x4d, 0x2c, 0x9d, 0x52, 0xb1, 0x36, 0x45, 0xaf, 0x17, 0x88, 0x38, 0x52, 0x15, 0xb8, 0x97, 0xe0,
|
||||
0x48, 0xdb, 0xb3, 0x80, 0x7d, 0x12, 0xf0, 0x52, 0xbd, 0x80, 0x52, 0xc6, 0x42, 0x96, 0x67, 0xda,
|
||||
0xbe, 0x05, 0xec, 0xd3, 0xd6, 0x13, 0xe7, 0x3f, 0x3f, 0x74, 0x86, 0xa5, 0x25, 0xa8, 0xad, 0x6a,
|
||||
0x13, 0x1e, 0x64, 0x2c, 0xc2, 0x44, 0x3b, 0xe0, 0x2f, 0x04, 0x55, 0xa3, 0x9e, 0xf1, 0x51, 0x11,
|
||||
0xcd, 0x99, 0x26, 0x95, 0x72, 0xdd, 0xd5, 0x3a, 0x4a, 0x53, 0xed, 0x70, 0xab, 0xa3, 0x34, 0x55,
|
||||
0x75, 0xd8, 0x60, 0x28, 0x5d, 0x60, 0x12, 0x5e, 0x6b, 0x0d, 0x0b, 0xd8, 0x8d, 0x60, 0xdb, 0xab,
|
||||
0x26, 0x94, 0xd1, 0x07, 0xcc, 0x26, 0xf5, 0x6e, 0x47, 0xe5, 0xc2, 0x90, 0x4b, 0xd5, 0x2a, 0x6a,
|
||||
0x1b, 0x1e, 0xf1, 0x0e, 0x45, 0x93, 0x90, 0x69, 0xd0, 0x02, 0xb6, 0xdc, 0xd2, 0x9d, 0x2a, 0x50,
|
||||
0x67, 0x13, 0xa8, 0x33, 0xda, 0x24, 0xde, 0x69, 0xac, 0xd6, 0xa6, 0xf0, 0xf9, 0x97, 0x09, 0x82,
|
||||
0x46, 0x75, 0xad, 0xcd, 0xce, 0x3d, 0x28, 0xd7, 0x19, 0x7b, 0x64, 0x4e, 0x37, 0xd9, 0x80, 0xfb,
|
||||
0x6c, 0x6c, 0xb8, 0x8f, 0xc9, 0x9c, 0x96, 0x39, 0xca, 0xad, 0xe6, 0x3f, 0xe3, 0xdb, 0x64, 0x19,
|
||||
0x94, 0x8e, 0x67, 0x3f, 0x00, 0x94, 0xea, 0xc5, 0x0c, 0x78, 0x38, 0xf6, 0x5f, 0xf9, 0x97, 0xaf,
|
||||
0x7d, 0x45, 0xd0, 0x1f, 0xde, 0xdc, 0x5a, 0x27, 0x15, 0x18, 0x93, 0x2b, 0x42, 0xdf, 0x13, 0xce,
|
||||
0xbb, 0x41, 0xbf, 0x3d, 0xea, 0xf7, 0x14, 0xb0, 0xcb, 0xbb, 0x29, 0x0a, 0x19, 0x8a, 0x38, 0x0f,
|
||||
0xc6, 0xbe, 0xef, 0xf9, 0x2f, 0x15, 0x71, 0x97, 0x07, 0x39, 0x21, 0x98, 0xc4, 0x9c, 0x0f, 0x47,
|
||||
0x97, 0x83, 0x41, 0xbf, 0xa7, 0xec, 0xed, 0xf2, 0x21, 0xa3, 0x49, 0x82, 0x22, 0xf5, 0x29, 0x94,
|
||||
0x06, 0xed, 0xf1, 0xb0, 0xdf, 0x53, 0xf6, 0x75, 0xe5, 0xe6, 0xd6, 0x3a, 0xae, 0xf0, 0x20, 0xcc,
|
||||
0xb3, 0x6a, 0x3a, 0xa7, 0x7c, 0xfa, 0xc1, 0xee, 0x6d, 0x8e, 0x31, 0x89, 0xf5, 0xd3, 0x4f, 0x5f,
|
||||
0x0c, 0xe1, 0xdb, 0x57, 0xa3, 0xfe, 0x4d, 0x47, 0x5b, 0xdd, 0x19, 0xc2, 0xcf, 0x3b, 0x43, 0xf8,
|
||||
0x58, 0x18, 0x60, 0x55, 0x18, 0xe0, 0x7b, 0x61, 0x80, 0xdf, 0x85, 0x01, 0xde, 0x08, 0x53, 0xa9,
|
||||
0x0c, 0xe2, 0xe2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x32, 0xd2, 0x86, 0x50, 0x03, 0x00,
|
||||
0x00,
|
||||
}
|
||||
|
||||
func (m *Process) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
@ -182,12 +277,15 @@ func (m *Process) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
dAtA[i] = 0x52
|
||||
i++
|
||||
i = encodeVarintTask(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt)))
|
||||
n1, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
|
||||
i = encodeVarintTask(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
|
||||
n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n1
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -221,6 +319,9 @@ func (m *ProcessInfo) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
i += n2
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -234,6 +335,9 @@ func encodeVarintTask(dAtA []byte, offset int, v uint64) int {
|
||||
return offset + 1
|
||||
}
|
||||
func (m *Process) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.ContainerID)
|
||||
@ -268,12 +372,18 @@ func (m *Process) Size() (n int) {
|
||||
if m.ExitStatus != 0 {
|
||||
n += 1 + sovTask(uint64(m.ExitStatus))
|
||||
}
|
||||
l = types.SizeOfStdTime(m.ExitedAt)
|
||||
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
|
||||
n += 1 + l + sovTask(uint64(l))
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ProcessInfo) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Pid != 0 {
|
||||
@ -283,6 +393,9 @@ func (m *ProcessInfo) Size() (n int) {
|
||||
l = m.Info.Size()
|
||||
n += 1 + l + sovTask(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@ -313,7 +426,8 @@ func (this *Process) String() string {
|
||||
`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
|
||||
`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
|
||||
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
|
||||
`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf1.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||
`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -324,7 +438,8 @@ func (this *ProcessInfo) String() string {
|
||||
}
|
||||
s := strings.Join([]string{`&ProcessInfo{`,
|
||||
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
|
||||
`Info:` + strings.Replace(fmt.Sprintf("%v", this.Info), "Any", "google_protobuf2.Any", 1) + `,`,
|
||||
`Info:` + strings.Replace(fmt.Sprintf("%v", this.Info), "Any", "types.Any", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -352,7 +467,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -380,7 +495,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -390,6 +505,9 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -409,7 +527,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -419,6 +537,9 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -438,7 +559,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Pid |= (uint32(b) & 0x7F) << shift
|
||||
m.Pid |= uint32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -457,7 +578,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Status |= (Status(b) & 0x7F) << shift
|
||||
m.Status |= Status(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -476,7 +597,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -486,6 +607,9 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -505,7 +629,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -515,6 +639,9 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -534,7 +661,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -544,6 +671,9 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -563,7 +693,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -583,7 +713,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.ExitStatus |= (uint32(b) & 0x7F) << shift
|
||||
m.ExitStatus |= uint32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -602,7 +732,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -611,10 +741,13 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
|
||||
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
@ -627,9 +760,13 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -654,7 +791,7 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -682,7 +819,7 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Pid |= (uint32(b) & 0x7F) << shift
|
||||
m.Pid |= uint32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -701,7 +838,7 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -710,11 +847,14 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Info == nil {
|
||||
m.Info = &google_protobuf2.Any{}
|
||||
m.Info = &types.Any{}
|
||||
}
|
||||
if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
@ -729,9 +869,13 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -795,10 +939,13 @@ func skipTask(dAtA []byte) (n int, err error) {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthTask
|
||||
}
|
||||
iNdEx += length
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthTask
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
@ -827,6 +974,9 @@ func skipTask(dAtA []byte) (n int, err error) {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthTask
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
@ -845,46 +995,3 @@ var (
|
||||
ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowTask = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/types/task/task.proto", fileDescriptorTask)
|
||||
}
|
||||
|
||||
var fileDescriptorTask = []byte{
|
||||
// 545 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xd3, 0x40,
|
||||
0x18, 0xc6, 0x7d, 0x6e, 0xeb, 0xa6, 0xe7, 0xb6, 0x18, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, 0x64,
|
||||
0x31, 0xd8, 0x22, 0xdd, 0xd8, 0xf2, 0x4f, 0xc8, 0x42, 0x72, 0x23, 0x27, 0x11, 0x6c, 0x91, 0x13,
|
||||
0x5f, 0xcc, 0xa9, 0xcd, 0x9d, 0x65, 0x9f, 0x81, 0x6c, 0x8c, 0xa8, 0x13, 0x5f, 0xa0, 0x13, 0x7c,
|
||||
0x0a, 0x3e, 0x41, 0x46, 0x26, 0xc4, 0x14, 0xa8, 0x3f, 0x09, 0x3a, 0xdb, 0x49, 0x23, 0x60, 0x39,
|
||||
0xbd, 0xef, 0xf3, 0x7b, 0xee, 0xbd, 0xf7, 0x1e, 0xf8, 0x22, 0xc6, 0xec, 0x6d, 0x3e, 0x75, 0x66,
|
||||
0x74, 0xe1, 0xce, 0x28, 0x61, 0x21, 0x26, 0x28, 0x8d, 0x76, 0xcb, 0x30, 0xc1, 0x2e, 0x5b, 0x26,
|
||||
0x28, 0x73, 0x59, 0x98, 0x5d, 0x95, 0x87, 0x93, 0xa4, 0x94, 0x51, 0xf5, 0xd1, 0xbd, 0xcb, 0x79,
|
||||
0xf7, 0xdc, 0x29, 0x4d, 0x7a, 0x33, 0xa6, 0x31, 0x2d, 0xb9, 0xcb, 0xab, 0xca, 0xaa, 0x9b, 0x31,
|
||||
0xa5, 0xf1, 0x35, 0x72, 0xcb, 0x6e, 0x9a, 0xcf, 0x5d, 0x86, 0x17, 0x28, 0x63, 0xe1, 0x22, 0xa9,
|
||||
0x0d, 0x8f, 0xff, 0x36, 0x84, 0x64, 0x59, 0xa1, 0xf3, 0x42, 0x84, 0x87, 0x83, 0x94, 0xce, 0x50,
|
||||
0x96, 0xa9, 0x2d, 0x78, 0xbc, 0x7d, 0x74, 0x82, 0x23, 0x0d, 0x58, 0xc0, 0x3e, 0xea, 0x3c, 0x28,
|
||||
0xd6, 0xa6, 0xdc, 0xdd, 0xe8, 0x5e, 0x2f, 0x90, 0xb7, 0x26, 0x2f, 0x52, 0xcf, 0xa0, 0x88, 0x23,
|
||||
0x4d, 0x2c, 0x9d, 0x52, 0xb1, 0x36, 0x45, 0xaf, 0x17, 0x88, 0x38, 0x52, 0x15, 0xb8, 0x97, 0xe0,
|
||||
0x48, 0xdb, 0xb3, 0x80, 0x7d, 0x12, 0xf0, 0x52, 0xbd, 0x80, 0x52, 0xc6, 0x42, 0x96, 0x67, 0xda,
|
||||
0xbe, 0x05, 0xec, 0xd3, 0xd6, 0x13, 0xe7, 0x3f, 0x3f, 0x74, 0x86, 0xa5, 0x25, 0xa8, 0xad, 0x6a,
|
||||
0x13, 0x1e, 0x64, 0x2c, 0xc2, 0x44, 0x3b, 0xe0, 0x2f, 0x04, 0x55, 0xa3, 0x9e, 0xf1, 0x51, 0x11,
|
||||
0xcd, 0x99, 0x26, 0x95, 0x72, 0xdd, 0xd5, 0x3a, 0x4a, 0x53, 0xed, 0x70, 0xab, 0xa3, 0x34, 0x55,
|
||||
0x75, 0xd8, 0x60, 0x28, 0x5d, 0x60, 0x12, 0x5e, 0x6b, 0x0d, 0x0b, 0xd8, 0x8d, 0x60, 0xdb, 0xab,
|
||||
0x26, 0x94, 0xd1, 0x07, 0xcc, 0x26, 0xf5, 0x6e, 0x47, 0xe5, 0xc2, 0x90, 0x4b, 0xd5, 0x2a, 0x6a,
|
||||
0x1b, 0x1e, 0xf1, 0x0e, 0x45, 0x93, 0x90, 0x69, 0xd0, 0x02, 0xb6, 0xdc, 0xd2, 0x9d, 0x2a, 0x50,
|
||||
0x67, 0x13, 0xa8, 0x33, 0xda, 0x24, 0xde, 0x69, 0xac, 0xd6, 0xa6, 0xf0, 0xf9, 0x97, 0x09, 0x82,
|
||||
0x46, 0x75, 0xad, 0xcd, 0xce, 0x3d, 0x28, 0xd7, 0x19, 0x7b, 0x64, 0x4e, 0x37, 0xd9, 0x80, 0xfb,
|
||||
0x6c, 0x6c, 0xb8, 0x8f, 0xc9, 0x9c, 0x96, 0x39, 0xca, 0xad, 0xe6, 0x3f, 0xe3, 0xdb, 0x64, 0x19,
|
||||
0x94, 0x8e, 0x67, 0x3f, 0x00, 0x94, 0xea, 0xc5, 0x0c, 0x78, 0x38, 0xf6, 0x5f, 0xf9, 0x97, 0xaf,
|
||||
0x7d, 0x45, 0xd0, 0x1f, 0xde, 0xdc, 0x5a, 0x27, 0x15, 0x18, 0x93, 0x2b, 0x42, 0xdf, 0x13, 0xce,
|
||||
0xbb, 0x41, 0xbf, 0x3d, 0xea, 0xf7, 0x14, 0xb0, 0xcb, 0xbb, 0x29, 0x0a, 0x19, 0x8a, 0x38, 0x0f,
|
||||
0xc6, 0xbe, 0xef, 0xf9, 0x2f, 0x15, 0x71, 0x97, 0x07, 0x39, 0x21, 0x98, 0xc4, 0x9c, 0x0f, 0x47,
|
||||
0x97, 0x83, 0x41, 0xbf, 0xa7, 0xec, 0xed, 0xf2, 0x21, 0xa3, 0x49, 0x82, 0x22, 0xf5, 0x29, 0x94,
|
||||
0x06, 0xed, 0xf1, 0xb0, 0xdf, 0x53, 0xf6, 0x75, 0xe5, 0xe6, 0xd6, 0x3a, 0xae, 0xf0, 0x20, 0xcc,
|
||||
0xb3, 0x6a, 0x3a, 0xa7, 0x7c, 0xfa, 0xc1, 0xee, 0x6d, 0x8e, 0x31, 0x89, 0xf5, 0xd3, 0x4f, 0x5f,
|
||||
0x0c, 0xe1, 0xdb, 0x57, 0xa3, 0xfe, 0x4d, 0x47, 0x5b, 0xdd, 0x19, 0xc2, 0xcf, 0x3b, 0x43, 0xf8,
|
||||
0x58, 0x18, 0x60, 0x55, 0x18, 0xe0, 0x7b, 0x61, 0x80, 0xdf, 0x85, 0x01, 0xde, 0x08, 0x53, 0xa9,
|
||||
0x0c, 0xe2, 0xe2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x32, 0xd2, 0x86, 0x50, 0x03, 0x00,
|
||||
0x00,
|
||||
}
|
||||
|
||||
35 vendor/github.com/containerd/containerd/client.go generated vendored
@ -136,6 +136,20 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
|
||||
if copts.services == nil && c.conn == nil {
|
||||
return nil, errors.New("no grpc connection or services is available")
|
||||
}
|
||||
|
||||
// check namespace labels for default runtime
|
||||
if copts.defaultRuntime == "" && copts.defaultns != "" {
|
||||
namespaces := c.NamespaceService()
|
||||
ctx := context.Background()
|
||||
if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil {
|
||||
if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok {
|
||||
c.runtime = defaultRuntime
|
||||
}
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
@ -152,6 +166,20 @@ func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) {
|
||||
conn: conn,
|
||||
runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS),
|
||||
}
|
||||
|
||||
// check namespace labels for default runtime
|
||||
if copts.defaultRuntime == "" && copts.defaultns != "" {
|
||||
namespaces := c.NamespaceService()
|
||||
ctx := context.Background()
|
||||
if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil {
|
||||
if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok {
|
||||
c.runtime = defaultRuntime
|
||||
}
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if copts.services != nil {
|
||||
c.services = *copts.services
|
||||
}
|
||||
@ -594,6 +622,13 @@ func (c *Client) VersionService() versionservice.VersionClient {
|
||||
return versionservice.NewVersionClient(c.conn)
|
||||
}
|
||||
|
||||
// Conn returns the underlying GRPC connection object
|
||||
func (c *Client) Conn() *grpc.ClientConn {
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
return c.conn
|
||||
}
|
||||
|
||||
// Version of containerd
|
||||
type Version struct {
|
||||
// Version number
|
||||
|
||||
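The client.go change above makes both `New` and `NewWithConn` consult the namespace's labels for a per-namespace default runtime when no runtime was set explicitly on the client. A minimal sketch of setting that label so a later client picks it up, assuming a daemon at the conventional `/run/containerd/containerd.sock` address, an already-existing namespace named "example", and the Linux v1 runtime name as the value (all illustrative, not part of this diff):

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/defaults"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	// "example" is an illustrative namespace; it is assumed to exist already.
	client, err := containerd.New("/run/containerd/containerd.sock",
		containerd.WithDefaultNamespace("example"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "example")

	// Label the namespace; clients later created with
	// WithDefaultNamespace("example") default to this runtime.
	err = client.NamespaceService().SetLabel(ctx, "example",
		defaults.DefaultRuntimeNSLabel, "io.containerd.runtime.v1.linux")
	if err != nil {
		log.Fatal(err)
	}
}
```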
9 vendor/github.com/containerd/containerd/container_checkpoint_opts.go generated vendored
@ -70,10 +70,11 @@ func WithCheckpointTask(ctx context.Context, client *Client, c *containers.Conta
|
||||
for _, d := range task.Descriptors {
|
||||
platformSpec := platforms.DefaultSpec()
|
||||
index.Manifests = append(index.Manifests, imagespec.Descriptor{
|
||||
MediaType: d.MediaType,
|
||||
Size: d.Size_,
|
||||
Digest: d.Digest,
|
||||
Platform: &platformSpec,
|
||||
MediaType: d.MediaType,
|
||||
Size: d.Size_,
|
||||
Digest: d.Digest,
|
||||
Platform: &platformSpec,
|
||||
Annotations: d.Annotations,
|
||||
})
|
||||
}
|
||||
// save copts
|
||||
|
||||
21 vendor/github.com/containerd/containerd/container_opts.go generated vendored
@ -20,7 +20,9 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/defaults"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/containerd/containerd/oci"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/containerd/snapshots"
|
||||
@ -107,7 +109,7 @@ func WithSnapshotter(name string) NewContainerOpts {
|
||||
// WithSnapshot uses an existing root filesystem for the container
|
||||
func WithSnapshot(id string) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
setSnapshotterIfEmpty(c)
|
||||
setSnapshotterIfEmpty(ctx, client, c)
|
||||
// check that the snapshot exists, if not, fail on creation
|
||||
if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil {
|
||||
return err
|
||||
@ -125,7 +127,7 @@ func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
setSnapshotterIfEmpty(c)
|
||||
setSnapshotterIfEmpty(ctx, client, c)
|
||||
parent := identity.ChainID(diffIDs).String()
|
||||
if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, parent, opts...); err != nil {
|
||||
return err
|
||||
@ -155,7 +157,7 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
setSnapshotterIfEmpty(c)
|
||||
setSnapshotterIfEmpty(ctx, client, c)
|
||||
parent := identity.ChainID(diffIDs).String()
|
||||
if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, parent, opts...); err != nil {
|
||||
return err
|
||||
@ -166,9 +168,18 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer
|
||||
}
|
||||
}
|
||||
|
||||
func setSnapshotterIfEmpty(c *containers.Container) {
|
||||
func setSnapshotterIfEmpty(ctx context.Context, client *Client, c *containers.Container) {
|
||||
if c.Snapshotter == "" {
|
||||
c.Snapshotter = DefaultSnapshotter
|
||||
defaultSnapshotter := DefaultSnapshotter
|
||||
namespaceService := client.NamespaceService()
|
||||
if ns, err := namespaces.NamespaceRequired(ctx); err == nil {
|
||||
if labels, err := namespaceService.Labels(ctx, ns); err == nil {
|
||||
if snapshotLabel, ok := labels[defaults.DefaultSnapshotterNSLabel]; ok {
|
||||
defaultSnapshotter = snapshotLabel
|
||||
}
|
||||
}
|
||||
}
|
||||
c.Snapshotter = defaultSnapshotter
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
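Symmetrically, `setSnapshotterIfEmpty` now takes a context and client so that container-creation opts such as `WithSnapshot` and `WithNewSnapshot` can fall back to a per-namespace snapshotter label instead of the build-time `DefaultSnapshotter`. A small sketch of labelling a namespace for this purpose; the function name and the "overlayfs" value are illustrative only:

```go
package example

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/defaults"
)

// setDefaultSnapshotter labels a namespace so that containers created in it
// without an explicit Snapshotter resolve to the given one. Sketch only.
func setDefaultSnapshotter(ctx context.Context, client *containerd.Client, ns, snapshotter string) error {
	return client.NamespaceService().SetLabel(ctx, ns,
		defaults.DefaultSnapshotterNSLabel, snapshotter)
}
```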
2 vendor/github.com/containerd/containerd/container_opts_unix.go generated vendored
@ -50,7 +50,7 @@ func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool
|
||||
return err
|
||||
}
|
||||
|
||||
setSnapshotterIfEmpty(c)
|
||||
setSnapshotterIfEmpty(ctx, client, c)
|
||||
|
||||
var (
|
||||
snapshotter = client.SnapshotService(c.Snapshotter)
|
||||
|
||||
78 vendor/github.com/containerd/containerd/content/local/store.go generated vendored
@ -33,6 +33,7 @@ import (
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/filters"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/containerd/continuity"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
@ -477,6 +478,35 @@ func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.
|
||||
return w, nil // lock is now held by w.
|
||||
}
|
||||
|
||||
func (s *store) resumeStatus(ref string, total int64, digester digest.Digester) (content.Status, error) {
|
||||
path, _, data := s.ingestPaths(ref)
|
||||
status, err := s.status(path)
|
||||
if err != nil {
|
||||
return status, errors.Wrap(err, "failed reading status of resume write")
|
||||
}
|
||||
if ref != status.Ref {
|
||||
// NOTE(stevvooe): This is fairly catastrophic. Either we have some
|
||||
// layout corruption or a hash collision for the ref key.
|
||||
return status, errors.Wrapf(err, "ref key does not match: %v != %v", ref, status.Ref)
|
||||
}
|
||||
|
||||
if total > 0 && status.Total > 0 && total != status.Total {
|
||||
return status, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
|
||||
}
|
||||
|
||||
// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
|
||||
fp, err := os.Open(data)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
|
||||
p := bufPool.Get().(*[]byte)
|
||||
status.Offset, err = io.CopyBuffer(digester.Hash(), fp, *p)
|
||||
bufPool.Put(p)
|
||||
fp.Close()
|
||||
return status, err
|
||||
}
|
||||
|
||||
// writer provides the main implementation of the Writer method. The caller
|
||||
// must hold the lock correctly and release on error if there is a problem.
|
||||
func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) {
|
||||
@ -498,45 +528,25 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
|
||||
updatedAt time.Time
|
||||
)
|
||||
|
||||
foundValidIngest := false
|
||||
// ensure that the ingest path has been created.
|
||||
if err := os.Mkdir(path, 0755); err != nil {
|
||||
if !os.IsExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
status, err := s.status(path)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed reading status of resume write")
|
||||
status, err := s.resumeStatus(ref, total, digester)
|
||||
if err == nil {
|
||||
foundValidIngest = true
|
||||
updatedAt = status.UpdatedAt
|
||||
startedAt = status.StartedAt
|
||||
total = status.Total
|
||||
offset = status.Offset
|
||||
} else {
|
||||
logrus.Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
if ref != status.Ref {
|
||||
// NOTE(stevvooe): This is fairly catastrophic. Either we have some
|
||||
// layout corruption or a hash collision for the ref key.
|
||||
return nil, errors.Wrapf(err, "ref key does not match: %v != %v", ref, status.Ref)
|
||||
}
|
||||
|
||||
if total > 0 && status.Total > 0 && total != status.Total {
|
||||
return nil, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
|
||||
}
|
||||
|
||||
// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
|
||||
fp, err := os.Open(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p := bufPool.Get().(*[]byte)
|
||||
offset, err = io.CopyBuffer(digester.Hash(), fp, *p)
|
||||
bufPool.Put(p)
|
||||
fp.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
updatedAt = status.UpdatedAt
|
||||
startedAt = status.StartedAt
|
||||
total = status.Total
|
||||
} else {
|
||||
if !foundValidIngest {
|
||||
startedAt = time.Now()
|
||||
updatedAt = startedAt
|
||||
|
||||
@ -546,11 +556,11 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil {
|
||||
if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil {
|
||||
if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
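The last hunk above also fixes a silently ignored error: `if writeTimestampFile(...); err != nil` runs the call purely for its side effect and then tests whatever `err` is already in scope, so a failed timestamp write was never reported. The corrected form binds a fresh `err` to the call and checks it immediately. A standalone illustration of the difference, using a stand-in function rather than the store's own helpers:

```go
package example

import "errors"

// failingWrite stands in for writeTimestampFile and always fails.
func failingWrite() error { return errors.New("disk full") }

func buggy() error {
	var err error
	// The call's result is discarded; err is still nil, so this never fires.
	if failingWrite(); err != nil {
		return err
	}
	return nil // reports success even though the write failed
}

func fixed() error {
	// err is bound to the call's result and checked immediately.
	if err := failingWrite(); err != nil {
		return err
	}
	return nil
}
```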
5 vendor/github.com/containerd/containerd/content/local/writer.go generated vendored
@ -74,6 +74,9 @@ func (w *writer) Write(p []byte) (n int, err error) {
|
||||
}
|
||||
|
||||
func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
|
||||
// Ensure even on error the writer is fully closed
|
||||
defer unlock(w.ref)
|
||||
|
||||
var base content.Info
|
||||
for _, opt := range opts {
|
||||
if err := opt(&base); err != nil {
|
||||
@ -81,8 +84,6 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure even on error the writer is fully closed
|
||||
defer unlock(w.ref)
|
||||
fp := w.fp
|
||||
w.fp = nil
|
||||
|
||||
|
||||
6 vendor/github.com/containerd/containerd/defaults/defaults.go generated vendored
@ -23,4 +23,10 @@ const (
|
||||
// DefaultMaxSendMsgSize defines the default maximum message size for
|
||||
// sending protobufs passed over the GRPC API.
|
||||
DefaultMaxSendMsgSize = 16 << 20
|
||||
// DefaultRuntimeNSLabel defines the namespace label to check for
|
||||
// default runtime
|
||||
DefaultRuntimeNSLabel = "containerd.io/defaults/runtime"
|
||||
// DefaultSnapshotterNSLabel defines the namespances label to check for
|
||||
// default snapshotter
|
||||
DefaultSnapshotterNSLabel = "containerd.io/defaults/snapshotter"
|
||||
)
|
||||
|
||||
4 vendor/github.com/containerd/containerd/defaults/defaults_windows.go generated vendored
@ -26,10 +26,10 @@ import (
|
||||
var (
|
||||
// DefaultRootDir is the default location used by containerd to store
|
||||
// persistent data
|
||||
DefaultRootDir = filepath.Join(os.Getenv("programfiles"), "containerd", "root")
|
||||
DefaultRootDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "root")
|
||||
// DefaultStateDir is the default location used by containerd to store
|
||||
// transient data
|
||||
DefaultStateDir = filepath.Join(os.Getenv("programfiles"), "containerd", "state")
|
||||
DefaultStateDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "state")
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
14 vendor/github.com/containerd/containerd/diff.go generated vendored
@ -80,17 +80,19 @@ func (r *diffRemote) Compare(ctx context.Context, a, b []mount.Mount, opts ...di
|
||||
|
||||
func toDescriptor(d *types.Descriptor) ocispec.Descriptor {
|
||||
return ocispec.Descriptor{
|
||||
MediaType: d.MediaType,
|
||||
Digest: d.Digest,
|
||||
Size: d.Size_,
|
||||
MediaType: d.MediaType,
|
||||
Digest: d.Digest,
|
||||
Size: d.Size_,
|
||||
Annotations: d.Annotations,
|
||||
}
|
||||
}
|
||||
|
||||
func fromDescriptor(d ocispec.Descriptor) *types.Descriptor {
|
||||
return &types.Descriptor{
|
||||
MediaType: d.MediaType,
|
||||
Digest: d.Digest,
|
||||
Size_: d.Size,
|
||||
MediaType: d.MediaType,
|
||||
Digest: d.Digest,
|
||||
Size_: d.Size,
|
||||
Annotations: d.Annotations,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
14 vendor/github.com/containerd/containerd/image_store.go generated vendored
@ -137,16 +137,18 @@ func imagesFromProto(imagespb []imagesapi.Image) []images.Image {
|
||||
|
||||
func descFromProto(desc *types.Descriptor) ocispec.Descriptor {
|
||||
return ocispec.Descriptor{
|
||||
MediaType: desc.MediaType,
|
||||
Size: desc.Size_,
|
||||
Digest: desc.Digest,
|
||||
MediaType: desc.MediaType,
|
||||
Size: desc.Size_,
|
||||
Digest: desc.Digest,
|
||||
Annotations: desc.Annotations,
|
||||
}
|
||||
}
|
||||
|
||||
func descToProto(desc *ocispec.Descriptor) types.Descriptor {
|
||||
return types.Descriptor{
|
||||
MediaType: desc.MediaType,
|
||||
Size_: desc.Size,
|
||||
Digest: desc.Digest,
|
||||
MediaType: desc.MediaType,
|
||||
Size_: desc.Size,
|
||||
Digest: desc.Digest,
|
||||
Annotations: desc.Annotations,
|
||||
}
|
||||
}
|
||||
|
||||
26 vendor/github.com/containerd/containerd/mount/mountinfo_linux.go generated vendored
@ -25,6 +25,8 @@ import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Self retrieves a list of mounts for the current running process.
|
||||
@ -41,13 +43,15 @@ func Self() ([]Info, error) {
|
||||
func parseInfoFile(r io.Reader) ([]Info, error) {
|
||||
s := bufio.NewScanner(r)
|
||||
out := []Info{}
|
||||
|
||||
var err error
|
||||
for s.Scan() {
|
||||
if err := s.Err(); err != nil {
|
||||
if err = s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
/*
|
||||
See http://man7.org/linux/man-pages/man5/proc.5.html
|
||||
|
||||
36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
|
||||
(1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
|
||||
(1) mount ID: unique identifier of the mount (may be reused after umount)
|
||||
@ -68,7 +72,7 @@ func parseInfoFile(r io.Reader) ([]Info, error) {
|
||||
numFields := len(fields)
|
||||
if numFields < 10 {
|
||||
// should be at least 10 fields
|
||||
return nil, fmt.Errorf("parsing '%s' failed: not enough fields (%d)", text, numFields)
|
||||
return nil, errors.Errorf("parsing '%s' failed: not enough fields (%d)", text, numFields)
|
||||
}
|
||||
p := Info{}
|
||||
// ignore any numbers parsing errors, as there should not be any
|
||||
@ -76,13 +80,19 @@ func parseInfoFile(r io.Reader) ([]Info, error) {
|
||||
p.Parent, _ = strconv.Atoi(fields[1])
|
||||
mm := strings.Split(fields[2], ":")
|
||||
if len(mm) != 2 {
|
||||
return nil, fmt.Errorf("parsing '%s' failed: unexpected minor:major pair %s", text, mm)
|
||||
return nil, errors.Errorf("parsing '%s' failed: unexpected minor:major pair %s", text, mm)
|
||||
}
|
||||
p.Major, _ = strconv.Atoi(mm[0])
|
||||
p.Minor, _ = strconv.Atoi(mm[1])
|
||||
|
||||
p.Root = fields[3]
|
||||
p.Mountpoint = fields[4]
|
||||
p.Root, err = strconv.Unquote(`"` + fields[3] + `"`)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "parsing '%s' failed: unable to unquote root field", fields[3])
|
||||
}
|
||||
p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "parsing '%s' failed: unable to unquote mount point field", fields[4])
|
||||
}
|
||||
p.Options = fields[5]
|
||||
|
||||
// one or more optional fields, when a separator (-)
|
||||
@ -101,11 +111,11 @@ func parseInfoFile(r io.Reader) ([]Info, error) {
|
||||
}
|
||||
}
|
||||
if i == numFields {
|
||||
return nil, fmt.Errorf("parsing '%s' failed: missing separator ('-')", text)
|
||||
return nil, errors.Errorf("parsing '%s' failed: missing separator ('-')", text)
|
||||
}
|
||||
// There should be 3 fields after the separator...
|
||||
if i+4 > numFields {
|
||||
return nil, fmt.Errorf("parsing '%s' failed: not enough fields after a separator", text)
|
||||
return nil, errors.Errorf("parsing '%s' failed: not enough fields after a separator", text)
|
||||
}
|
||||
// ... but in Linux <= 3.9 mounting a cifs with spaces in a share name
|
||||
// (like "//serv/My Documents") _may_ end up having a space in the last field
|
||||
|
||||
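The parser change above replaces the raw copies of the root and mount-point fields with `strconv.Unquote`, because /proc mountinfo octal-escapes awkward characters (a space appears as `\040`, a tab as `\011`). Wrapping the field in double quotes turns it into a Go string literal whose escapes the standard library can decode. A small sketch of the same trick in isolation, with a made-up mount point:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// A mount point containing a space, as it appears in /proc/self/mountinfo.
	field := `/mnt/My\040Documents`

	// Quoting the raw field lets strconv.Unquote decode the octal escape.
	decoded, err := strconv.Unquote(`"` + field + `"`)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded) // /mnt/My Documents
}
```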
11 vendor/github.com/containerd/containerd/oci/spec_opts.go generated vendored
@ -741,7 +741,9 @@ func WithCapabilities(caps []string) SpecOpts {
|
||||
}
|
||||
|
||||
// WithAllCapabilities sets all linux capabilities for the process
|
||||
var WithAllCapabilities = WithCapabilities(GetAllCapabilities())
|
||||
var WithAllCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error {
|
||||
return WithCapabilities(GetAllCapabilities())(ctx, client, c, s)
|
||||
}
|
||||
|
||||
// GetAllCapabilities returns all caps up to CAP_LAST_CAP
|
||||
// or CAP_BLOCK_SUSPEND on RHEL6
|
||||
@ -771,11 +773,14 @@ func capsContain(caps []string, s string) bool {
|
||||
}
|
||||
|
||||
func removeCap(caps *[]string, s string) {
|
||||
for i, c := range *caps {
|
||||
var newcaps []string
|
||||
for _, c := range *caps {
|
||||
if c == s {
|
||||
*caps = append((*caps)[:i], (*caps)[i+1:]...)
|
||||
continue
|
||||
}
|
||||
newcaps = append(newcaps, c)
|
||||
}
|
||||
*caps = newcaps
|
||||
}
|
||||
|
||||
// WithAddedCapabilities adds the provided capabilities
|
||||
|
||||
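The `removeCap` rewrite stops mutating the slice while ranging over it: the old `append((*caps)[:i], (*caps)[i+1:]...)` shifts the remaining elements under the still-advancing index, so a duplicate of the capability being removed could be skipped. Building a fresh slice removes every match. A standalone, value-returning sketch of the fixed behaviour (names here are illustrative, not the vendored function):

```go
package main

import "fmt"

// removeAll returns caps with every occurrence of s filtered out,
// mirroring the corrected removeCap above.
func removeAll(caps []string, s string) []string {
	var out []string
	for _, c := range caps {
		if c == s {
			continue
		}
		out = append(out, c)
	}
	return out
}

func main() {
	caps := []string{"CAP_CHOWN", "CAP_SYS_ADMIN", "CAP_SYS_ADMIN", "CAP_KILL"}
	fmt.Println(removeAll(caps, "CAP_SYS_ADMIN")) // [CAP_CHOWN CAP_KILL]
}
```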
11 vendor/github.com/containerd/containerd/plugin/plugin.go generated vendored
@ -20,6 +20,7 @@ import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/ttrpc"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
@ -123,6 +124,16 @@ type Service interface {
|
||||
Register(*grpc.Server) error
|
||||
}
|
||||
|
||||
// TTRPCService allows TTRPC services to be registered with the underlying server
|
||||
type TTRPCService interface {
|
||||
RegisterTTRPC(*ttrpc.Server) error
|
||||
}
|
||||
|
||||
// TCPService allows GRPC services to be registered with the underlying tcp server
|
||||
type TCPService interface {
|
||||
RegisterTCP(*grpc.Server) error
|
||||
}
|
||||
|
||||
var register = struct {
|
||||
sync.RWMutex
|
||||
r []*Registration
|
||||
|
||||
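The new `TTRPCService` interface lets a plugin expose its API over containerd's lighter-weight TTRPC server alongside (or instead of) GRPC. A minimal sketch of a service type satisfying both interfaces; the type name and the empty registration bodies are placeholders, not part of this diff:

```go
package example

import (
	"github.com/containerd/ttrpc"
	"google.golang.org/grpc"
)

// exampleService is a hypothetical plugin-provided service.
type exampleService struct{}

// Register satisfies plugin.Service: hook into the main GRPC server.
func (s *exampleService) Register(srv *grpc.Server) error {
	// A real service would call its generated GRPC registration here.
	return nil
}

// RegisterTTRPC satisfies the new plugin.TTRPCService interface.
func (s *exampleService) RegisterTTRPC(srv *ttrpc.Server) error {
	// Likewise, generated TTRPC registration would go here.
	return nil
}
```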
Some files were not shown because too many files have changed in this diff.