Compare commits
59 Commits
v20.10.6 ... v19.03.0-r
| SHA1 |
|---|
| a63faebcf1 |
| 49236a4391 |
| 17b3250f0f |
| 5d246f4998 |
| f913afa98c |
| 90f256aeab |
| ee10970b05 |
| 35c929ed5e |
| 60eb4ceaf7 |
| 1b15368c47 |
| a720cf572f |
| ec7a9ad6e4 |
| 5e413159e5 |
| d4226d2f73 |
| a7c10adf4e |
| a4f41d94db |
| 71e1883ca0 |
| 06eb05570a |
| a1b83ffd2c |
| 649097ffe0 |
| 57f1de13b3 |
| c5431132d7 |
| c66cebee7a |
| c105a58f65 |
| 545fd2ad76 |
| 315f7d7d04 |
| 6aedc5e912 |
| 3ac398aa49 |
| 781c427788 |
| 47e66c5812 |
| 9933222452 |
| 3f5553548b |
| c8273616ee |
| 8aebc31806 |
| 57ef4e32f4 |
| c15fb3a8e5 |
| cb07256868 |
| 5ec13f81a2 |
| 394c393998 |
| a4ba5831a0 |
| ac45214f7d |
| 12a1cf4783 |
| 7fd21aefd8 |
| 3f9063e775 |
| 8758cdca10 |
| 529b1e7ec7 |
| b8bfba8dc6 |
| d6ddcdfa6a |
| 7380aae601 |
| 6a6cd35985 |
| 941a493f49 |
| 1e275568f1 |
| 2a78b4e9a3 |
| 8cf8fc27fa |
| 68d67f2cbf |
| c1754d9e5d |
| af9b8c1be3 |
| 292fc5c580 |
| 11f5e33a90 |
.mailmap (5 changes)

@@ -44,6 +44,7 @@ Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@users.noreply.github.com>
Anuj Bahuguna <anujbahuguna.dev@gmail.com>
Anuj Bahuguna <anujbahuguna.dev@gmail.com> <abahuguna@fiberlink.com>
Anusha Ragunathan <anusha.ragunathan@docker.com> <anusha@docker.com>
Ao Li <la9249@163.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Arnaud Porterie <arnaud.porterie@docker.com> <icecrime@gmail.com>
Arthur Gautier <baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>
@@ -394,6 +395,8 @@ Stefan Berger <stefanb@linux.vnet.ibm.com>
Stefan Berger <stefanb@linux.vnet.ibm.com> <stefanb@us.ibm.com>
Stefan J. Wernli <swernli@microsoft.com> <swernli@ntdev.microsoft.com>
Stefan S. <tronicum@user.github.com>
Stefan Scherer <stefan.scherer@docker.com>
Stefan Scherer <stefan.scherer@docker.com> <scherer_stefan@icloud.com>
Stephen Day <stevvooe@gmail.com>
Stephen Day <stevvooe@gmail.com> <stephen.day@docker.com>
Stephen Day <stevvooe@gmail.com> <stevvooe@users.noreply.github.com>
@@ -402,6 +405,8 @@ Steve Richards <steve.richards@docker.com> stevejr <>
Sun Gengze <690388648@qq.com>
Sun Jianbo <wonderflow.sun@gmail.com>
Sun Jianbo <wonderflow.sun@gmail.com> <wonderflow@zju.edu.cn>
Sunny Gogoi <indiasuny000@gmail.com>
Sunny Gogoi <indiasuny000@gmail.com> <me@darkowlzz.space>
Sven Dowideit <SvenDowideit@home.org.au>
Sven Dowideit <SvenDowideit@home.org.au> <sven@t440s.home.gateway>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@docker.com>
AUTHORS (9 changes)

@@ -58,6 +58,7 @@ Anton Polonskiy <anton.polonskiy@gmail.com>
Antonio Murdaca <antonio.murdaca@gmail.com>
Antonis Kalipetis <akalipetis@gmail.com>
Anusha Ragunathan <anusha.ragunathan@docker.com>
Ao Li <la9249@163.com>
Arash Deshmeh <adeshmeh@ca.ibm.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Ashwini Oruganti <ashwini.oruganti@gmail.com>
@@ -158,6 +159,7 @@ David Cramer <davcrame@cisco.com>
David Dooling <dooling@gmail.com>
David Gageot <david@gageot.net>
David Lechner <david@lechnology.com>
David Scott <dave@recoil.org>
David Sheets <dsheets@docker.com>
David Williamson <david.williamson@docker.com>
David Xia <dxia@spotify.com>
@@ -300,6 +302,7 @@ Jim Galasyn <jim.galasyn@docker.com>
Jimmy Leger <jimmy.leger@gmail.com>
Jimmy Song <rootsongjc@gmail.com>
jimmyxian <jimmyxian2004@yahoo.com.cn>
Jintao Zhang <zhangjintao9020@gmail.com>
Joao Fernandes <joao.fernandes@docker.com>
Joe Doliner <jdoliner@pachyderm.io>
Joe Gordon <joe.gordon0@gmail.com>
@@ -471,9 +474,11 @@ Mrunal Patel <mrunalp@gmail.com>
muicoder <muicoder@gmail.com>
Muthukumar R <muthur@gmail.com>
Máximo Cuadros <mcuadros@gmail.com>
Mårten Cassel <marten.cassel@gmail.com>
Nace Oroz <orkica@gmail.com>
Nahum Shalman <nshalman@omniti.com>
Nalin Dahyabhai <nalin@redhat.com>
Nao YONASHIRO <owan.orisano@gmail.com>
Nassim 'Nass' Eddequiouaq <eddequiouaq.nassim@gmail.com>
Natalie Parker <nparker@omnifone.com>
Nate Brennand <nate.brennand@clever.com>
@@ -595,7 +600,7 @@ Spencer Brown <spencer@spencerbrown.org>
squeegels <1674195+squeegels@users.noreply.github.com>
Srini Brahmaroutu <srbrahma@us.ibm.com>
Stefan S. <tronicum@user.github.com>
Stefan Scherer <scherer_stefan@icloud.com>
Stefan Scherer <stefan.scherer@docker.com>
Stefan Weil <sw@weilnetz.de>
Stephane Jeandeaux <stephane.jeandeaux@gmail.com>
Stephen Day <stevvooe@gmail.com>
@@ -605,7 +610,9 @@ Steve Richards <steve.richards@docker.com>
Steven Burgess <steven.a.burgess@hotmail.com>
Subhajit Ghosh <isubuz.g@gmail.com>
Sun Jianbo <wonderflow.sun@gmail.com>
Sune Keller <absukl@almbrand.dk>
Sungwon Han <sungwon.han@navercorp.com>
Sunny Gogoi <indiasuny000@gmail.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sylvain Baubeau <sbaubeau@redhat.com>
Sébastien HOUZÉ <cto@verylastroom.com>
@@ -4,7 +4,7 @@ clone_folder: c:\gopath\src\github.com\docker\cli

environment:
GOPATH: c:\gopath
GOVERSION: 1.12.4
GOVERSION: 1.12.5
DEPVERSION: v0.4.1

install:
@@ -77,6 +77,8 @@ jobs:
echo 'Codecov failed to upload'
- store_test_results:
path: test-results
- store_artifacts:
path: test-results

validate:
working_directory: /work
@@ -101,5 +101,6 @@ func main() {
SchemaVersion: "0.1.0",
Vendor: "Docker Inc.",
Version: "testing",
Experimental: os.Getenv("HELLO_EXPERIMENTAL") != "",
})
}
@@ -12,9 +12,10 @@ import (
)

type fakeCandidate struct {
path string
exec bool
meta string
path string
exec bool
meta string
allowExperimental bool
}

func (c *fakeCandidate) Path() string {
@@ -35,9 +36,10 @@ func TestValidateCandidate(t *testing.T) {
builtinName = NamePrefix + "builtin"
builtinAlias = NamePrefix + "alias"

badPrefixPath = "/usr/local/libexec/cli-plugins/wobble"
badNamePath = "/usr/local/libexec/cli-plugins/docker-123456"
goodPluginPath = "/usr/local/libexec/cli-plugins/" + goodPluginName
badPrefixPath = "/usr/local/libexec/cli-plugins/wobble"
badNamePath = "/usr/local/libexec/cli-plugins/docker-123456"
goodPluginPath = "/usr/local/libexec/cli-plugins/" + goodPluginName
metaExperimental = `{"SchemaVersion": "0.1.0", "Vendor": "e2e-testing", "Experimental": true}`
)

fakeroot := &cobra.Command{Use: "docker"}
@@ -49,40 +51,46 @@ func TestValidateCandidate(t *testing.T) {
})

for _, tc := range []struct {
c *fakeCandidate
name string
c *fakeCandidate

// Either err or invalid may be non-empty, but not both (both can be empty for a good plugin).
err string
invalid string
}{
/* Each failing one of the tests */
{c: &fakeCandidate{path: ""}, err: "plugin candidate path cannot be empty"},
{c: &fakeCandidate{path: badPrefixPath}, err: fmt.Sprintf("does not have %q prefix", NamePrefix)},
{c: &fakeCandidate{path: badNamePath}, invalid: "did not match"},
{c: &fakeCandidate{path: builtinName}, invalid: `plugin "builtin" duplicates builtin command`},
{c: &fakeCandidate{path: builtinAlias}, invalid: `plugin "alias" duplicates an alias of builtin command "builtin"`},
{c: &fakeCandidate{path: goodPluginPath, exec: false}, invalid: fmt.Sprintf("failed to fetch metadata: faked a failure to exec %q", goodPluginPath)},
{c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `xyzzy`}, invalid: "invalid character"},
{c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{}`}, invalid: `plugin SchemaVersion "" is not valid`},
{c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "xyzzy"}`}, invalid: `plugin SchemaVersion "xyzzy" is not valid`},
{c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0"}`}, invalid: "plugin metadata does not define a vendor"},
{c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0", "Vendor": ""}`}, invalid: "plugin metadata does not define a vendor"},
{name: "empty path", c: &fakeCandidate{path: ""}, err: "plugin candidate path cannot be empty"},
{name: "bad prefix", c: &fakeCandidate{path: badPrefixPath}, err: fmt.Sprintf("does not have %q prefix", NamePrefix)},
{name: "bad path", c: &fakeCandidate{path: badNamePath}, invalid: "did not match"},
{name: "builtin command", c: &fakeCandidate{path: builtinName}, invalid: `plugin "builtin" duplicates builtin command`},
{name: "builtin alias", c: &fakeCandidate{path: builtinAlias}, invalid: `plugin "alias" duplicates an alias of builtin command "builtin"`},
{name: "fetch failure", c: &fakeCandidate{path: goodPluginPath, exec: false}, invalid: fmt.Sprintf("failed to fetch metadata: faked a failure to exec %q", goodPluginPath)},
{name: "metadata not json", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `xyzzy`}, invalid: "invalid character"},
{name: "empty schemaversion", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{}`}, invalid: `plugin SchemaVersion "" is not valid`},
{name: "invalid schemaversion", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "xyzzy"}`}, invalid: `plugin SchemaVersion "xyzzy" is not valid`},
{name: "no vendor", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0"}`}, invalid: "plugin metadata does not define a vendor"},
{name: "empty vendor", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0", "Vendor": ""}`}, invalid: "plugin metadata does not define a vendor"},
{name: "experimental required", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: metaExperimental}, invalid: "requires experimental CLI"},
// This one should work
{c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0", "Vendor": "e2e-testing"}`}},
{name: "valid", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0", "Vendor": "e2e-testing"}`}},
{name: "valid + allowing experimental", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0", "Vendor": "e2e-testing"}`, allowExperimental: true}},
{name: "experimental + allowing experimental", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: metaExperimental, allowExperimental: true}},
} {
p, err := newPlugin(tc.c, fakeroot)
if tc.err != "" {
assert.ErrorContains(t, err, tc.err)
} else if tc.invalid != "" {
assert.NilError(t, err)
assert.Assert(t, cmp.ErrorType(p.Err, reflect.TypeOf(&pluginError{})))
assert.ErrorContains(t, p.Err, tc.invalid)
} else {
assert.NilError(t, err)
assert.Equal(t, NamePrefix+p.Name, goodPluginName)
assert.Equal(t, p.SchemaVersion, "0.1.0")
assert.Equal(t, p.Vendor, "e2e-testing")
}
t.Run(tc.name, func(t *testing.T) {
p, err := newPlugin(tc.c, fakeroot, tc.c.allowExperimental)
if tc.err != "" {
assert.ErrorContains(t, err, tc.err)
} else if tc.invalid != "" {
assert.NilError(t, err)
assert.Assert(t, cmp.ErrorType(p.Err, reflect.TypeOf(&pluginError{})))
assert.ErrorContains(t, p.Err, tc.invalid)
} else {
assert.NilError(t, err)
assert.Equal(t, NamePrefix+p.Name, goodPluginName)
assert.Equal(t, p.SchemaVersion, "0.1.0")
assert.Equal(t, p.Vendor, "e2e-testing")
}
})
}
}
@@ -1,6 +1,7 @@
package manager

import (
"fmt"
"io/ioutil"
"os"
"os/exec"
@@ -27,10 +28,23 @@ func (e errPluginNotFound) Error() string {
return "Error: No such CLI plugin: " + string(e)
}

type errPluginRequireExperimental string

// Note: errPluginRequireExperimental implements notFound so that the plugin
// is skipped when listing the plugins.
func (e errPluginRequireExperimental) NotFound() {}

func (e errPluginRequireExperimental) Error() string {
return fmt.Sprintf("plugin candidate %q: requires experimental CLI", string(e))
}

type notFound interface{ NotFound() }

// IsNotFound is true if the given error is due to a plugin not being found.
func IsNotFound(err error) bool {
if e, ok := err.(*pluginError); ok {
err = e.Cause()
}
_, ok := err.(notFound)
return ok
}
@@ -117,12 +131,14 @@ func ListPlugins(dockerCli command.Cli, rootcmd *cobra.Command) ([]Plugin, error
continue
}
c := &candidate{paths[0]}
p, err := newPlugin(c, rootcmd)
p, err := newPlugin(c, rootcmd, dockerCli.ClientInfo().HasExperimental)
if err != nil {
return nil, err
}
p.ShadowedPaths = paths[1:]
plugins = append(plugins, p)
if !IsNotFound(p.Err) {
p.ShadowedPaths = paths[1:]
plugins = append(plugins, p)
}
}

return plugins, nil
@@ -159,12 +175,19 @@ func PluginRunCommand(dockerCli command.Cli, name string, rootcmd *cobra.Command
}

c := &candidate{path: path}
plugin, err := newPlugin(c, rootcmd)
plugin, err := newPlugin(c, rootcmd, dockerCli.ClientInfo().HasExperimental)
if err != nil {
return nil, err
}
if plugin.Err != nil {
// TODO: why are we not returning plugin.Err?

err := plugin.Err.(*pluginError).Cause()
// if an experimental plugin was invoked directly while experimental mode is off
// provide a more useful error message than "not found".
if err, ok := err.(errPluginRequireExperimental); ok {
return nil, err
}
return nil, errPluginNotFound(name)
}
cmd := exec.Command(plugin.Path, args...)
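The `errPluginRequireExperimental` change above leans on Go's marker-interface idiom: the error type satisfies the unexported `notFound` interface simply by declaring a `NotFound()` method, so `IsNotFound` can hide such plugins from listings instead of reporting them as broken. A minimal self-contained sketch of that idiom (identifier names here are illustrative, not the package's actual exported API):

```go
package main

import "fmt"

// notFound mirrors the marker interface used above: any error type that
// declares a NotFound() method is treated as "plugin not found".
type notFound interface{ NotFound() }

// errRequiresExperimental is a stand-in for errPluginRequireExperimental.
type errRequiresExperimental string

func (e errRequiresExperimental) Error() string {
	return fmt.Sprintf("plugin candidate %q: requires experimental CLI", string(e))
}

// NotFound marks the error so list operations silently skip the plugin.
func (e errRequiresExperimental) NotFound() {}

// isNotFound reports whether err carries the NotFound marker.
func isNotFound(err error) bool {
	_, ok := err.(notFound)
	return ok
}

func main() {
	var err error = errRequiresExperimental("helloworld")
	fmt.Println(isNotFound(err)) // true: the plugin is hidden rather than surfaced as an error
}
```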
@@ -22,4 +22,7 @@ type Metadata struct {
ShortDescription string `json:",omitempty"`
// URL is a pointer to the plugin's homepage.
URL string `json:",omitempty"`
// Experimental specifies whether the plugin is experimental.
// Experimental plugins are not displayed on non-experimental CLIs.
Experimental bool `json:",omitempty"`
}
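The new `Experimental` field rides along in the JSON metadata a plugin reports about itself. A rough sketch of that JSON shape, using a local struct that mirrors only the fields visible in the hunks above (the real type lives in `github.com/docker/cli/cli-plugins/manager`):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// metadata mirrors the fields shown in the Metadata hunk above; this is an
// illustrative copy, not the actual manager.Metadata definition.
type metadata struct {
	SchemaVersion    string `json:",omitempty"`
	Vendor           string `json:",omitempty"`
	Version          string `json:",omitempty"`
	ShortDescription string `json:",omitempty"`
	URL              string `json:",omitempty"`
	Experimental     bool   `json:",omitempty"`
}

func main() {
	out, _ := json.Marshal(metadata{
		SchemaVersion: "0.1.0",
		Vendor:        "e2e-testing",
		Experimental:  true, // hidden unless the CLI itself runs with experimental features enabled
	})
	// Equivalent to the metaExperimental fixture used in the plugin tests above.
	fmt.Println(string(out))
}
```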
@@ -33,7 +33,9 @@ type Plugin struct {
// is set, and is always a `pluginError`, but the `Plugin` is still
// returned with no error. An error is only returned due to a
// non-recoverable error.
func newPlugin(c Candidate, rootcmd *cobra.Command) (Plugin, error) {
//
// nolint: gocyclo
func newPlugin(c Candidate, rootcmd *cobra.Command, allowExperimental bool) (Plugin, error) {
path := c.Path()
if path == "" {
return Plugin{}, errors.New("plugin candidate path cannot be empty")
@@ -94,7 +96,10 @@ func newPlugin(c Candidate, rootcmd *cobra.Command) (Plugin, error) {
p.Err = wrapAsPluginError(err, "invalid metadata")
return p, nil
}

if p.Experimental && !allowExperimental {
p.Err = &pluginError{errPluginRequireExperimental(p.Name)}
return p, nil
}
if p.Metadata.SchemaVersion != "0.1.0" {
p.Err = NewPluginError("plugin SchemaVersion %q is not valid, must be 0.1.0", p.Metadata.SchemaVersion)
return p, nil
@@ -114,11 +114,14 @@ func newPluginCommand(dockerCli *command.DockerCli, plugin *cobra.Command, meta
fullname := manager.NamePrefix + name

cmd := &cobra.Command{
Use: fmt.Sprintf("docker [OPTIONS] %s [ARG...]", name),
Short: fullname + " is a Docker CLI plugin",
SilenceUsage: true,
SilenceErrors: true,
PersistentPreRunE: PersistentPreRunE,
Use: fmt.Sprintf("docker [OPTIONS] %s [ARG...]", name),
Short: fullname + " is a Docker CLI plugin",
SilenceUsage: true,
SilenceErrors: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
// We can't use this as the hook directly since it is initialised later (in runPlugin)
return PersistentPreRunE(cmd, args)
},
TraverseChildren: true,
DisableFlagsInUseLine: true,
}
@@ -14,7 +14,6 @@ import (
"github.com/docker/cli/cli/config/configfile"
dcontext "github.com/docker/cli/cli/context"
"github.com/docker/cli/cli/context/docker"
kubcontext "github.com/docker/cli/cli/context/kubernetes"
"github.com/docker/cli/cli/context/store"
"github.com/docker/cli/cli/debug"
cliflags "github.com/docker/cli/cli/flags"
@@ -210,11 +209,11 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...Initialize

cli.configFile = cliconfig.LoadDefaultConfigFile(cli.err)

baseContextSore := store.New(cliconfig.ContextStoreDir(), cli.contextStoreConfig)
baseContextStore := store.New(cliconfig.ContextStoreDir(), cli.contextStoreConfig)
cli.contextStore = &ContextStoreWithDefault{
Store: baseContextSore,
Store: baseContextStore,
Resolver: func() (*DefaultContext, error) {
return resolveDefaultContext(opts.Common, cli.ConfigFile(), cli.Err())
return ResolveDefaultContext(opts.Common, cli.ConfigFile(), cli.contextStoreConfig, cli.Err())
},
}
cli.currentContext, err = resolveContextName(opts.Common, cli.configFile, cli.contextStore)
@@ -259,10 +258,11 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...Initialize

// NewAPIClientFromFlags creates a new APIClient from command line flags
func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) {
storeConfig := DefaultContextStoreConfig()
store := &ContextStoreWithDefault{
Store: store.New(cliconfig.ContextStoreDir(), defaultContextStoreConfig()),
Store: store.New(cliconfig.ContextStoreDir(), storeConfig),
Resolver: func() (*DefaultContext, error) {
return resolveDefaultContext(opts, configFile, ioutil.Discard)
return ResolveDefaultContext(opts, configFile, storeConfig, ioutil.Discard)
},
}
contextName, err := resolveContextName(opts, configFile, store)
@@ -453,7 +453,7 @@ func NewDockerCli(ops ...DockerCliOption) (*DockerCli, error) {
WithContentTrustFromEnv(),
WithContainerizedClient(containerizedengine.NewClient),
}
cli.contextStoreConfig = defaultContextStoreConfig()
cli.contextStoreConfig = DefaultContextStoreConfig()
ops = append(defaultOps, ops...)
if err := cli.Apply(ops...); err != nil {
return nil, err
@@ -526,10 +526,22 @@ func resolveContextName(opts *cliflags.CommonOptions, config *configfile.ConfigF
return DefaultContextName, nil
}

func defaultContextStoreConfig() store.Config {
var defaultStoreEndpoints = []store.NamedTypeGetter{
store.EndpointTypeGetter(docker.DockerEndpoint, func() interface{} { return &docker.EndpointMeta{} }),
}

// RegisterDefaultStoreEndpoints registers a new named endpoint
// metadata type with the default context store config, so that
// endpoint will be supported by stores using the config returned by
// DefaultContextStoreConfig.
func RegisterDefaultStoreEndpoints(ep ...store.NamedTypeGetter) {
defaultStoreEndpoints = append(defaultStoreEndpoints, ep...)
}

// DefaultContextStoreConfig returns a new store.Config with the default set of endpoints configured.
func DefaultContextStoreConfig() store.Config {
return store.NewConfig(
func() interface{} { return &DockerContext{} },
store.EndpointTypeGetter(docker.DockerEndpoint, func() interface{} { return &docker.EndpointMeta{} }),
store.EndpointTypeGetter(kubcontext.KubernetesEndpoint, func() interface{} { return &kubcontext.EndpointMeta{} }),
defaultStoreEndpoints...,
)
}
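The `RegisterDefaultStoreEndpoints` / `DefaultContextStoreConfig` pair above lets an endpoint package add itself to the default context-store configuration from an `init()` function, which is why `cli/command` can drop its direct import of the Kubernetes context package (the kubernetes endpoint file further down does exactly that). A self-contained sketch of the registration pattern, with illustrative names standing in for the real `store` types:

```go
package main

import "fmt"

// namedGetter mirrors the role of store.NamedTypeGetter: an endpoint name
// plus a factory for its metadata type.
type namedGetter struct {
	name string
	get  func() interface{}
}

// defaultEndpoints plays the role of defaultStoreEndpoints in the diff above.
var defaultEndpoints = []namedGetter{
	{name: "docker", get: func() interface{} { return &struct{ Host string }{} }},
}

// registerDefaultEndpoints mirrors RegisterDefaultStoreEndpoints.
func registerDefaultEndpoints(eps ...namedGetter) {
	defaultEndpoints = append(defaultEndpoints, eps...)
}

// In the real code this init() lives in the kubernetes endpoint package, so
// merely importing that package makes the endpoint part of the default config.
func init() {
	registerDefaultEndpoints(namedGetter{
		name: "kubernetes",
		get:  func() interface{} { return &struct{ DefaultNamespace string }{} },
	})
}

func main() {
	for _, ep := range defaultEndpoints {
		fmt.Printf("%s -> %T\n", ep.name, ep.get())
	}
}
```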
@@ -7,7 +7,6 @@ import (
"strconv"

"github.com/docker/cli/cli/context/docker"
"github.com/docker/cli/cli/context/kubernetes"
"github.com/docker/cli/cli/context/store"
"github.com/docker/cli/cli/streams"
clitypes "github.com/docker/cli/types"
@@ -97,7 +96,7 @@ func WithContainerizedClient(containerizedFn func(string) (clitypes.Containerize
func WithContextEndpointType(endpointName string, endpointType store.TypeGetter) DockerCliOption {
return func(cli *DockerCli) error {
switch endpointName {
case docker.DockerEndpoint, kubernetes.KubernetesEndpoint:
case docker.DockerEndpoint:
return fmt.Errorf("cannot change %q endpoint type", endpointName)
}
cli.contextStoreConfig.SetEndpoint(endpointName, endpointType)
@@ -14,7 +14,7 @@ import (
func newImportCommand(dockerCli command.Cli) *cobra.Command {
cmd := &cobra.Command{
Use: "import CONTEXT FILE|-",
Short: "Import a context from a tar file",
Short: "Import a context from a tar or zip file",
Args: cli.ExactArgs(2),
RunE: func(cmd *cobra.Command, args []string) error {
return RunImport(dockerCli, args[0], args[1])
@@ -28,6 +28,7 @@ func RunImport(dockerCli command.Cli, name string, source string) error {
if err := checkContextNameForCreation(dockerCli.ContextStore(), name); err != nil {
return err
}

var reader io.Reader
if source == "-" {
reader = dockerCli.In()
@@ -43,6 +44,7 @@ func RunImport(dockerCli command.Cli, name string, source string) error {
if err := store.Import(name, dockerCli.ContextStore(), reader); err != nil {
return err
}

fmt.Fprintln(dockerCli.Out(), name)
fmt.Fprintf(dockerCli.Err(), "Successfully imported context %q\n", name)
return nil
@@ -3,15 +3,11 @@ package command
import (
"fmt"
"io"
"os"
"path/filepath"

"github.com/docker/cli/cli/config/configfile"
"github.com/docker/cli/cli/context/docker"
"github.com/docker/cli/cli/context/kubernetes"
"github.com/docker/cli/cli/context/store"
cliflags "github.com/docker/cli/cli/flags"
"github.com/docker/docker/pkg/homedir"
"github.com/pkg/errors"
)

@@ -20,7 +16,7 @@ const (
DefaultContextName = "default"
)

// DefaultContext contains the default context data for all enpoints
// DefaultContext contains the default context data for all endpoints
type DefaultContext struct {
Meta store.Metadata
TLS store.ContextTLSData
@@ -35,8 +31,21 @@ type ContextStoreWithDefault struct {
Resolver DefaultContextResolver
}

// resolveDefaultContext creates a Metadata for the current CLI invocation parameters
func resolveDefaultContext(opts *cliflags.CommonOptions, config *configfile.ConfigFile, stderr io.Writer) (*DefaultContext, error) {
// EndpointDefaultResolver is implemented by any EndpointMeta object
// which wants to be able to populate the store with whatever their default is.
type EndpointDefaultResolver interface {
// ResolveDefault returns values suitable for storing in store.Metadata.Endpoints
// and store.ContextTLSData.Endpoints.
//
// An error is only returned for something fatal, not simply
// the lack of a default (e.g. because the config file which
// would contain it is missing). If there is no default then
// returns nil, nil, nil.
ResolveDefault(Orchestrator) (interface{}, *store.EndpointTLSData, error)
}

// ResolveDefaultContext creates a Metadata for the current CLI invocation parameters
func ResolveDefaultContext(opts *cliflags.CommonOptions, config *configfile.ConfigFile, storeconfig store.Config, stderr io.Writer) (*DefaultContext, error) {
stackOrchestrator, err := GetStackOrchestrator("", "", config.StackOrchestrator, stderr)
if err != nil {
return nil, err
@@ -62,20 +71,28 @@ func resolveDefaultContext(opts *cliflags.CommonOptions, config *configfile.Conf
contextTLSData.Endpoints[docker.DockerEndpoint] = *dockerEP.TLSData.ToStoreTLSData()
}

// Default context uses env-based kubeconfig for Kubernetes endpoint configuration
kubeconfig := os.Getenv("KUBECONFIG")
if kubeconfig == "" {
kubeconfig = filepath.Join(homedir.Get(), ".kube/config")
}
kubeEP, err := kubernetes.FromKubeConfig(kubeconfig, "", "")
if (stackOrchestrator == OrchestratorKubernetes || stackOrchestrator == OrchestratorAll) && err != nil {
return nil, errors.Wrapf(err, "default orchestrator is %s but kubernetes endpoint could not be found", stackOrchestrator)
}
if err == nil {
contextMetadata.Endpoints[kubernetes.KubernetesEndpoint] = kubeEP.EndpointMeta
if kubeEP.TLSData != nil {
contextTLSData.Endpoints[kubernetes.KubernetesEndpoint] = *kubeEP.TLSData.ToStoreTLSData()
if err := storeconfig.ForeachEndpointType(func(n string, get store.TypeGetter) error {
if n == docker.DockerEndpoint { // handled above
return nil
}
ep := get()
if i, ok := ep.(EndpointDefaultResolver); ok {
meta, tls, err := i.ResolveDefault(stackOrchestrator)
if err != nil {
return err
}
if meta == nil {
return nil
}
contextMetadata.Endpoints[n] = meta
if tls != nil {
contextTLSData.Endpoints[n] = *tls
}
}
// Nothing to be done
return nil
}); err != nil {
return nil, err
}

return &DefaultContext{Meta: contextMetadata, TLS: contextTLSData}, nil
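The `EndpointDefaultResolver` interface above is what lets `ResolveDefaultContext` stay agnostic of individual endpoints: each registered metadata type can opt in by returning its own default, or signal "no default" with `nil, nil, nil`. A minimal sketch of an implementation, using local stand-ins for the `command` and `store` types so it compiles on its own (the endpoint name and host below are purely hypothetical):

```go
package main

import "fmt"

// Local stand-ins for command.Orchestrator and store.EndpointTLSData.
type orchestrator string

type endpointTLSData struct{ Files map[string][]byte }

// endpointDefaultResolver mirrors the interface introduced above.
type endpointDefaultResolver interface {
	ResolveDefault(orchestrator) (interface{}, *endpointTLSData, error)
}

// fooEndpointMeta is a hypothetical endpoint metadata type.
type fooEndpointMeta struct {
	Host string
}

// ResolveDefault returns the endpoint's default configuration, or
// (nil, nil, nil) when no default exists; per the documented contract,
// only fatal problems are reported as errors.
func (m fooEndpointMeta) ResolveDefault(o orchestrator) (interface{}, *endpointTLSData, error) {
	host := "tcp://example.invalid:2376" // would normally come from config or the environment
	if host == "" {
		return nil, nil, nil // no default available; not an error
	}
	return fooEndpointMeta{Host: host}, nil, nil
}

func main() {
	var r endpointDefaultResolver = fooEndpointMeta{}
	meta, tls, err := r.ResolveDefault("swarm")
	fmt.Println(meta, tls, err)
}
```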
@@ -8,7 +8,6 @@ import (

"github.com/docker/cli/cli/config/configfile"
"github.com/docker/cli/cli/context/docker"
"github.com/docker/cli/cli/context/kubernetes"
"github.com/docker/cli/cli/context/store"
cliflags "github.com/docker/cli/cli/flags"
"github.com/docker/go-connections/tlsconfig"
@@ -63,22 +62,20 @@ func TestDefaultContextInitializer(t *testing.T) {
cli, err := NewDockerCli()
assert.NilError(t, err)
defer env.Patch(t, "DOCKER_HOST", "ssh://someswarmserver")()
defer env.Patch(t, "KUBECONFIG", "./testdata/test-kubeconfig")()
cli.configFile = &configfile.ConfigFile{
StackOrchestrator: "all",
StackOrchestrator: "swarm",
}
ctx, err := resolveDefaultContext(&cliflags.CommonOptions{
ctx, err := ResolveDefaultContext(&cliflags.CommonOptions{
TLS: true,
TLSOptions: &tlsconfig.Options{
CAFile: "./testdata/ca.pem",
},
}, cli.ConfigFile(), cli.Err())
}, cli.ConfigFile(), DefaultContextStoreConfig(), cli.Err())
assert.NilError(t, err)
assert.Equal(t, "default", ctx.Meta.Name)
assert.Equal(t, OrchestratorAll, ctx.Meta.Metadata.(DockerContext).StackOrchestrator)
assert.Equal(t, OrchestratorSwarm, ctx.Meta.Metadata.(DockerContext).StackOrchestrator)
assert.DeepEqual(t, "ssh://someswarmserver", ctx.Meta.Endpoints[docker.DockerEndpoint].(docker.EndpointMeta).Host)
golden.Assert(t, string(ctx.TLS.Endpoints[docker.DockerEndpoint].Files["ca.pem"]), "ca.pem")
assert.DeepEqual(t, "zoinx", ctx.Meta.Endpoints[kubernetes.KubernetesEndpoint].(kubernetes.EndpointMeta).DefaultNamespace)
}

func TestExportDefaultImport(t *testing.T) {
@@ -14,6 +14,7 @@ import (
"strings"

"github.com/containerd/console"
"github.com/containerd/containerd/platforms"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/image/build"
@@ -173,7 +174,7 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
}))
}

s.Allow(authprovider.NewDockerAuthProvider())
s.Allow(authprovider.NewDockerAuthProvider(os.Stderr))
if len(options.secrets) > 0 {
sp, err := parseSecretSpecs(options.secrets)
if err != nil {
@@ -215,6 +216,14 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
})
}

if v := os.Getenv("BUILDKIT_PROGRESS"); v != "" && options.progress == "auto" {
options.progress = v
}

if strings.EqualFold(options.platform, "local") {
options.platform = platforms.DefaultString()
}

eg.Go(func() error {
defer func() { // make sure the Status ends cleanly on build errors
s.Close()
@@ -15,6 +15,7 @@ import (
"github.com/docker/cli/cli/version"
"github.com/docker/cli/kubernetes"
"github.com/docker/cli/templates"
kubeapi "github.com/docker/compose-on-kubernetes/api"
"github.com/docker/docker/api/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -243,7 +244,7 @@ func getKubernetesVersion(dockerCli command.Cli, kubeConfig string) *kubernetesV
err error
)
if dockerCli.CurrentContext() == "" {
clientConfig = kubernetes.NewKubernetesConfig(kubeConfig)
clientConfig = kubeapi.NewKubernetesConfig(kubeConfig)
} else {
clientConfig, err = kubecontext.ConfigFromContext(dockerCli.CurrentContext(), dockerCli.ContextStore())
}
@@ -1,9 +1,15 @@
package kubernetes

import (
"os"
"path/filepath"

"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/context"
"github.com/docker/cli/cli/context/store"
"github.com/docker/cli/kubernetes"
api "github.com/docker/compose-on-kubernetes/api"
"github.com/docker/docker/pkg/homedir"
"github.com/pkg/errors"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
@@ -17,6 +23,8 @@ type EndpointMeta struct {
Exec *clientcmdapi.ExecConfig `json:",omitempty"`
}

var _ command.EndpointDefaultResolver = &EndpointMeta{}

// Endpoint is a typed wrapper around a context-store generic endpoint describing
// a Kubernetes endpoint, with TLS data
type Endpoint struct {
@@ -24,6 +32,12 @@ type Endpoint struct {
TLSData *context.TLSData
}

func init() {
command.RegisterDefaultStoreEndpoints(
store.EndpointTypeGetter(KubernetesEndpoint, func() interface{} { return &EndpointMeta{} }),
)
}

// WithTLSData loads TLS materials for the endpoint
func (c *EndpointMeta) WithTLSData(s store.Reader, contextName string) (Endpoint, error) {
tlsData, err := context.LoadTLSData(s, contextName, KubernetesEndpoint)
@@ -61,6 +75,32 @@ func (c *Endpoint) KubernetesConfig() clientcmd.ClientConfig {
return clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
}

// ResolveDefault returns endpoint metadata for the default Kubernetes
// endpoint, which is derived from the env-based kubeconfig.
func (c *EndpointMeta) ResolveDefault(stackOrchestrator command.Orchestrator) (interface{}, *store.EndpointTLSData, error) {
kubeconfig := os.Getenv("KUBECONFIG")
if kubeconfig == "" {
kubeconfig = filepath.Join(homedir.Get(), ".kube/config")
}
kubeEP, err := FromKubeConfig(kubeconfig, "", "")
if err != nil {
if stackOrchestrator == command.OrchestratorKubernetes || stackOrchestrator == command.OrchestratorAll {
return nil, nil, errors.Wrapf(err, "default orchestrator is %s but unable to resolve kubernetes endpoint", stackOrchestrator)
}

// We deliberately quash the error here, returning nil
// for the first argument is sufficient to indicate we weren't able to
// provide a default
return nil, nil, nil
}

var tls *store.EndpointTLSData
if kubeEP.TLSData != nil {
tls = kubeEP.TLSData.ToStoreTLSData()
}
return kubeEP.EndpointMeta, tls, nil
}

// EndpointFromContext extracts kubernetes endpoint info from current context
func EndpointFromContext(metadata store.Metadata) *EndpointMeta {
ep, ok := metadata.Endpoints[KubernetesEndpoint]
@@ -91,5 +131,5 @@ func ConfigFromContext(name string, s store.Reader) (clientcmd.ClientConfig, err
return ep.KubernetesConfig(), nil
}
// context has no kubernetes endpoint
return kubernetes.NewKubernetesConfig(""), nil
return api.NewKubernetesConfig(""), nil
}
cli/context/kubernetes/load_test.go (new file, 25 lines)

@@ -0,0 +1,25 @@
package kubernetes

import (
"testing"

"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/config/configfile"
cliflags "github.com/docker/cli/cli/flags"
"gotest.tools/assert"
"gotest.tools/env"
)

func TestDefaultContextInitializer(t *testing.T) {
cli, err := command.NewDockerCli()
assert.NilError(t, err)
defer env.Patch(t, "KUBECONFIG", "./testdata/test-kubeconfig")()
configFile := &configfile.ConfigFile{
StackOrchestrator: "all",
}
ctx, err := command.ResolveDefaultContext(&cliflags.CommonOptions{}, configFile, command.DefaultContextStoreConfig(), cli.Err())
assert.NilError(t, err)
assert.Equal(t, "default", ctx.Meta.Name)
assert.Equal(t, command.OrchestratorAll, ctx.Meta.Metadata.(command.DockerContext).StackOrchestrator)
assert.DeepEqual(t, "zoinx", ctx.Meta.Endpoints[KubernetesEndpoint].(EndpointMeta).DefaultNamespace)
}
cli/context/store/io_utils.go (new file, 29 lines)

@@ -0,0 +1,29 @@
package store

import (
"errors"
"io"
)

// LimitedReader is a fork of io.LimitedReader to override Read.
type LimitedReader struct {
R io.Reader
N int64 // max bytes remaining
}

// Read is a fork of io.LimitedReader.Read that returns an error when limit exceeded.
func (l *LimitedReader) Read(p []byte) (n int, err error) {
if l.N < 0 {
return 0, errors.New("read exceeds the defined limit")
}
if l.N == 0 {
return 0, io.EOF
}
// have to cap N + 1 otherwise we won't hit limit err
if int64(len(p)) > l.N+1 {
p = p[0 : l.N+1]
}
n, err = l.R.Read(p)
l.N -= int64(n)
return n, err
}
cli/context/store/io_utils_test.go (new file, 24 lines)

@@ -0,0 +1,24 @@
package store

import (
"io/ioutil"
"strings"
"testing"

"gotest.tools/assert"
)

func TestLimitReaderReadAll(t *testing.T) {
r := strings.NewReader("Reader")

_, err := ioutil.ReadAll(r)
assert.NilError(t, err)

r = strings.NewReader("Test")
_, err = ioutil.ReadAll(&LimitedReader{R: r, N: 4})
assert.NilError(t, err)

r = strings.NewReader("Test")
_, err = ioutil.ReadAll(&LimitedReader{R: r, N: 2})
assert.Error(t, err, "read exceeds the defined limit")
}
@@ -2,12 +2,16 @@ package store

import (
"archive/tar"
"archive/zip"
"bufio"
"bytes"
_ "crypto/sha256" // ensure ids can be computed
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"path"
"path/filepath"
"strings"
@@ -259,12 +263,44 @@ func Export(name string, s Reader) io.ReadCloser {
return reader
}

const (
maxAllowedFileSizeToImport int64 = 10 << 20
zipType string = "application/zip"
)

func getImportContentType(r *bufio.Reader) (string, error) {
head, err := r.Peek(512)
if err != nil && err != io.EOF {
return "", err
}

return http.DetectContentType(head), nil
}

// Import imports an exported context into a store
func Import(name string, s Writer, reader io.Reader) error {
tr := tar.NewReader(reader)
// Buffered reader will not advance the buffer, needed to determine content type
r := bufio.NewReader(reader)

importContentType, err := getImportContentType(r)
if err != nil {
return err
}
switch importContentType {
case zipType:
return importZip(name, s, r)
default:
// Assume it's a TAR (TAR does not have a "magic number")
return importTar(name, s, r)
}
}

func importTar(name string, s Writer, reader io.Reader) error {
tr := tar.NewReader(&LimitedReader{R: reader, N: maxAllowedFileSizeToImport})
tlsData := ContextTLSData{
Endpoints: map[string]EndpointTLSData{},
}
var importedMetaFile bool
for {
hdr, err := tr.Next()
if err == io.EOF {
@@ -282,37 +318,119 @@ func Import(name string, s Writer, reader io.Reader) error {
if err != nil {
return err
}
var meta Metadata
if err := json.Unmarshal(data, &meta); err != nil {
meta, err := parseMetadata(data, name)
if err != nil {
return err
}
meta.Name = name
if err := s.CreateOrUpdate(meta); err != nil {
return err
}
importedMetaFile = true
} else if strings.HasPrefix(hdr.Name, "tls/") {
relative := strings.TrimPrefix(hdr.Name, "tls/")
parts := strings.SplitN(relative, "/", 2)
if len(parts) != 2 {
return errors.New("archive format is invalid")
}
endpointName := parts[0]
fileName := parts[1]
data, err := ioutil.ReadAll(tr)
if err != nil {
return err
}
if _, ok := tlsData.Endpoints[endpointName]; !ok {
tlsData.Endpoints[endpointName] = EndpointTLSData{
Files: map[string][]byte{},
}
if err := importEndpointTLS(&tlsData, hdr.Name, data); err != nil {
return err
}
tlsData.Endpoints[endpointName].Files[fileName] = data
}
}
if !importedMetaFile {
return errdefs.InvalidParameter(errors.New("invalid context: no metadata found"))
}
return s.ResetTLSMaterial(name, &tlsData)
}

func importZip(name string, s Writer, reader io.Reader) error {
body, err := ioutil.ReadAll(&LimitedReader{R: reader, N: maxAllowedFileSizeToImport})
if err != nil {
return err
}
zr, err := zip.NewReader(bytes.NewReader(body), int64(len(body)))
if err != nil {
return err
}
tlsData := ContextTLSData{
Endpoints: map[string]EndpointTLSData{},
}

var importedMetaFile bool
for _, zf := range zr.File {
fi := zf.FileInfo()
if fi.IsDir() {
// skip this entry, only taking files into account
continue
}
if zf.Name == metaFile {
f, err := zf.Open()
if err != nil {
return err
}

data, err := ioutil.ReadAll(&LimitedReader{R: f, N: maxAllowedFileSizeToImport})
defer f.Close()
if err != nil {
return err
}
meta, err := parseMetadata(data, name)
if err != nil {
return err
}
if err := s.CreateOrUpdate(meta); err != nil {
return err
}
importedMetaFile = true
} else if strings.HasPrefix(zf.Name, "tls/") {
f, err := zf.Open()
if err != nil {
return err
}
data, err := ioutil.ReadAll(f)
defer f.Close()
if err != nil {
return err
}
err = importEndpointTLS(&tlsData, zf.Name, data)
if err != nil {
return err
}
}
}
if !importedMetaFile {
return errdefs.InvalidParameter(errors.New("invalid context: no metadata found"))
}
return s.ResetTLSMaterial(name, &tlsData)
}

func parseMetadata(data []byte, name string) (Metadata, error) {
var meta Metadata
if err := json.Unmarshal(data, &meta); err != nil {
return meta, err
}
meta.Name = name
return meta, nil
}

func importEndpointTLS(tlsData *ContextTLSData, path string, data []byte) error {
parts := strings.SplitN(strings.TrimPrefix(path, "tls/"), "/", 2)
if len(parts) != 2 {
// TLS endpoints require archived file directory with 2 layers
// i.e. tls/{endpointName}/{fileName}
return errors.New("archive format is invalid")
}

epName := parts[0]
fileName := parts[1]
if _, ok := tlsData.Endpoints[epName]; !ok {
tlsData.Endpoints[epName] = EndpointTLSData{
Files: map[string][]byte{},
}
}
tlsData.Endpoints[epName].Files[fileName] = data
return nil
}

type setContextName interface {
setContext(name string)
}
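The zip-versus-tar decision above rests on a small trick: `bufio.Reader.Peek` inspects up to 512 bytes without consuming them, and `http.DetectContentType` recognises the zip magic number, while tar (which has no magic number) falls through to the default branch. A standalone sketch of that detection logic, assuming only the standard library:

```go
package main

import (
	"archive/zip"
	"bufio"
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// sniff mirrors the idea behind getImportContentType above: peek at up to
// 512 bytes without consuming them and classify the stream. Anything that
// is not "application/zip" would be handed to the tar importer.
func sniff(r *bufio.Reader) (string, error) {
	head, err := r.Peek(512)
	if err != nil && err != io.EOF { // short inputs legitimately return io.EOF
		return "", err
	}
	return http.DetectContentType(head), nil
}

func main() {
	// A tiny zip archive built in memory.
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	w, _ := zw.Create("meta.json")
	w.Write([]byte("{}"))
	zw.Close()

	ct, _ := sniff(bufio.NewReader(&buf))
	fmt.Println(ct) // application/zip

	ct, _ = sniff(bufio.NewReader(bytes.NewReader([]byte("plain bytes, not an archive"))))
	fmt.Println(ct) // a text/plain content type, so Import would treat the stream as tar
}
```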
@@ -1,9 +1,16 @@
package store

import (
"archive/tar"
"archive/zip"
"bufio"
"bytes"
"crypto/rand"
"encoding/json"
"io"
"io/ioutil"
"os"
"path"
"testing"

"gotest.tools/assert"
@@ -125,3 +132,127 @@ func TestErrHasCorrectContext(t *testing.T) {
assert.ErrorContains(t, err, "no-exists")
assert.Check(t, IsErrContextDoesNotExist(err))
}

func TestDetectImportContentType(t *testing.T) {
testDir, err := ioutil.TempDir("", t.Name())
assert.NilError(t, err)
defer os.RemoveAll(testDir)

buf := new(bytes.Buffer)
r := bufio.NewReader(buf)
ct, err := getImportContentType(r)
assert.NilError(t, err)
assert.Assert(t, zipType != ct)
}

func TestImportTarInvalid(t *testing.T) {
testDir, err := ioutil.TempDir("", t.Name())
assert.NilError(t, err)
defer os.RemoveAll(testDir)

tf := path.Join(testDir, "test.context")

f, err := os.Create(tf)
defer f.Close()
assert.NilError(t, err)

tw := tar.NewWriter(f)
hdr := &tar.Header{
Name: "dummy-file",
Mode: 0600,
Size: int64(len("hello world")),
}
err = tw.WriteHeader(hdr)
assert.NilError(t, err)
_, err = tw.Write([]byte("hello world"))
assert.NilError(t, err)
err = tw.Close()
assert.NilError(t, err)

source, err := os.Open(tf)
assert.NilError(t, err)
defer source.Close()
var r io.Reader = source
s := New(testDir, testCfg)
err = Import("tarInvalid", s, r)
assert.ErrorContains(t, err, "invalid context: no metadata found")
}

func TestImportZip(t *testing.T) {
testDir, err := ioutil.TempDir("", t.Name())
assert.NilError(t, err)
defer os.RemoveAll(testDir)

zf := path.Join(testDir, "test.zip")

f, err := os.Create(zf)
defer f.Close()
assert.NilError(t, err)
w := zip.NewWriter(f)

meta, err := json.Marshal(Metadata{
Endpoints: map[string]interface{}{
"ep1": endpoint{Foo: "bar"},
},
Metadata: context{Bar: "baz"},
Name: "source",
})
assert.NilError(t, err)
var files = []struct {
Name, Body string
}{
{"meta.json", string(meta)},
{path.Join("tls", "docker", "ca.pem"), string([]byte("ca.pem"))},
}

for _, file := range files {
f, err := w.Create(file.Name)
assert.NilError(t, err)
_, err = f.Write([]byte(file.Body))
assert.NilError(t, err)
}

err = w.Close()
assert.NilError(t, err)

source, err := os.Open(zf)
assert.NilError(t, err)
ct, err := getImportContentType(bufio.NewReader(source))
assert.NilError(t, err)
assert.Equal(t, zipType, ct)

source, _ = os.Open(zf)
defer source.Close()
var r io.Reader = source
s := New(testDir, testCfg)
err = Import("zipTest", s, r)
assert.NilError(t, err)
}

func TestImportZipInvalid(t *testing.T) {
testDir, err := ioutil.TempDir("", t.Name())
assert.NilError(t, err)
defer os.RemoveAll(testDir)

zf := path.Join(testDir, "test.zip")

f, err := os.Create(zf)
defer f.Close()
assert.NilError(t, err)
w := zip.NewWriter(f)

df, err := w.Create("dummy-file")
assert.NilError(t, err)
_, err = df.Write([]byte("hello world"))
assert.NilError(t, err)
err = w.Close()
assert.NilError(t, err)

source, err := os.Open(zf)
assert.NilError(t, err)
defer source.Close()
var r io.Reader = source
s := New(testDir, testCfg)
err = Import("zipInvalid", s, r)
assert.ErrorContains(t, err, "invalid context: no metadata found")
}
@@ -30,6 +30,16 @@ func (c Config) SetEndpoint(name string, getter TypeGetter) {
c.endpointTypes[name] = getter
}

// ForeachEndpointType calls cb on every endpoint type registered with the Config
func (c Config) ForeachEndpointType(cb func(string, TypeGetter) error) error {
for n, ep := range c.endpointTypes {
if err := cb(n, ep); err != nil {
return err
}
}
return nil
}

// NewConfig creates a config object
func NewConfig(contextType TypeGetter, endpoints ...NamedTypeGetter) Config {
res := Config{
@@ -69,16 +69,22 @@ func newDockerCommand(dockerCli *command.DockerCli) *cli.TopLevelCommand {
return cli.NewTopLevelCommand(cmd, dockerCli, opts, flags)
}

func setFlagErrorFunc(dockerCli *command.DockerCli, cmd *cobra.Command) {
func setFlagErrorFunc(dockerCli command.Cli, cmd *cobra.Command) {
// When invoking `docker stack --nonsense`, we need to make sure FlagErrorFunc return appropriate
// output if the feature is not supported.
// As above cli.SetupRootCommand(cmd) have already setup the FlagErrorFunc, we will add a pre-check before the FlagErrorFunc
// is called.
flagErrorFunc := cmd.FlagErrorFunc()
cmd.SetFlagErrorFunc(func(cmd *cobra.Command, err error) error {
if err := pluginmanager.AddPluginCommandStubs(dockerCli, cmd.Root()); err != nil {
return err
}
if err := isSupported(cmd, dockerCli); err != nil {
return err
}
if err := hideUnsupportedFeatures(cmd, dockerCli); err != nil {
return err
}
return flagErrorFunc(cmd, err)
})
}
@@ -2112,8 +2112,8 @@ _docker_container_run_and_create() {
return
;;
--security-opt)
COMPREPLY=( $( compgen -W "apparmor= label= no-new-privileges seccomp=" -- "$cur") )
if [ "${COMPREPLY[*]}" != "no-new-privileges" ] ; then
COMPREPLY=( $( compgen -W "apparmor= label= no-new-privileges seccomp= systempaths=unconfined" -- "$cur") )
if [[ ${COMPREPLY[*]} = *= ]] ; then
__docker_nospace
fi
return
@@ -2342,11 +2342,15 @@ _docker_context_create() {
--description|--docker|--kubernetes)
return
;;
--from)
__docker_complete_contexts
return
;;
esac

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--default-stack-orchestrator --description --docker --help --kubernetes" -- "$cur" ) )
COMPREPLY=( $( compgen -W "--default-stack-orchestrator --description --docker --from --help --kubernetes" -- "$cur" ) )
;;
esac
}
@@ -2617,36 +2621,15 @@ _docker_daemon() {
return
;;
--storage-driver|-s)
COMPREPLY=( $( compgen -W "aufs btrfs devicemapper overlay overlay2 vfs zfs" -- "$(echo "$cur" | tr '[:upper:]' '[:lower:]')" ) )
COMPREPLY=( $( compgen -W "aufs btrfs overlay2 vfs zfs" -- "$(echo "$cur" | tr '[:upper:]' '[:lower:]')" ) )
return
;;
--storage-opt)
local btrfs_options="btrfs.min_space"
local devicemapper_options="
dm.basesize
dm.blkdiscard
dm.blocksize
dm.directlvm_device
dm.fs
dm.libdm_log_level
dm.loopdatasize
dm.loopmetadatasize
dm.min_free_space
dm.mkfsarg
dm.mountopt
dm.override_udev_sync_check
dm.thinpooldev
dm.thinp_autoextend_percent
dm.thinp_autoextend_threshold
dm.thinp_metapercent
dm.thinp_percent
dm.use_deferred_deletion
dm.use_deferred_removal
"
local overlay2_options="overlay2.size"
local zfs_options="zfs.fsname"

local all_options="$btrfs_options $devicemapper_options $overlay2_options $zfs_options"
local all_options="$btrfs_options $overlay2_options $zfs_options"

case $(__docker_value_of_option '--storage-driver|-s') in
'')
@@ -2655,9 +2638,6 @@ _docker_daemon() {
btrfs)
COMPREPLY=( $( compgen -W "$btrfs_options" -S = -- "$cur" ) )
;;
devicemapper)
COMPREPLY=( $( compgen -W "$devicemapper_options" -S = -- "$cur" ) )
;;
overlay2)
COMPREPLY=( $( compgen -W "$overlay2_options" -S = -- "$cur" ) )
;;
@@ -5178,12 +5158,16 @@ _docker_system_events() {
__docker_complete_networks --cur "${cur##*=}"
return
;;
node)
__docker_complete_nodes --cur "${cur##*=}"
return
;;
scope)
COMPREPLY=( $( compgen -W "local swarm" -- "${cur##*=}" ) )
return
;;
type)
COMPREPLY=( $( compgen -W "config container daemon image network plugin secret service volume" -- "${cur##*=}" ) )
COMPREPLY=( $( compgen -W "config container daemon image network node plugin secret service volume" -- "${cur##*=}" ) )
return
;;
volume)
@@ -5194,7 +5178,7 @@ _docker_system_events() {

case "$prev" in
--filter|-f)
COMPREPLY=( $( compgen -S = -W "container daemon event image label network scope type volume" -- "$cur" ) )
COMPREPLY=( $( compgen -S = -W "container daemon event image label network node scope type volume" -- "$cur" ) )
__docker_nospace
return
;;
@@ -21,7 +21,7 @@ ifeq ($(DOCKER_CLI_GO_BUILD_CACHE),y)
DOCKER_CLI_MOUNTS += -v "$(CACHE_VOLUME_NAME):/root/.cache/go-build"
endif
VERSION = $(shell cat VERSION)
ENVVARS = -e VERSION=$(VERSION) -e GITCOMMIT -e PLATFORM -e TESTFLAGS -e TESTDIRS
ENVVARS = -e VERSION=$(VERSION) -e GITCOMMIT -e PLATFORM -e TESTFLAGS -e TESTDIRS -e GOOS -e GOARCH -e GOARM

# build docker image (dockerfiles/Dockerfile.build)
.PHONY: build_docker_image
@@ -1,4 +1,4 @@
FROM golang:1.12.4-alpine
FROM golang:1.12.5-alpine

RUN apk add -U git bash coreutils gcc musl-dev

@@ -1,4 +1,4 @@
FROM dockercore/golang-cross:1.12.4
FROM dockercore/golang-cross:1.12.5
ENV DISABLE_WARN_OUTSIDE_CONTAINER=1
WORKDIR /go/src/github.com/docker/cli
COPY . .

@@ -1,4 +1,4 @@
FROM golang:1.12.4-alpine
FROM golang:1.12.5-alpine

RUN apk add -U git make bash coreutils ca-certificates curl

@@ -1,4 +1,4 @@
ARG GO_VERSION=1.12.4
ARG GO_VERSION=1.12.5

FROM docker/containerd-shim-process:a4d1531 AS containerd-shim-process

@@ -1,4 +1,4 @@
FROM golang:1.12.4-alpine
FROM golang:1.12.5-alpine

RUN apk add -U git
@@ -812,7 +812,7 @@ Defaults to 20G.

###### Example

```PowerShell
```powershell
C:\> dockerd --storage-opt size=40G
```

@@ -827,7 +827,7 @@ deployments).

###### Example

```PowerShell
```powershell
C:\> dockerd --storage-opt lcow.globalmode=false
```

@@ -838,7 +838,7 @@ used for booting a utility VM. Defaults to `%ProgramFiles%\Linux Containers`.

###### Example

```PowerShell
```powershell
C:\> dockerd --storage-opt lcow.kirdpath=c:\path\to\files
```

@@ -849,7 +849,7 @@ Defaults to `bootx64.efi`.

###### Example

```PowerShell
```powershell
C:\> dockerd --storage-opt lcow.kernel=kernel.efi
```

@@ -860,7 +860,7 @@ Defaults to `initrd.img`.

###### Example

```PowerShell
```powershell
C:\> dockerd --storage-opt lcow.initrd=myinitrd.img
```

@@ -872,7 +872,7 @@ are kernel specific.

###### Example

```PowerShell
```powershell
C:\> dockerd --storage-opt "lcow.bootparameters='option=value'"
```

@@ -883,7 +883,7 @@ and initrd booting. Defaults to `uvm.vhdx` under `lcow.kirdpath`.

###### Example

```PowerShell
```powershell
C:\> dockerd --storage-opt lcow.vhdx=custom.vhdx
```

@@ -894,7 +894,7 @@ to 300.

###### Example

```PowerShell
```powershell
C:\> dockerd --storage-opt lcow.timeout=240
```

@@ -905,7 +905,7 @@ containers. Defaults to 20. Cannot be less than 20.

###### Example

```PowerShell
```powershell
C:\> dockerd --storage-opt lcow.sandboxsize=40
```

@@ -753,7 +753,7 @@ operating system older than Windows 10 1809 with `--isolation process` will fail
On Windows server, assuming the default configuration, these commands are equivalent
and result in `process` isolation:

```PowerShell
```powershell
PS C:\> docker run -d microsoft/nanoserver powershell echo process
PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo process
PS C:\> docker run -d --isolation process microsoft/nanoserver powershell echo process
@@ -763,7 +763,7 @@ If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`,
are running against a Windows client-based daemon, these commands are equivalent and
result in `hyperv` isolation:

```PowerShell
```powershell
PS C:\> docker run -d microsoft/nanoserver powershell echo hyperv
PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo hyperv
PS C:\> docker run -d --isolation hyperv microsoft/nanoserver powershell echo hyperv

@@ -1433,7 +1433,7 @@ today=Wednesday
HOME=/root
```

```PowerShell
```powershell
PS C:\> docker run --rm -e "foo=bar" microsoft/nanoserver cmd /s /c set
ALLUSERSPROFILE=C:\ProgramData
APPDATA=C:\Users\ContainerAdministrator\AppData\Roaming
@ -74,18 +74,36 @@ func TestGlobalHelp(t *testing.T) {
assert.Assert(t, is.Equal(badmetacount, 1))

// Running with `--help` should produce the same.
res2 := icmd.RunCmd(run("--help"))
res2.Assert(t, icmd.Expected{
ExitCode: 0,
t.Run("help_flag", func(t *testing.T) {
res2 := icmd.RunCmd(run("--help"))
res2.Assert(t, icmd.Expected{
ExitCode: 0,
})
assert.Assert(t, is.Equal(res2.Stdout(), res.Stdout()))
assert.Assert(t, is.Equal(res2.Stderr(), ""))
})
assert.Assert(t, is.Equal(res2.Stdout(), res.Stdout()))
assert.Assert(t, is.Equal(res2.Stderr(), ""))

// Running just `docker` (without `help` nor `--help`) should produce the same thing, except on Stderr.
res2 = icmd.RunCmd(run())
res2.Assert(t, icmd.Expected{
ExitCode: 0,
t.Run("bare", func(t *testing.T) {
res2 := icmd.RunCmd(run())
res2.Assert(t, icmd.Expected{
ExitCode: 0,
})
assert.Assert(t, is.Equal(res2.Stdout(), ""))
assert.Assert(t, is.Equal(res2.Stderr(), res.Stdout()))
})

t.Run("badopt", func(t *testing.T) {
// Running `docker --badopt` should also produce the
// same thing, give or take the leading error message
// and a trailing carriage return (due to main() using
// Println in the error case).
res2 := icmd.RunCmd(run("--badopt"))
res2.Assert(t, icmd.Expected{
ExitCode: 125,
})
assert.Assert(t, is.Equal(res2.Stdout(), ""))
exp := "unknown flag: --badopt\nSee 'docker --help'.\n" + res.Stdout() + "\n"
assert.Assert(t, is.Equal(res2.Stderr(), exp))
})
assert.Assert(t, is.Equal(res2.Stdout(), ""))
assert.Assert(t, is.Equal(res2.Stderr(), res.Stdout()))
}

36
e2e/cli-plugins/plugins/nopersistentprerun/main.go
Normal file
@ -0,0 +1,36 @@
package main

import (
	"context"
	"fmt"

	"github.com/docker/cli/cli-plugins/manager"
	"github.com/docker/cli/cli-plugins/plugin"
	"github.com/docker/cli/cli/command"
	"github.com/spf13/cobra"
)

func main() {
	plugin.Run(func(dockerCli command.Cli) *cobra.Command {
		cmd := &cobra.Command{
			Use: "nopersistentprerun",
			Short: "Testing without PersistentPreRun hooks",
			//PersistentPreRunE: Not specified, we need to test that it works in the absence of an explicit call
			RunE: func(cmd *cobra.Command, args []string) error {
				cli := dockerCli.Client()
				ping, err := cli.Ping(context.Background())
				if err != nil {
					return err
				}
				fmt.Println(ping.APIVersion)
				return nil
			},
		}
		return cmd
	},
		manager.Metadata{
			SchemaVersion: "0.1.0",
			Vendor: "Docker Inc.",
			Version: "testing",
		})
}
@ -213,15 +213,24 @@ func TestGoodSubcommandHelp(t *testing.T) {
}

// TestCliInitialized tests the code paths which ensure that the Cli
// object is initialized even if the plugin uses PersistentRunE
// object is initialized whether the plugin uses PersistentRunE or not
func TestCliInitialized(t *testing.T) {
run, _, cleanup := prepare(t)
defer cleanup()

res := icmd.RunCmd(run("helloworld", "--pre-run", "apiversion"))
res.Assert(t, icmd.Success)
assert.Assert(t, res.Stdout() != "")
assert.Assert(t, is.Equal(res.Stderr(), "Plugin PersistentPreRunE called"))
var apiversion string
t.Run("withhook", func(t *testing.T) {
res := icmd.RunCmd(run("helloworld", "--pre-run", "apiversion"))
res.Assert(t, icmd.Success)
assert.Assert(t, res.Stdout() != "")
apiversion = res.Stdout()
assert.Assert(t, is.Equal(res.Stderr(), "Plugin PersistentPreRunE called"))
})
t.Run("withouthook", func(t *testing.T) {
res := icmd.RunCmd(run("nopersistentprerun"))
res.Assert(t, icmd.Success)
assert.Assert(t, is.Equal(res.Stdout(), apiversion))
})
}

// TestPluginErrorCode tests when the plugin return with a given exit status.

21
e2e/context/context_test.go
Normal file
@ -0,0 +1,21 @@
package context

import (
	"testing"

	"gotest.tools/golden"
	"gotest.tools/icmd"
)

func TestContextList(t *testing.T) {
	cmd := icmd.Command("docker", "context", "ls")
	cmd.Env = append(cmd.Env,
		"DOCKER_CONFIG=./testdata/test-dockerconfig",
		"KUBECONFIG=./testdata/test-kubeconfig",
	)
	result := icmd.RunCmd(cmd).Assert(t, icmd.Expected{
		Err: icmd.None,
		ExitCode: 0,
	})
	golden.Assert(t, result.Stdout(), "context-ls.golden")
}
17
e2e/context/main_test.go
Normal file
@ -0,0 +1,17 @@
package context

import (
	"fmt"
	"os"
	"testing"

	"github.com/docker/cli/internal/test/environment"
)

func TestMain(m *testing.M) {
	if err := environment.Setup(); err != nil {
		fmt.Println(err.Error())
		os.Exit(3)
	}
	os.Exit(m.Run())
}
3
e2e/context/testdata/context-ls.golden
vendored
Normal file
@ -0,0 +1,3 @@
NAME DESCRIPTION DOCKER ENDPOINT KUBERNETES ENDPOINT ORCHESTRATOR
default * Current DOCKER_HOST based configuration unix:///var/run/docker.sock https://someserver (zoinx) swarm
remote my remote cluster ssh://someserver https://someserver (default) kubernetes
7
e2e/context/testdata/test-dockerconfig/config.json
vendored
Normal file
@ -0,0 +1,7 @@
{
	"auths": {},
	"HttpHeaders": {
		"User-Agent": "Docker-Client/19.09.0-dev (linux)"
	},
	"credsStore": "secretservice"
}

@ -0,0 +1 @@
{"Name":"remote","Metadata":{"Description":"my remote cluster","StackOrchestrator":"kubernetes"},"Endpoints":{"docker":{"Host":"ssh://someserver","SkipTLSVerify":false},"kubernetes":{"Host":"https://someserver","SkipTLSVerify":false,"DefaultNamespace":"default","Exec":{"command":"heptio-authenticator-aws","args":["token","-i","eks-cf"],"env":null,"apiVersion":"client.authentication.k8s.io/v1alpha1"}}}}
20
e2e/context/testdata/test-kubeconfig
vendored
Normal file
@ -0,0 +1,20 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: dGhlLWNh
    server: https://someserver
  name: test-cluster
contexts:
- context:
    cluster: test-cluster
    user: test-user
    namespace: zoinx
  name: test
current-context: test
kind: Config
preferences: {}
users:
- name: test-user
  user:
    client-certificate-data: dGhlLWNlcnQ=
    client-key-data: dGhlLWtleQ==
@ -1,8 +0,0 @@
package kubernetes

import api "github.com/docker/compose-on-kubernetes/api"

// NewKubernetesConfig resolves the path to the desired Kubernetes configuration file based on
// the KUBECONFIG environment variable and command line flags.
// Deprecated: Use github.com/docker/compose-on-kubernetes/api.NewKubernetesConfig instead
var NewKubernetesConfig = api.NewKubernetesConfig

@ -22,5 +22,18 @@ export LDFLAGS="\

GOOS="${GOOS:-$(go env GOHOSTOS)}"
GOARCH="${GOARCH:-$(go env GOHOSTARCH)}"
export TARGET="build/docker-$GOOS-$GOARCH"
if [ "${GOARCH}" = "arm" ]; then
    GOARM="${GOARM:-$(go env GOHOSTARM)}"
fi

TARGET="build/docker-$GOOS-$GOARCH"
if [ "${GOARCH}" = "arm" ] && [ -n "${GOARM}" ]; then
    TARGET="${TARGET}-v${GOARM}"
fi

if [ "${GOOS}" = "windows" ]; then
    TARGET="${TARGET}.exe"
fi
export TARGET

export SOURCE="github.com/docker/cli/cmd/docker"

23
vendor.conf
@ -3,10 +3,11 @@ github.com/agl/ed25519 5312a61534124124185d41f09206
|
||||
github.com/asaskevich/govalidator f9ffefc3facfbe0caee3fea233cbb6e8208f4541
|
||||
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
|
||||
github.com/beorn7/perks e7f67b54abbeac9c40a31de0f81159e4cafebd6a
|
||||
github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
|
||||
github.com/containerd/containerd ceba56893a76f22cf0126c46d835c80fb3833408
|
||||
github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
|
||||
github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f
|
||||
github.com/containerd/containerd 3a3f0aac8819165839a41fee77a4f4ac8b103097
|
||||
github.com/containerd/continuity aaeac12a7ffcd198ae25440a9dff125c2e2703a7
|
||||
github.com/containerd/fifo a9fb20d87448d386e6d50b1f2e1fa70dcf0de43c
|
||||
github.com/containerd/ttrpc f02858b1457c5ca3aaec3a0803eb0d59f96e41d6
|
||||
github.com/containerd/typeurl 2a93cfde8c20b23de8eb84a5adbc234ddf7a9e8d
|
||||
github.com/coreos/etcd d57e8b8d97adfc4a6c224fe116714bf1a1f3beb9 # v3.3.12
|
||||
github.com/cpuguy83/go-md2man 20f5889cbdc3c73dbd2862796665e7c465ade7d1 # v1.0.8
|
||||
@ -14,7 +15,7 @@ github.com/davecgh/go-spew 8991bc29aa16c548c550c7ff7826
|
||||
github.com/dgrijalva/jwt-go a2c85815a77d0f951e33ba4db5ae93629a1530af
|
||||
github.com/docker/compose-on-kubernetes 7a68f5c914c7e06d7a08dc71608f41811c91f0bc # v0.4.21
|
||||
github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580
|
||||
github.com/docker/docker ac48309ac4024b5bfe21a126b1a23a3e93521d75
|
||||
github.com/docker/docker a004854097417a591c3f6a3aeaab75efae3c5814 https://github.com/docker/engine.git # 19.03 branch
|
||||
github.com/docker/docker-credential-helpers 5241b46610f2491efdf9d1c85f1ddf5b02f6d962
|
||||
github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06 # Contains a customized version of canonical/json and is used by Notary. The package is periodically rebased on current Go versions.
|
||||
github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0
|
||||
@ -23,11 +24,11 @@ github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a
|
||||
github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0
|
||||
github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
|
||||
github.com/docker/licensing 9781369abdb5281cdc07a2a446c6df01347ec793
|
||||
github.com/docker/swarmkit 59163bf75df38489d4a10392265d27156dc473c5
|
||||
github.com/docker/swarmkit 48eb1828ce81be20b25d647f6ca8f33d599f705c
|
||||
github.com/evanphx/json-patch 72bf35d0ff611848c1dc9df0f976c81192392fa5 # v4.1.0
|
||||
github.com/gofrs/flock 7f43ea2e6a643ad441fc12d0ecc0d3388b300c53 # v0.7.0
|
||||
github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
|
||||
github.com/gogo/protobuf 4cbf7e384e768b4e01799441fdf2a706a5635ae7 # v1.2.0
|
||||
github.com/gogo/googleapis d31c731455cb061f42baff3bda55bad0118b126b # v1.2.0
|
||||
github.com/gogo/protobuf ba06b47c162d49f2af050fb4c75bcbc86a159d5c # v1.2.1
|
||||
github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998
|
||||
github.com/golang/protobuf aa810b61a9c79d51363740d207bb46cf8e620ed5 # v1.2.0
|
||||
github.com/google/go-cmp 3af367b6b30c263d47e8895973edcca9a49cf029 # v0.2.0
|
||||
@ -51,13 +52,13 @@ github.com/Microsoft/go-winio 84b4ab48a50763fe7b3abcef38e5
|
||||
github.com/Microsoft/hcsshim 672e52e9209d1e53718c1b6a7d68cc9272654ab5
|
||||
github.com/miekg/pkcs11 6120d95c0e9576ccf4a78ba40855809dca31a9ed
|
||||
github.com/mitchellh/mapstructure f15292f7a699fcc1a38a80977f80a046874ba8ac
|
||||
github.com/moby/buildkit 8818c67cff663befa7b70f21454e340f71616581
|
||||
github.com/moby/buildkit f238f1efb04f00bf0cc147141fda9ddb55c8bc49
|
||||
github.com/modern-go/concurrent bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 # 1.0.3
|
||||
github.com/modern-go/reflect2 4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd # 1.0.1
|
||||
github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b
|
||||
github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf # v1.0.0-rc1
|
||||
github.com/opencontainers/image-spec d60099175f88c47cd379c4738d158884749ed235 # v1.0.1
|
||||
github.com/opencontainers/runc 029124da7af7360afa781a0234d1b083550f797c # v1.0.0-rc7-6-g029124da
|
||||
github.com/opencontainers/runc 425e105d5a03fabd737a126ad93d62a9eeede87f # v1.0.0-rc8
|
||||
github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db
|
||||
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
|
||||
github.com/pkg/errors ba968bfe8b2f7e042a574c888954fccecfa385b4 # v0.8.1
|
||||
@ -77,7 +78,7 @@ github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e
|
||||
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
|
||||
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
|
||||
github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d
|
||||
golang.org/x/crypto 38d8ce5564a5b71b2e3a00553993f1b9a7ae852f
|
||||
golang.org/x/crypto 88737f569e3a9c7ab309cdc09a07fe7fc87233c3
|
||||
golang.org/x/net eb5bcb51f2a31c7d5141d810b70815c05d9c9146
|
||||
golang.org/x/oauth2 ef147856a6ddbb60760db74283d2424e98c87bff
|
||||
golang.org/x/sync e225da77a7e68af35c70ccbf71af2b83e6acac3c
|
||||
@ -85,7 +86,7 @@ golang.org/x/sys 4b34438f7a67ee5f45cc6132e2ba
|
||||
golang.org/x/text f21a4dfb5e38f5895301dc265a8def02365cc3d0 # v0.3.0
|
||||
golang.org/x/time fbb02b2291d28baffd63558aa44b4b56f178d650
|
||||
google.golang.org/genproto 02b4e95473316948020af0b7a4f0f22c73929b0e
|
||||
google.golang.org/grpc 7a6a684ca69eb4cae85ad0a484f2e531598c047b # v1.12.2
|
||||
google.golang.org/grpc 25c4f928eaa6d96443009bd842389fb4fa48664e # v1.20.1
|
||||
gopkg.in/inf.v0 d2d2541c53f18d2a059457998ce2876cc8e67cbf # v0.9.1
|
||||
gopkg.in/yaml.v2 5420a8b6744d3b0345ab293f6fcba19c978f1183 # v2.2.1
|
||||
gotest.tools 1083505acf35a0bd8a696b26837e1fb3187a7a83 # v2.3.0
|
||||
|
||||
18
vendor/github.com/containerd/console/LICENSE
generated
vendored
@ -1,6 +1,7 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
@ -175,24 +176,13 @@
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
Copyright The containerd Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
10
vendor/github.com/containerd/console/README.md
generated
vendored
@ -15,3 +15,13 @@ if err := current.SetRaw(); err != nil {
|
||||
ws, err := current.Size()
|
||||
current.Resize(ws)
|
||||
```
|
||||
|
||||
## Project details
|
||||
|
||||
console is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
|
||||
As a containerd sub-project, you will find the:
|
||||
* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
|
||||
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
|
||||
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
|
||||
|
||||
information in our [`containerd/project`](https://github.com/containerd/project) repository.
|
||||
|
||||
1055
vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
generated
vendored
File diff suppressed because it is too large
1553
vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
generated
vendored
File diff suppressed because it is too large
444
vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
generated
vendored
@ -1,36 +1,20 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: github.com/containerd/containerd/api/services/diff/v1/diff.proto
|
||||
|
||||
/*
|
||||
Package diff is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
github.com/containerd/containerd/api/services/diff/v1/diff.proto
|
||||
|
||||
It has these top-level messages:
|
||||
ApplyRequest
|
||||
ApplyResponse
|
||||
DiffRequest
|
||||
DiffResponse
|
||||
*/
|
||||
package diff
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
|
||||
import containerd_types "github.com/containerd/containerd/api/types"
|
||||
import containerd_types1 "github.com/containerd/containerd/api/types"
|
||||
|
||||
import context "golang.org/x/net/context"
|
||||
import grpc "google.golang.org/grpc"
|
||||
|
||||
import strings "strings"
|
||||
import reflect "reflect"
|
||||
import sortkeys "github.com/gogo/protobuf/sortkeys"
|
||||
|
||||
import io "io"
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
types "github.com/containerd/containerd/api/types"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
|
||||
grpc "google.golang.org/grpc"
|
||||
io "io"
|
||||
math "math"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
@ -45,32 +29,94 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type ApplyRequest struct {
|
||||
// Diff is the descriptor of the diff to be extracted
|
||||
Diff *containerd_types1.Descriptor `protobuf:"bytes,1,opt,name=diff" json:"diff,omitempty"`
|
||||
Mounts []*containerd_types.Mount `protobuf:"bytes,2,rep,name=mounts" json:"mounts,omitempty"`
|
||||
Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"`
|
||||
Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ApplyRequest) Reset() { *m = ApplyRequest{} }
|
||||
func (*ApplyRequest) ProtoMessage() {}
|
||||
func (*ApplyRequest) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{0} }
|
||||
func (m *ApplyRequest) Reset() { *m = ApplyRequest{} }
|
||||
func (*ApplyRequest) ProtoMessage() {}
|
||||
func (*ApplyRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_3b36a99e6faaa935, []int{0}
|
||||
}
|
||||
func (m *ApplyRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ApplyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ApplyRequest.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ApplyRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ApplyRequest.Merge(m, src)
|
||||
}
|
||||
func (m *ApplyRequest) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ApplyRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ApplyRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ApplyRequest proto.InternalMessageInfo
|
||||
|
||||
type ApplyResponse struct {
|
||||
// Applied is the descriptor for the object which was applied.
|
||||
// If the input was a compressed blob then the result will be
|
||||
// the descriptor for the uncompressed blob.
|
||||
Applied *containerd_types1.Descriptor `protobuf:"bytes,1,opt,name=applied" json:"applied,omitempty"`
|
||||
Applied *types.Descriptor `protobuf:"bytes,1,opt,name=applied,proto3" json:"applied,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ApplyResponse) Reset() { *m = ApplyResponse{} }
|
||||
func (*ApplyResponse) ProtoMessage() {}
|
||||
func (*ApplyResponse) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{1} }
|
||||
func (m *ApplyResponse) Reset() { *m = ApplyResponse{} }
|
||||
func (*ApplyResponse) ProtoMessage() {}
|
||||
func (*ApplyResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_3b36a99e6faaa935, []int{1}
|
||||
}
|
||||
func (m *ApplyResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ApplyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ApplyResponse.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ApplyResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ApplyResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ApplyResponse) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ApplyResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ApplyResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ApplyResponse proto.InternalMessageInfo
|
||||
|
||||
type DiffRequest struct {
|
||||
// Left are the mounts which represent the older copy
|
||||
// in which is the base of the computed changes.
|
||||
Left []*containerd_types.Mount `protobuf:"bytes,1,rep,name=left" json:"left,omitempty"`
|
||||
Left []*types.Mount `protobuf:"bytes,1,rep,name=left,proto3" json:"left,omitempty"`
|
||||
// Right are the mounts which represents the newer copy
|
||||
// in which changes from the left were made into.
|
||||
Right []*containerd_types.Mount `protobuf:"bytes,2,rep,name=right" json:"right,omitempty"`
|
||||
Right []*types.Mount `protobuf:"bytes,2,rep,name=right,proto3" json:"right,omitempty"`
|
||||
// MediaType is the media type descriptor for the created diff
|
||||
// object
|
||||
MediaType string `protobuf:"bytes,3,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
|
||||
@ -79,29 +125,129 @@ type DiffRequest struct {
|
||||
Ref string `protobuf:"bytes,4,opt,name=ref,proto3" json:"ref,omitempty"`
|
||||
// Labels are the labels to apply to the generated content
|
||||
// on content store commit.
|
||||
Labels map[string]string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *DiffRequest) Reset() { *m = DiffRequest{} }
|
||||
func (*DiffRequest) ProtoMessage() {}
|
||||
func (*DiffRequest) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{2} }
|
||||
func (m *DiffRequest) Reset() { *m = DiffRequest{} }
|
||||
func (*DiffRequest) ProtoMessage() {}
|
||||
func (*DiffRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_3b36a99e6faaa935, []int{2}
|
||||
}
|
||||
func (m *DiffRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *DiffRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_DiffRequest.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *DiffRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DiffRequest.Merge(m, src)
|
||||
}
|
||||
func (m *DiffRequest) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *DiffRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DiffRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DiffRequest proto.InternalMessageInfo
|
||||
|
||||
type DiffResponse struct {
|
||||
// Diff is the descriptor of the diff which can be applied
|
||||
Diff *containerd_types1.Descriptor `protobuf:"bytes,3,opt,name=diff" json:"diff,omitempty"`
|
||||
Diff *types.Descriptor `protobuf:"bytes,3,opt,name=diff,proto3" json:"diff,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *DiffResponse) Reset() { *m = DiffResponse{} }
|
||||
func (*DiffResponse) ProtoMessage() {}
|
||||
func (*DiffResponse) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{3} }
|
||||
func (m *DiffResponse) Reset() { *m = DiffResponse{} }
|
||||
func (*DiffResponse) ProtoMessage() {}
|
||||
func (*DiffResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_3b36a99e6faaa935, []int{3}
|
||||
}
|
||||
func (m *DiffResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *DiffResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_DiffResponse.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *DiffResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DiffResponse.Merge(m, src)
|
||||
}
|
||||
func (m *DiffResponse) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *DiffResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DiffResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DiffResponse proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest")
|
||||
proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse")
|
||||
proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest")
|
||||
proto.RegisterMapType((map[string]string)(nil), "containerd.services.diff.v1.DiffRequest.LabelsEntry")
|
||||
proto.RegisterType((*DiffResponse)(nil), "containerd.services.diff.v1.DiffResponse")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/services/diff/v1/diff.proto", fileDescriptor_3b36a99e6faaa935)
|
||||
}
|
||||
|
||||
var fileDescriptor_3b36a99e6faaa935 = []byte{
|
||||
// 457 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30,
|
||||
0x14, 0xaf, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40,
|
||||
0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x01, 0x81, 0x04, 0x4a,
|
||||
0x9b, 0x97, 0xce, 0x22, 0x8d, 0xbd, 0xd8, 0xad, 0x94, 0x1b, 0xdf, 0x85, 0x8f, 0xc2, 0x65, 0x47,
|
||||
0x8e, 0x1c, 0x69, 0x3f, 0x09, 0xb2, 0x93, 0x40, 0x24, 0xa4, 0x12, 0x76, 0xca, 0xcb, 0xf3, 0xef,
|
||||
0x9f, 0xfd, 0x6c, 0x38, 0x5d, 0x70, 0x7d, 0xb9, 0x9a, 0xb1, 0xb9, 0x58, 0xfa, 0x73, 0x91, 0xea,
|
||||
0x90, 0xa7, 0x98, 0x45, 0xf5, 0x32, 0x94, 0xdc, 0x57, 0x98, 0xad, 0xf9, 0x1c, 0x95, 0x1f, 0xf1,
|
||||
0x38, 0xf6, 0xd7, 0x87, 0xf6, 0xcb, 0x64, 0x26, 0xb4, 0xa0, 0xf7, 0xfe, 0x60, 0x59, 0x85, 0x63,
|
||||
0x76, 0x7d, 0x7d, 0xe8, 0xee, 0x2f, 0xc4, 0x42, 0x58, 0x9c, 0x6f, 0xaa, 0x82, 0xe2, 0x1e, 0x35,
|
||||
0x32, 0xd5, 0xb9, 0x44, 0xe5, 0x2f, 0xc5, 0x2a, 0xd5, 0x25, 0xef, 0xe4, 0x3f, 0x78, 0x11, 0xaa,
|
||||
0x79, 0xc6, 0xa5, 0x16, 0x59, 0x41, 0x1e, 0x5f, 0xc1, 0xde, 0x4b, 0x29, 0x93, 0x3c, 0xc0, 0xab,
|
||||
0x15, 0x2a, 0x4d, 0x9f, 0x40, 0xd7, 0xa4, 0x74, 0xc8, 0x88, 0x4c, 0x86, 0xd3, 0xfb, 0xac, 0xb6,
|
||||
0x0d, 0xab, 0xc0, 0xce, 0x7e, 0x2b, 0x04, 0x16, 0x49, 0x7d, 0xe8, 0xdb, 0x34, 0xca, 0x69, 0x8f,
|
||||
0x3a, 0x93, 0xe1, 0xf4, 0xee, 0xdf, 0x9c, 0xb7, 0x66, 0x3d, 0x28, 0x61, 0xe3, 0x37, 0x70, 0xbb,
|
||||
0xb4, 0x54, 0x52, 0xa4, 0x0a, 0xe9, 0x11, 0xdc, 0x0a, 0xa5, 0x4c, 0x38, 0x46, 0x8d, 0x6c, 0x2b,
|
||||
0xf0, 0xf8, 0x6b, 0x1b, 0x86, 0x67, 0x3c, 0x8e, 0xab, 0xec, 0x8f, 0xa0, 0x9b, 0x60, 0xac, 0x1d,
|
||||
0xb2, 0x3b, 0x87, 0x05, 0xd1, 0xc7, 0xd0, 0xcb, 0xf8, 0xe2, 0x52, 0xff, 0x2b, 0x75, 0x81, 0xa2,
|
||||
0x0f, 0x00, 0x96, 0x18, 0xf1, 0xf0, 0x93, 0x59, 0x73, 0x3a, 0x23, 0x32, 0x19, 0x04, 0x03, 0xdb,
|
||||
0xb9, 0xc8, 0x25, 0xd2, 0x3b, 0xd0, 0xc9, 0x30, 0x76, 0xba, 0xb6, 0x6f, 0x4a, 0x7a, 0x0e, 0xfd,
|
||||
0x24, 0x9c, 0x61, 0xa2, 0x9c, 0x9e, 0x35, 0x78, 0xc6, 0x76, 0xdc, 0x08, 0x56, 0xdb, 0x06, 0x3b,
|
||||
0xb7, 0xb4, 0xd7, 0xa9, 0xce, 0xf2, 0xa0, 0xd4, 0x70, 0x5f, 0xc0, 0xb0, 0xd6, 0x36, 0x76, 0x9f,
|
||||
0x31, 0xb7, 0xa7, 0x35, 0x08, 0x4c, 0x49, 0xf7, 0xa1, 0xb7, 0x0e, 0x93, 0x15, 0x3a, 0x6d, 0xdb,
|
||||
0x2b, 0x7e, 0x8e, 0xdb, 0xcf, 0xc9, 0xf8, 0x14, 0xf6, 0x0a, 0xf5, 0xf2, 0xb4, 0xab, 0x09, 0x77,
|
||||
0x9a, 0x4e, 0x78, 0xfa, 0x8d, 0x40, 0xd7, 0x48, 0xd0, 0x8f, 0xd0, 0xb3, 0x93, 0xa3, 0x07, 0x3b,
|
||||
0x37, 0x53, 0xbf, 0x50, 0xee, 0xc3, 0x26, 0xd0, 0x32, 0xda, 0x87, 0xd2, 0x67, 0xd2, 0xf4, 0xac,
|
||||
0xdc, 0x83, 0x06, 0xc8, 0x42, 0xfc, 0xd5, 0xc5, 0xf5, 0xc6, 0x6b, 0xfd, 0xd8, 0x78, 0xad, 0x2f,
|
||||
0x5b, 0x8f, 0x5c, 0x6f, 0x3d, 0xf2, 0x7d, 0xeb, 0x91, 0x9f, 0x5b, 0x8f, 0xbc, 0x3f, 0xbe, 0xd1,
|
||||
0x6b, 0x3f, 0x31, 0xdf, 0x77, 0xad, 0x59, 0xdf, 0x3e, 0xa4, 0xa7, 0xbf, 0x02, 0x00, 0x00, 0xff,
|
||||
0xff, 0x61, 0xd1, 0x6e, 0x9e, 0x34, 0x04, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
@ -110,8 +256,9 @@ var _ grpc.ClientConn
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for Diff service
|
||||
|
||||
// DiffClient is the client API for Diff service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type DiffClient interface {
|
||||
// Apply applies the content associated with the provided digests onto
|
||||
// the provided mounts. Archive content will be extracted and
|
||||
@ -132,7 +279,7 @@ func NewDiffClient(cc *grpc.ClientConn) DiffClient {
|
||||
|
||||
func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) {
|
||||
out := new(ApplyResponse)
|
||||
err := grpc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Apply", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Apply", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -141,15 +288,14 @@ func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.C
|
||||
|
||||
func (c *diffClient) Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) {
|
||||
out := new(DiffResponse)
|
||||
err := grpc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Diff", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Diff", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Server API for Diff service
|
||||
|
||||
// DiffServer is the server API for Diff service.
|
||||
type DiffServer interface {
|
||||
// Apply applies the content associated with the provided digests onto
|
||||
// the provided mounts. Archive content will be extracted and
|
||||
@ -254,6 +400,9 @@ func (m *ApplyRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
i += n
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -282,6 +431,9 @@ func (m *ApplyResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
i += n2
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -353,6 +505,9 @@ func (m *DiffRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
i += copy(dAtA[i:], v)
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -381,6 +536,9 @@ func (m *DiffResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
i += n3
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -394,6 +552,9 @@ func encodeVarintDiff(dAtA []byte, offset int, v uint64) int {
|
||||
return offset + 1
|
||||
}
|
||||
func (m *ApplyRequest) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Diff != nil {
|
||||
@ -406,20 +567,32 @@ func (m *ApplyRequest) Size() (n int) {
|
||||
n += 1 + l + sovDiff(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ApplyResponse) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Applied != nil {
|
||||
l = m.Applied.Size()
|
||||
n += 1 + l + sovDiff(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *DiffRequest) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Left) > 0 {
|
||||
@ -450,16 +623,25 @@ func (m *DiffRequest) Size() (n int) {
|
||||
n += mapEntrySize + 1 + sovDiff(uint64(mapEntrySize))
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *DiffResponse) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Diff != nil {
|
||||
l = m.Diff.Size()
|
||||
n += 1 + l + sovDiff(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@ -481,8 +663,9 @@ func (this *ApplyRequest) String() string {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&ApplyRequest{`,
|
||||
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
|
||||
`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "containerd_types.Mount", 1) + `,`,
|
||||
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`,
|
||||
`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -492,7 +675,8 @@ func (this *ApplyResponse) String() string {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&ApplyResponse{`,
|
||||
`Applied:` + strings.Replace(fmt.Sprintf("%v", this.Applied), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
|
||||
`Applied:` + strings.Replace(fmt.Sprintf("%v", this.Applied), "Descriptor", "types.Descriptor", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -505,18 +689,19 @@ func (this *DiffRequest) String() string {
|
||||
for k, _ := range this.Labels {
|
||||
keysForLabels = append(keysForLabels, k)
|
||||
}
|
||||
sortkeys.Strings(keysForLabels)
|
||||
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
|
||||
mapStringForLabels := "map[string]string{"
|
||||
for _, k := range keysForLabels {
|
||||
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
|
||||
}
|
||||
mapStringForLabels += "}"
|
||||
s := strings.Join([]string{`&DiffRequest{`,
|
||||
`Left:` + strings.Replace(fmt.Sprintf("%v", this.Left), "Mount", "containerd_types.Mount", 1) + `,`,
|
||||
`Right:` + strings.Replace(fmt.Sprintf("%v", this.Right), "Mount", "containerd_types.Mount", 1) + `,`,
|
||||
`Left:` + strings.Replace(fmt.Sprintf("%v", this.Left), "Mount", "types.Mount", 1) + `,`,
|
||||
`Right:` + strings.Replace(fmt.Sprintf("%v", this.Right), "Mount", "types.Mount", 1) + `,`,
|
||||
`MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
|
||||
`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
|
||||
`Labels:` + mapStringForLabels + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -526,7 +711,8 @@ func (this *DiffResponse) String() string {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&DiffResponse{`,
|
||||
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
|
||||
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -554,7 +740,7 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -582,7 +768,7 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -591,11 +777,14 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Diff == nil {
|
||||
m.Diff = &containerd_types1.Descriptor{}
|
||||
m.Diff = &types.Descriptor{}
|
||||
}
|
||||
if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
@ -615,7 +804,7 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -624,10 +813,13 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Mounts = append(m.Mounts, &containerd_types.Mount{})
|
||||
m.Mounts = append(m.Mounts, &types.Mount{})
|
||||
if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -641,9 +833,13 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -668,7 +864,7 @@ func (m *ApplyResponse) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -696,7 +892,7 @@ func (m *ApplyResponse) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -705,11 +901,14 @@ func (m *ApplyResponse) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Applied == nil {
|
||||
m.Applied = &containerd_types1.Descriptor{}
|
||||
m.Applied = &types.Descriptor{}
|
||||
}
|
||||
if err := m.Applied.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
@ -724,9 +923,13 @@ func (m *ApplyResponse) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -751,7 +954,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -779,7 +982,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -788,10 +991,13 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Left = append(m.Left, &containerd_types.Mount{})
|
||||
m.Left = append(m.Left, &types.Mount{})
|
||||
if err := m.Left[len(m.Left)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -810,7 +1016,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -819,10 +1025,13 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Right = append(m.Right, &containerd_types.Mount{})
|
||||
m.Right = append(m.Right, &types.Mount{})
|
||||
if err := m.Right[len(m.Right)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -841,7 +1050,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -851,6 +1060,9 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -870,7 +1082,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -880,6 +1092,9 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -899,7 +1114,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -908,6 +1123,9 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -928,7 +1146,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -945,7 +1163,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
stringLenmapkey |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -955,6 +1173,9 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -971,7 +1192,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||
stringLenmapvalue |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -981,6 +1202,9 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||
if postStringIndexmapvalue < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postStringIndexmapvalue > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -1012,9 +1236,13 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -1039,7 +1267,7 @@ func (m *DiffResponse) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1067,7 +1295,7 @@ func (m *DiffResponse) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1076,11 +1304,14 @@ func (m *DiffResponse) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Diff == nil {
|
||||
m.Diff = &containerd_types1.Descriptor{}
|
||||
m.Diff = &types.Descriptor{}
|
||||
}
|
||||
if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
@ -1095,9 +1326,13 @@ func (m *DiffResponse) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthDiff
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -1161,10 +1396,13 @@ func skipDiff(dAtA []byte) (n int, err error) {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthDiff
|
||||
}
|
||||
iNdEx += length
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthDiff
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
@ -1193,6 +1431,9 @@ func skipDiff(dAtA []byte) (n int, err error) {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthDiff
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
@ -1211,40 +1452,3 @@ var (
|
||||
ErrInvalidLengthDiff = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowDiff = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/services/diff/v1/diff.proto", fileDescriptorDiff)
|
||||
}
|
||||
|
||||
var fileDescriptorDiff = []byte{
|
||||
// 457 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30,
|
||||
0x14, 0xaf, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40,
|
||||
0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x01, 0x81, 0x04, 0x4a,
|
||||
0x9b, 0x97, 0xce, 0x22, 0x8d, 0xbd, 0xd8, 0xad, 0x94, 0x1b, 0xdf, 0x85, 0x8f, 0xc2, 0x65, 0x47,
|
||||
0x8e, 0x1c, 0x69, 0x3f, 0x09, 0xb2, 0x93, 0x40, 0x24, 0xa4, 0x12, 0x76, 0xca, 0xcb, 0xf3, 0xef,
|
||||
0x9f, 0xfd, 0x6c, 0x38, 0x5d, 0x70, 0x7d, 0xb9, 0x9a, 0xb1, 0xb9, 0x58, 0xfa, 0x73, 0x91, 0xea,
|
||||
0x90, 0xa7, 0x98, 0x45, 0xf5, 0x32, 0x94, 0xdc, 0x57, 0x98, 0xad, 0xf9, 0x1c, 0x95, 0x1f, 0xf1,
|
||||
0x38, 0xf6, 0xd7, 0x87, 0xf6, 0xcb, 0x64, 0x26, 0xb4, 0xa0, 0xf7, 0xfe, 0x60, 0x59, 0x85, 0x63,
|
||||
0x76, 0x7d, 0x7d, 0xe8, 0xee, 0x2f, 0xc4, 0x42, 0x58, 0x9c, 0x6f, 0xaa, 0x82, 0xe2, 0x1e, 0x35,
|
||||
0x32, 0xd5, 0xb9, 0x44, 0xe5, 0x2f, 0xc5, 0x2a, 0xd5, 0x25, 0xef, 0xe4, 0x3f, 0x78, 0x11, 0xaa,
|
||||
0x79, 0xc6, 0xa5, 0x16, 0x59, 0x41, 0x1e, 0x5f, 0xc1, 0xde, 0x4b, 0x29, 0x93, 0x3c, 0xc0, 0xab,
|
||||
0x15, 0x2a, 0x4d, 0x9f, 0x40, 0xd7, 0xa4, 0x74, 0xc8, 0x88, 0x4c, 0x86, 0xd3, 0xfb, 0xac, 0xb6,
|
||||
0x0d, 0xab, 0xc0, 0xce, 0x7e, 0x2b, 0x04, 0x16, 0x49, 0x7d, 0xe8, 0xdb, 0x34, 0xca, 0x69, 0x8f,
|
||||
0x3a, 0x93, 0xe1, 0xf4, 0xee, 0xdf, 0x9c, 0xb7, 0x66, 0x3d, 0x28, 0x61, 0xe3, 0x37, 0x70, 0xbb,
|
||||
0xb4, 0x54, 0x52, 0xa4, 0x0a, 0xe9, 0x11, 0xdc, 0x0a, 0xa5, 0x4c, 0x38, 0x46, 0x8d, 0x6c, 0x2b,
|
||||
0xf0, 0xf8, 0x6b, 0x1b, 0x86, 0x67, 0x3c, 0x8e, 0xab, 0xec, 0x8f, 0xa0, 0x9b, 0x60, 0xac, 0x1d,
|
||||
0xb2, 0x3b, 0x87, 0x05, 0xd1, 0xc7, 0xd0, 0xcb, 0xf8, 0xe2, 0x52, 0xff, 0x2b, 0x75, 0x81, 0xa2,
|
||||
0x0f, 0x00, 0x96, 0x18, 0xf1, 0xf0, 0x93, 0x59, 0x73, 0x3a, 0x23, 0x32, 0x19, 0x04, 0x03, 0xdb,
|
||||
0xb9, 0xc8, 0x25, 0xd2, 0x3b, 0xd0, 0xc9, 0x30, 0x76, 0xba, 0xb6, 0x6f, 0x4a, 0x7a, 0x0e, 0xfd,
|
||||
0x24, 0x9c, 0x61, 0xa2, 0x9c, 0x9e, 0x35, 0x78, 0xc6, 0x76, 0xdc, 0x08, 0x56, 0xdb, 0x06, 0x3b,
|
||||
0xb7, 0xb4, 0xd7, 0xa9, 0xce, 0xf2, 0xa0, 0xd4, 0x70, 0x5f, 0xc0, 0xb0, 0xd6, 0x36, 0x76, 0x9f,
|
||||
0x31, 0xb7, 0xa7, 0x35, 0x08, 0x4c, 0x49, 0xf7, 0xa1, 0xb7, 0x0e, 0x93, 0x15, 0x3a, 0x6d, 0xdb,
|
||||
0x2b, 0x7e, 0x8e, 0xdb, 0xcf, 0xc9, 0xf8, 0x14, 0xf6, 0x0a, 0xf5, 0xf2, 0xb4, 0xab, 0x09, 0x77,
|
||||
0x9a, 0x4e, 0x78, 0xfa, 0x8d, 0x40, 0xd7, 0x48, 0xd0, 0x8f, 0xd0, 0xb3, 0x93, 0xa3, 0x07, 0x3b,
|
||||
0x37, 0x53, 0xbf, 0x50, 0xee, 0xc3, 0x26, 0xd0, 0x32, 0xda, 0x87, 0xd2, 0x67, 0xd2, 0xf4, 0xac,
|
||||
0xdc, 0x83, 0x06, 0xc8, 0x42, 0xfc, 0xd5, 0xc5, 0xf5, 0xc6, 0x6b, 0xfd, 0xd8, 0x78, 0xad, 0x2f,
|
||||
0x5b, 0x8f, 0x5c, 0x6f, 0x3d, 0xf2, 0x7d, 0xeb, 0x91, 0x9f, 0x5b, 0x8f, 0xbc, 0x3f, 0xbe, 0xd1,
|
||||
0x6b, 0x3f, 0x31, 0xdf, 0x77, 0xad, 0x59, 0xdf, 0x3e, 0xa4, 0xa7, 0xbf, 0x02, 0x00, 0x00, 0xff,
|
||||
0xff, 0x61, 0xd1, 0x6e, 0x9e, 0x34, 0x04, 0x00, 0x00,
|
||||
}
|
||||
|
||||
451
vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go
generated
vendored
@ -1,43 +1,22 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: github.com/containerd/containerd/api/services/events/v1/events.proto
|
||||
|
||||
/*
|
||||
Package events is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
github.com/containerd/containerd/api/services/events/v1/events.proto
|
||||
|
||||
It has these top-level messages:
|
||||
PublishRequest
|
||||
ForwardRequest
|
||||
SubscribeRequest
|
||||
Envelope
|
||||
*/
|
||||
package events
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
|
||||
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
|
||||
import google_protobuf1 "github.com/gogo/protobuf/types"
|
||||
import google_protobuf2 "github.com/gogo/protobuf/types"
|
||||
import _ "github.com/gogo/protobuf/types"
|
||||
|
||||
import time "time"
|
||||
|
||||
import typeurl "github.com/containerd/typeurl"
|
||||
|
||||
import context "golang.org/x/net/context"
|
||||
import grpc "google.golang.org/grpc"
|
||||
|
||||
import types "github.com/gogo/protobuf/types"
|
||||
|
||||
import strings "strings"
|
||||
import reflect "reflect"
|
||||
|
||||
import io "io"
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
github_com_containerd_typeurl "github.com/containerd/typeurl"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
|
||||
types "github.com/gogo/protobuf/types"
|
||||
grpc "google.golang.org/grpc"
|
||||
io "io"
|
||||
math "math"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
time "time"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
@ -52,40 +31,164 @@ var _ = time.Kitchen
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type PublishRequest struct {
|
||||
Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"`
|
||||
Event *google_protobuf1.Any `protobuf:"bytes,2,opt,name=event" json:"event,omitempty"`
|
||||
Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"`
|
||||
Event *types.Any `protobuf:"bytes,2,opt,name=event,proto3" json:"event,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *PublishRequest) Reset() { *m = PublishRequest{} }
|
||||
func (*PublishRequest) ProtoMessage() {}
|
||||
func (*PublishRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{0} }
|
||||
func (m *PublishRequest) Reset() { *m = PublishRequest{} }
|
||||
func (*PublishRequest) ProtoMessage() {}
|
||||
func (*PublishRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_43fcd20dc1642376, []int{0}
|
||||
}
|
||||
func (m *PublishRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *PublishRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_PublishRequest.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *PublishRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_PublishRequest.Merge(m, src)
|
||||
}
|
||||
func (m *PublishRequest) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *PublishRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_PublishRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_PublishRequest proto.InternalMessageInfo
|
||||
|
||||
type ForwardRequest struct {
|
||||
Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope" json:"envelope,omitempty"`
|
||||
Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ForwardRequest) Reset() { *m = ForwardRequest{} }
|
||||
func (*ForwardRequest) ProtoMessage() {}
|
||||
func (*ForwardRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{1} }
|
||||
func (m *ForwardRequest) Reset() { *m = ForwardRequest{} }
|
||||
func (*ForwardRequest) ProtoMessage() {}
|
||||
func (*ForwardRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_43fcd20dc1642376, []int{1}
|
||||
}
|
||||
func (m *ForwardRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ForwardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ForwardRequest.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ForwardRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ForwardRequest.Merge(m, src)
|
||||
}
|
||||
func (m *ForwardRequest) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ForwardRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ForwardRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ForwardRequest proto.InternalMessageInfo
|
||||
|
||||
type SubscribeRequest struct {
|
||||
Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
|
||||
Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SubscribeRequest) Reset() { *m = SubscribeRequest{} }
|
||||
func (*SubscribeRequest) ProtoMessage() {}
|
||||
func (*SubscribeRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{2} }
|
||||
func (m *SubscribeRequest) Reset() { *m = SubscribeRequest{} }
|
||||
func (*SubscribeRequest) ProtoMessage() {}
|
||||
func (*SubscribeRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_43fcd20dc1642376, []int{2}
|
||||
}
|
||||
func (m *SubscribeRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *SubscribeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_SubscribeRequest.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *SubscribeRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SubscribeRequest.Merge(m, src)
|
||||
}
|
||||
func (m *SubscribeRequest) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *SubscribeRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SubscribeRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SubscribeRequest proto.InternalMessageInfo
|
||||
|
||||
type Envelope struct {
|
||||
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,stdtime" json:"timestamp"`
|
||||
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"`
|
||||
Event *google_protobuf1.Any `protobuf:"bytes,4,opt,name=event" json:"event,omitempty"`
|
||||
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"`
|
||||
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"`
|
||||
Event *types.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Envelope) Reset() { *m = Envelope{} }
|
||||
func (*Envelope) ProtoMessage() {}
|
||||
func (*Envelope) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{3} }
|
||||
func (m *Envelope) Reset() { *m = Envelope{} }
|
||||
func (*Envelope) ProtoMessage() {}
|
||||
func (*Envelope) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_43fcd20dc1642376, []int{3}
|
||||
}
|
||||
func (m *Envelope) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Envelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Envelope.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Envelope) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Envelope.Merge(m, src)
|
||||
}
|
||||
func (m *Envelope) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Envelope) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Envelope.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Envelope proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*PublishRequest)(nil), "containerd.services.events.v1.PublishRequest")
|
||||
@ -94,6 +197,44 @@ func init() {
|
||||
proto.RegisterType((*Envelope)(nil), "containerd.services.events.v1.Envelope")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/events.proto", fileDescriptor_43fcd20dc1642376)
|
||||
}
|
||||
|
||||
var fileDescriptor_43fcd20dc1642376 = []byte{
|
||||
// 466 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcd, 0x8e, 0xd3, 0x30,
|
||||
0x14, 0x85, 0xeb, 0xf9, 0x6d, 0x3c, 0xd2, 0x08, 0x45, 0x15, 0x2a, 0x01, 0xd2, 0xaa, 0x1b, 0x2a,
|
||||
0x04, 0x0e, 0x53, 0x76, 0x20, 0x21, 0x28, 0x94, 0xf5, 0x28, 0x80, 0x54, 0xb1, 0x4b, 0xd2, 0xdb,
|
||||
0xd4, 0x52, 0x62, 0x9b, 0xd8, 0x09, 0x9a, 0xdd, 0x3c, 0x02, 0x1b, 0xde, 0x84, 0x0d, 0x6f, 0xd0,
|
||||
0x25, 0x4b, 0x56, 0xc0, 0xf4, 0x49, 0x50, 0x13, 0xbb, 0x61, 0x3a, 0x40, 0x10, 0xbb, 0x6b, 0xdf,
|
||||
0xe3, 0xcf, 0xb9, 0xe7, 0x38, 0xf8, 0x45, 0x4c, 0xd5, 0x22, 0x0f, 0x49, 0xc4, 0x53, 0x2f, 0xe2,
|
||||
0x4c, 0x05, 0x94, 0x41, 0x36, 0xfb, 0xb5, 0x0c, 0x04, 0xf5, 0x24, 0x64, 0x05, 0x8d, 0x40, 0x7a,
|
||||
0x50, 0x00, 0x53, 0xd2, 0x2b, 0x4e, 0x74, 0x45, 0x44, 0xc6, 0x15, 0xb7, 0x6f, 0xd7, 0x7a, 0x62,
|
||||
0xb4, 0x44, 0x2b, 0x8a, 0x13, 0xe7, 0x69, 0xe3, 0x25, 0x25, 0x26, 0xcc, 0xe7, 0x9e, 0x48, 0xf2,
|
||||
0x98, 0x32, 0x6f, 0x4e, 0x21, 0x99, 0x89, 0x40, 0x2d, 0xaa, 0x0b, 0x9c, 0x4e, 0xcc, 0x63, 0x5e,
|
||||
0x96, 0xde, 0xba, 0xd2, 0xbb, 0x37, 0x62, 0xce, 0xe3, 0x04, 0xea, 0xd3, 0x01, 0x3b, 0xd3, 0xad,
|
||||
0x9b, 0xdb, 0x2d, 0x48, 0x85, 0x32, 0xcd, 0xde, 0x76, 0x53, 0xd1, 0x14, 0xa4, 0x0a, 0x52, 0x51,
|
||||
0x09, 0x06, 0x3e, 0x3e, 0x3e, 0xcd, 0xc3, 0x84, 0xca, 0x85, 0x0f, 0xef, 0x72, 0x90, 0xca, 0xee,
|
||||
0xe0, 0x7d, 0xc5, 0x05, 0x8d, 0xba, 0xa8, 0x8f, 0x86, 0x96, 0x5f, 0x2d, 0xec, 0xbb, 0x78, 0xbf,
|
||||
0x9c, 0xb2, 0xbb, 0xd3, 0x47, 0xc3, 0xa3, 0x51, 0x87, 0x54, 0x60, 0x62, 0xc0, 0xe4, 0x19, 0x3b,
|
||||
0xf3, 0x2b, 0xc9, 0xe0, 0x0d, 0x3e, 0x7e, 0xc9, 0xb3, 0xf7, 0x41, 0x36, 0x33, 0xcc, 0xe7, 0xb8,
|
||||
0x0d, 0xac, 0x80, 0x84, 0x0b, 0x28, 0xb1, 0x47, 0xa3, 0x3b, 0xe4, 0xaf, 0x46, 0x92, 0x89, 0x96,
|
||||
0xfb, 0x9b, 0x83, 0x83, 0x7b, 0xf8, 0xda, 0xab, 0x3c, 0x94, 0x51, 0x46, 0x43, 0x30, 0xe0, 0x2e,
|
||||
0x3e, 0x9c, 0xd3, 0x44, 0x41, 0x26, 0xbb, 0xa8, 0xbf, 0x3b, 0xb4, 0x7c, 0xb3, 0x1c, 0x7c, 0x42,
|
||||
0xb8, 0x6d, 0x20, 0xf6, 0x18, 0x5b, 0x9b, 0xc1, 0xf5, 0x07, 0x38, 0x57, 0x26, 0x78, 0x6d, 0x14,
|
||||
0xe3, 0xf6, 0xf2, 0x5b, 0xaf, 0xf5, 0xe1, 0x7b, 0x0f, 0xf9, 0xf5, 0x31, 0xfb, 0x16, 0xb6, 0x58,
|
||||
0x90, 0x82, 0x14, 0x41, 0x04, 0xa5, 0x0b, 0x96, 0x5f, 0x6f, 0xd4, 0xae, 0xed, 0xfe, 0xd6, 0xb5,
|
||||
0xbd, 0x46, 0xd7, 0x1e, 0xed, 0x9d, 0x7f, 0xee, 0xa1, 0xd1, 0xc7, 0x1d, 0x7c, 0x30, 0x29, 0x5d,
|
||||
0xb0, 0x4f, 0xf1, 0xa1, 0x8e, 0xc6, 0xbe, 0xdf, 0xe0, 0xd6, 0xe5, 0x08, 0x9d, 0xeb, 0x57, 0xee,
|
||||
0x99, 0xac, 0xdf, 0xc4, 0x9a, 0xa8, 0x83, 0x69, 0x24, 0x5e, 0x0e, 0xf0, 0x8f, 0xc4, 0x18, 0x5b,
|
||||
0x9b, 0x4c, 0x6c, 0xaf, 0x81, 0xb9, 0x9d, 0x9e, 0xf3, 0xaf, 0x8f, 0xe0, 0x01, 0x1a, 0x4f, 0x97,
|
||||
0x17, 0x6e, 0xeb, 0xeb, 0x85, 0xdb, 0x3a, 0x5f, 0xb9, 0x68, 0xb9, 0x72, 0xd1, 0x97, 0x95, 0x8b,
|
||||
0x7e, 0xac, 0x5c, 0xf4, 0xf6, 0xc9, 0x7f, 0xfe, 0xd7, 0x8f, 0xab, 0x6a, 0xda, 0x9a, 0xa2, 0xf0,
|
||||
0xa0, 0x1c, 0xeb, 0xe1, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe6, 0xbf, 0x19, 0xa6, 0x24, 0x04,
|
||||
0x00, 0x00,
|
||||
}
|
||||
|
||||
// Field returns the value for the given fieldpath as a string, if defined.
|
||||
// If the value is not defined, the second value will be false.
|
||||
func (m *Envelope) Field(fieldpath []string) (string, bool) {
|
||||
@ -108,7 +249,7 @@ func (m *Envelope) Field(fieldpath []string) (string, bool) {
|
||||
case "topic":
|
||||
return string(m.Topic), len(m.Topic) > 0
|
||||
case "event":
|
||||
decoded, err := typeurl.UnmarshalAny(m.Event)
|
||||
decoded, err := github_com_containerd_typeurl.UnmarshalAny(m.Event)
|
||||
if err != nil {
|
||||
return "", false
|
||||
}
|
||||
@ -130,20 +271,21 @@ var _ grpc.ClientConn
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for Events service
|
||||
|
||||
// EventsClient is the client API for Events service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type EventsClient interface {
|
||||
// Publish an event to a topic.
|
||||
//
|
||||
// The event will be packed into a timestamp envelope with the namespace
|
||||
// introspected from the context. The envelope will then be dispatched.
|
||||
Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
|
||||
Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*types.Empty, error)
|
||||
// Forward sends an event that has already been packaged into an envelope
|
||||
// with a timestamp and namespace.
|
||||
//
|
||||
// This is useful if earlier timestamping is required or when forwarding on
|
||||
// behalf of another component, namespace or publisher.
|
||||
Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
|
||||
Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*types.Empty, error)
|
||||
// Subscribe to a stream of events, possibly returning only that match any
|
||||
// of the provided filters.
|
||||
//
|
||||
@ -162,18 +304,18 @@ func NewEventsClient(cc *grpc.ClientConn) EventsClient {
|
||||
return &eventsClient{cc}
|
||||
}
|
||||
|
||||
func (c *eventsClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
|
||||
out := new(google_protobuf2.Empty)
|
||||
err := grpc.Invoke(ctx, "/containerd.services.events.v1.Events/Publish", in, out, c.cc, opts...)
|
||||
func (c *eventsClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*types.Empty, error) {
|
||||
out := new(types.Empty)
|
||||
err := c.cc.Invoke(ctx, "/containerd.services.events.v1.Events/Publish", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *eventsClient) Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
|
||||
out := new(google_protobuf2.Empty)
|
||||
err := grpc.Invoke(ctx, "/containerd.services.events.v1.Events/Forward", in, out, c.cc, opts...)
|
||||
func (c *eventsClient) Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*types.Empty, error) {
|
||||
out := new(types.Empty)
|
||||
err := c.cc.Invoke(ctx, "/containerd.services.events.v1.Events/Forward", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -181,7 +323,7 @@ func (c *eventsClient) Forward(ctx context.Context, in *ForwardRequest, opts ...
|
||||
}
|
||||
|
||||
func (c *eventsClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_Events_serviceDesc.Streams[0], c.cc, "/containerd.services.events.v1.Events/Subscribe", opts...)
|
||||
stream, err := c.cc.NewStream(ctx, &_Events_serviceDesc.Streams[0], "/containerd.services.events.v1.Events/Subscribe", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -212,20 +354,19 @@ func (x *eventsSubscribeClient) Recv() (*Envelope, error) {
|
||||
return m, nil
|
||||
}
|
||||
|
||||
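// Aside for readers of this diff, not part of the vendored file: a minimal
// sketch of how the regenerated streaming client is typically consumed. The
// filter expression and the surrounding function are illustrative
// assumptions, not containerd API guarantees.
func watchEvents(ctx context.Context, client EventsClient) error {
	sub, err := client.Subscribe(ctx, &SubscribeRequest{
		Filters: []string{`topic~="/tasks/"`}, // hypothetical filter
	})
	if err != nil {
		return err
	}
	for {
		// Recv blocks until the next Envelope arrives or the stream ends.
		envelope, err := sub.Recv()
		if err != nil {
			return err // io.EOF or a gRPC status error terminates the loop
		}
		fmt.Printf("%s %s %s\n", envelope.Namespace, envelope.Topic, envelope.Timestamp)
	}
}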
// Server API for Events service
|
||||
|
||||
// EventsServer is the server API for Events service.
|
||||
type EventsServer interface {
|
||||
// Publish an event to a topic.
|
||||
//
|
||||
// The event will be packed into a timestamp envelope with the namespace
|
||||
// introspected from the context. The envelope will then be dispatched.
|
||||
Publish(context.Context, *PublishRequest) (*google_protobuf2.Empty, error)
|
||||
Publish(context.Context, *PublishRequest) (*types.Empty, error)
|
||||
// Forward sends an event that has already been packaged into an envelope
|
||||
// with a timestamp and namespace.
|
||||
//
|
||||
// This is useful if earlier timestamping is required or when forwarding on
|
||||
// behalf of another component, namespace or publisher.
|
||||
Forward(context.Context, *ForwardRequest) (*google_protobuf2.Empty, error)
|
||||
Forward(context.Context, *ForwardRequest) (*types.Empty, error)
|
||||
// Subscribe to a stream of events, possibly returning only that match any
|
||||
// of the provided filters.
|
||||
//
|
||||
@ -351,6 +492,9 @@ func (m *PublishRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
i += n1
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -379,6 +523,9 @@ func (m *ForwardRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
i += n2
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -412,6 +559,9 @@ func (m *SubscribeRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -432,8 +582,8 @@ func (m *Envelope) MarshalTo(dAtA []byte) (int, error) {
|
||||
_ = l
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintEvents(dAtA, i, uint64(types.SizeOfStdTime(m.Timestamp)))
|
||||
n3, err := types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
|
||||
i = encodeVarintEvents(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
|
||||
n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@ -460,6 +610,9 @@ func (m *Envelope) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
i += n4
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -473,6 +626,9 @@ func encodeVarintEvents(dAtA []byte, offset int, v uint64) int {
|
||||
return offset + 1
|
||||
}
|
||||
func (m *PublishRequest) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Topic)
|
||||
@ -483,20 +639,32 @@ func (m *PublishRequest) Size() (n int) {
|
||||
l = m.Event.Size()
|
||||
n += 1 + l + sovEvents(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ForwardRequest) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Envelope != nil {
|
||||
l = m.Envelope.Size()
|
||||
n += 1 + l + sovEvents(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *SubscribeRequest) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Filters) > 0 {
|
||||
@ -505,13 +673,19 @@ func (m *SubscribeRequest) Size() (n int) {
|
||||
n += 1 + l + sovEvents(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *Envelope) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = types.SizeOfStdTime(m.Timestamp)
|
||||
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
|
||||
n += 1 + l + sovEvents(uint64(l))
|
||||
l = len(m.Namespace)
|
||||
if l > 0 {
|
||||
@ -525,6 +699,9 @@ func (m *Envelope) Size() (n int) {
|
||||
l = m.Event.Size()
|
||||
n += 1 + l + sovEvents(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@ -547,7 +724,8 @@ func (this *PublishRequest) String() string {
|
||||
}
|
||||
s := strings.Join([]string{`&PublishRequest{`,
|
||||
`Topic:` + fmt.Sprintf("%v", this.Topic) + `,`,
|
||||
`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "google_protobuf1.Any", 1) + `,`,
|
||||
`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "types.Any", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -558,6 +736,7 @@ func (this *ForwardRequest) String() string {
|
||||
}
|
||||
s := strings.Join([]string{`&ForwardRequest{`,
|
||||
`Envelope:` + strings.Replace(fmt.Sprintf("%v", this.Envelope), "Envelope", "Envelope", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -568,6 +747,7 @@ func (this *SubscribeRequest) String() string {
|
||||
}
|
||||
s := strings.Join([]string{`&SubscribeRequest{`,
|
||||
`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -577,10 +757,11 @@ func (this *Envelope) String() string {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&Envelope{`,
|
||||
`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||
`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
|
||||
`Topic:` + fmt.Sprintf("%v", this.Topic) + `,`,
|
||||
`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "google_protobuf1.Any", 1) + `,`,
|
||||
`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "types.Any", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -608,7 +789,7 @@ func (m *PublishRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -636,7 +817,7 @@ func (m *PublishRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -646,6 +827,9 @@ func (m *PublishRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -665,7 +849,7 @@ func (m *PublishRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -674,11 +858,14 @@ func (m *PublishRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Event == nil {
|
||||
m.Event = &google_protobuf1.Any{}
|
||||
m.Event = &types.Any{}
|
||||
}
|
||||
if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
@ -693,9 +880,13 @@ func (m *PublishRequest) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -720,7 +911,7 @@ func (m *ForwardRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -748,7 +939,7 @@ func (m *ForwardRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -757,6 +948,9 @@ func (m *ForwardRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -776,9 +970,13 @@ func (m *ForwardRequest) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -803,7 +1001,7 @@ func (m *SubscribeRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -831,7 +1029,7 @@ func (m *SubscribeRequest) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -841,6 +1039,9 @@ func (m *SubscribeRequest) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -855,9 +1056,13 @@ func (m *SubscribeRequest) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -882,7 +1087,7 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -910,7 +1115,7 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -919,10 +1124,13 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
|
||||
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
@ -940,7 +1148,7 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -950,6 +1158,9 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -969,7 +1180,7 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -979,6 +1190,9 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -998,7 +1212,7 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1007,11 +1221,14 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Event == nil {
|
||||
m.Event = &google_protobuf1.Any{}
|
||||
m.Event = &types.Any{}
|
||||
}
|
||||
if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
@ -1026,9 +1243,13 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthEvents
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -1092,10 +1313,13 @@ func skipEvents(dAtA []byte) (n int, err error) {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthEvents
|
||||
}
|
||||
iNdEx += length
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthEvents
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
@ -1124,6 +1348,9 @@ func skipEvents(dAtA []byte) (n int, err error) {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthEvents
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
@ -1142,41 +1369,3 @@ var (
|
||||
ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/events.proto", fileDescriptorEvents)
|
||||
}
|
||||
|
||||
var fileDescriptorEvents = []byte{
|
||||
// 466 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcd, 0x8e, 0xd3, 0x30,
|
||||
0x14, 0x85, 0xeb, 0xf9, 0x6d, 0x3c, 0xd2, 0x08, 0x45, 0x15, 0x2a, 0x01, 0xd2, 0xaa, 0x1b, 0x2a,
|
||||
0x04, 0x0e, 0x53, 0x76, 0x20, 0x21, 0x28, 0x94, 0xf5, 0x28, 0x80, 0x54, 0xb1, 0x4b, 0xd2, 0xdb,
|
||||
0xd4, 0x52, 0x62, 0x9b, 0xd8, 0x09, 0x9a, 0xdd, 0x3c, 0x02, 0x1b, 0xde, 0x84, 0x0d, 0x6f, 0xd0,
|
||||
0x25, 0x4b, 0x56, 0xc0, 0xf4, 0x49, 0x50, 0x13, 0xbb, 0x61, 0x3a, 0x40, 0x10, 0xbb, 0x6b, 0xdf,
|
||||
0xe3, 0xcf, 0xb9, 0xe7, 0x38, 0xf8, 0x45, 0x4c, 0xd5, 0x22, 0x0f, 0x49, 0xc4, 0x53, 0x2f, 0xe2,
|
||||
0x4c, 0x05, 0x94, 0x41, 0x36, 0xfb, 0xb5, 0x0c, 0x04, 0xf5, 0x24, 0x64, 0x05, 0x8d, 0x40, 0x7a,
|
||||
0x50, 0x00, 0x53, 0xd2, 0x2b, 0x4e, 0x74, 0x45, 0x44, 0xc6, 0x15, 0xb7, 0x6f, 0xd7, 0x7a, 0x62,
|
||||
0xb4, 0x44, 0x2b, 0x8a, 0x13, 0xe7, 0x69, 0xe3, 0x25, 0x25, 0x26, 0xcc, 0xe7, 0x9e, 0x48, 0xf2,
|
||||
0x98, 0x32, 0x6f, 0x4e, 0x21, 0x99, 0x89, 0x40, 0x2d, 0xaa, 0x0b, 0x9c, 0x4e, 0xcc, 0x63, 0x5e,
|
||||
0x96, 0xde, 0xba, 0xd2, 0xbb, 0x37, 0x62, 0xce, 0xe3, 0x04, 0xea, 0xd3, 0x01, 0x3b, 0xd3, 0xad,
|
||||
0x9b, 0xdb, 0x2d, 0x48, 0x85, 0x32, 0xcd, 0xde, 0x76, 0x53, 0xd1, 0x14, 0xa4, 0x0a, 0x52, 0x51,
|
||||
0x09, 0x06, 0x3e, 0x3e, 0x3e, 0xcd, 0xc3, 0x84, 0xca, 0x85, 0x0f, 0xef, 0x72, 0x90, 0xca, 0xee,
|
||||
0xe0, 0x7d, 0xc5, 0x05, 0x8d, 0xba, 0xa8, 0x8f, 0x86, 0x96, 0x5f, 0x2d, 0xec, 0xbb, 0x78, 0xbf,
|
||||
0x9c, 0xb2, 0xbb, 0xd3, 0x47, 0xc3, 0xa3, 0x51, 0x87, 0x54, 0x60, 0x62, 0xc0, 0xe4, 0x19, 0x3b,
|
||||
0xf3, 0x2b, 0xc9, 0xe0, 0x0d, 0x3e, 0x7e, 0xc9, 0xb3, 0xf7, 0x41, 0x36, 0x33, 0xcc, 0xe7, 0xb8,
|
||||
0x0d, 0xac, 0x80, 0x84, 0x0b, 0x28, 0xb1, 0x47, 0xa3, 0x3b, 0xe4, 0xaf, 0x46, 0x92, 0x89, 0x96,
|
||||
0xfb, 0x9b, 0x83, 0x83, 0x7b, 0xf8, 0xda, 0xab, 0x3c, 0x94, 0x51, 0x46, 0x43, 0x30, 0xe0, 0x2e,
|
||||
0x3e, 0x9c, 0xd3, 0x44, 0x41, 0x26, 0xbb, 0xa8, 0xbf, 0x3b, 0xb4, 0x7c, 0xb3, 0x1c, 0x7c, 0x42,
|
||||
0xb8, 0x6d, 0x20, 0xf6, 0x18, 0x5b, 0x9b, 0xc1, 0xf5, 0x07, 0x38, 0x57, 0x26, 0x78, 0x6d, 0x14,
|
||||
0xe3, 0xf6, 0xf2, 0x5b, 0xaf, 0xf5, 0xe1, 0x7b, 0x0f, 0xf9, 0xf5, 0x31, 0xfb, 0x16, 0xb6, 0x58,
|
||||
0x90, 0x82, 0x14, 0x41, 0x04, 0xa5, 0x0b, 0x96, 0x5f, 0x6f, 0xd4, 0xae, 0xed, 0xfe, 0xd6, 0xb5,
|
||||
0xbd, 0x46, 0xd7, 0x1e, 0xed, 0x9d, 0x7f, 0xee, 0xa1, 0xd1, 0xc7, 0x1d, 0x7c, 0x30, 0x29, 0x5d,
|
||||
0xb0, 0x4f, 0xf1, 0xa1, 0x8e, 0xc6, 0xbe, 0xdf, 0xe0, 0xd6, 0xe5, 0x08, 0x9d, 0xeb, 0x57, 0xee,
|
||||
0x99, 0xac, 0xdf, 0xc4, 0x9a, 0xa8, 0x83, 0x69, 0x24, 0x5e, 0x0e, 0xf0, 0x8f, 0xc4, 0x18, 0x5b,
|
||||
0x9b, 0x4c, 0x6c, 0xaf, 0x81, 0xb9, 0x9d, 0x9e, 0xf3, 0xaf, 0x8f, 0xe0, 0x01, 0x1a, 0x4f, 0x97,
|
||||
0x17, 0x6e, 0xeb, 0xeb, 0x85, 0xdb, 0x3a, 0x5f, 0xb9, 0x68, 0xb9, 0x72, 0xd1, 0x97, 0x95, 0x8b,
|
||||
0x7e, 0xac, 0x5c, 0xf4, 0xf6, 0xc9, 0x7f, 0xfe, 0xd7, 0x8f, 0xab, 0x6a, 0xda, 0x9a, 0xa2, 0xf0,
|
||||
0xa0, 0x1c, 0xeb, 0xe1, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe6, 0xbf, 0x19, 0xa6, 0x24, 0x04,
|
||||
0x00, 0x00,
|
||||
}
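The regenerated client above keeps the same Publish/Forward/Subscribe surface; the diff changes descriptor naming and call dispatch (c.cc.Invoke and c.cc.NewStream instead of the package-level grpc.Invoke helpers). As a hedged illustration only, a minimal caller of the Publish RPC might look like the sketch below; the socket path, topic, and payload are assumptions, and a real call additionally needs the containerd namespace attached to the outgoing context.

package main

import (
	"context"
	"log"

	eventsapi "github.com/containerd/containerd/api/services/events/v1"
	"github.com/gogo/protobuf/types"
	"google.golang.org/grpc"
)

func main() {
	// Socket path and dialing details are assumptions for illustration.
	conn, err := grpc.Dial("unix:///run/containerd/containerd.sock", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := eventsapi.NewEventsClient(conn)

	// Event is a protobuf Any; the server wraps it in a timestamped Envelope,
	// taking the namespace from the (namespaced) request context.
	_, err = client.Publish(context.Background(), &eventsapi.PublishRequest{
		Topic: "/demo/example", // hypothetical topic
		Event: &types.Any{TypeUrl: "example.com/demo.Payload", Value: []byte("hello")},
	})
	if err != nil {
		log.Fatal(err)
	}
}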
865 vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go (generated, vendored): file diff suppressed because it is too large
377 vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go (generated, vendored)
@ -1,35 +1,21 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
|
||||
|
||||
/*
|
||||
Package introspection is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Plugin
|
||||
PluginsRequest
|
||||
PluginsResponse
|
||||
*/
|
||||
package introspection
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import containerd_types "github.com/containerd/containerd/api/types"
|
||||
import google_rpc "github.com/gogo/googleapis/google/rpc"
|
||||
|
||||
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
|
||||
|
||||
import context "golang.org/x/net/context"
|
||||
import grpc "google.golang.org/grpc"
|
||||
|
||||
import strings "strings"
|
||||
import reflect "reflect"
|
||||
import sortkeys "github.com/gogo/protobuf/sortkeys"
|
||||
|
||||
import io "io"
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
types "github.com/containerd/containerd/api/types"
|
||||
rpc "github.com/gogo/googleapis/google/rpc"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
|
||||
grpc "google.golang.org/grpc"
|
||||
io "io"
|
||||
math "math"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
@ -51,7 +37,7 @@ type Plugin struct {
|
||||
// ID identifies the plugin uniquely in the system.
|
||||
ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
// Requires lists the plugin types required by this plugin.
|
||||
Requires []string `protobuf:"bytes,3,rep,name=requires" json:"requires,omitempty"`
|
||||
Requires []string `protobuf:"bytes,3,rep,name=requires,proto3" json:"requires,omitempty"`
|
||||
// Platforms enumerates the platforms this plugin will support.
|
||||
//
|
||||
// If values are provided here, the plugin will only be operable under the
|
||||
@ -61,30 +47,61 @@ type Plugin struct {
|
||||
//
|
||||
// If the plugin prefers certain platforms over others, they should be
|
||||
// listed from most to least preferred.
|
||||
Platforms []containerd_types.Platform `protobuf:"bytes,4,rep,name=platforms" json:"platforms"`
|
||||
Platforms []types.Platform `protobuf:"bytes,4,rep,name=platforms,proto3" json:"platforms"`
|
||||
// Exports allows plugins to provide values about state or configuration to
|
||||
// interested parties.
|
||||
//
|
||||
// One example is exposing the configured path of a snapshotter plugin.
|
||||
Exports map[string]string `protobuf:"bytes,5,rep,name=exports" json:"exports,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
Exports map[string]string `protobuf:"bytes,5,rep,name=exports,proto3" json:"exports,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// Capabilities allows plugins to communicate feature switches to allow
|
||||
// clients to detect features that may not be on be default or may be
|
||||
// different from version to version.
|
||||
//
|
||||
// Use this sparingly.
|
||||
Capabilities []string `protobuf:"bytes,6,rep,name=capabilities" json:"capabilities,omitempty"`
|
||||
Capabilities []string `protobuf:"bytes,6,rep,name=capabilities,proto3" json:"capabilities,omitempty"`
|
||||
// InitErr will be set if the plugin fails initialization.
|
||||
//
|
||||
// This means the plugin may have been registered but a non-terminal error
|
||||
// was encountered during initialization.
|
||||
//
|
||||
// Plugins that have this value set cannot be used.
|
||||
InitErr *google_rpc.Status `protobuf:"bytes,7,opt,name=init_err,json=initErr" json:"init_err,omitempty"`
|
||||
InitErr *rpc.Status `protobuf:"bytes,7,opt,name=init_err,json=initErr,proto3" json:"init_err,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Plugin) Reset() { *m = Plugin{} }
|
||||
func (*Plugin) ProtoMessage() {}
|
||||
func (*Plugin) Descriptor() ([]byte, []int) { return fileDescriptorIntrospection, []int{0} }
|
||||
func (m *Plugin) Reset() { *m = Plugin{} }
|
||||
func (*Plugin) ProtoMessage() {}
|
||||
func (*Plugin) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_1a14fda866f10715, []int{0}
|
||||
}
|
||||
func (m *Plugin) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Plugin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Plugin.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Plugin) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Plugin.Merge(m, src)
|
||||
}
|
||||
func (m *Plugin) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Plugin) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Plugin.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Plugin proto.InternalMessageInfo
|
||||
|
||||
type PluginsRequest struct {
|
||||
// Filters contains one or more filters using the syntax defined in the
|
||||
@ -97,27 +114,129 @@ type PluginsRequest struct {
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
//
|
||||
// If filters is zero-length or nil, all items will be returned.
|
||||
Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
|
||||
Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *PluginsRequest) Reset() { *m = PluginsRequest{} }
|
||||
func (*PluginsRequest) ProtoMessage() {}
|
||||
func (*PluginsRequest) Descriptor() ([]byte, []int) { return fileDescriptorIntrospection, []int{1} }
|
||||
func (m *PluginsRequest) Reset() { *m = PluginsRequest{} }
|
||||
func (*PluginsRequest) ProtoMessage() {}
|
||||
func (*PluginsRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_1a14fda866f10715, []int{1}
|
||||
}
|
||||
func (m *PluginsRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *PluginsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_PluginsRequest.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *PluginsRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_PluginsRequest.Merge(m, src)
|
||||
}
|
||||
func (m *PluginsRequest) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *PluginsRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_PluginsRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_PluginsRequest proto.InternalMessageInfo
|
||||
|
||||
type PluginsResponse struct {
|
||||
Plugins []Plugin `protobuf:"bytes,1,rep,name=plugins" json:"plugins"`
|
||||
Plugins []Plugin `protobuf:"bytes,1,rep,name=plugins,proto3" json:"plugins"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *PluginsResponse) Reset() { *m = PluginsResponse{} }
|
||||
func (*PluginsResponse) ProtoMessage() {}
|
||||
func (*PluginsResponse) Descriptor() ([]byte, []int) { return fileDescriptorIntrospection, []int{2} }
|
||||
func (m *PluginsResponse) Reset() { *m = PluginsResponse{} }
|
||||
func (*PluginsResponse) ProtoMessage() {}
|
||||
func (*PluginsResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_1a14fda866f10715, []int{2}
|
||||
}
|
||||
func (m *PluginsResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *PluginsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_PluginsResponse.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *PluginsResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_PluginsResponse.Merge(m, src)
|
||||
}
|
||||
func (m *PluginsResponse) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *PluginsResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_PluginsResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_PluginsResponse proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Plugin)(nil), "containerd.services.introspection.v1.Plugin")
|
||||
proto.RegisterMapType((map[string]string)(nil), "containerd.services.introspection.v1.Plugin.ExportsEntry")
|
||||
proto.RegisterType((*PluginsRequest)(nil), "containerd.services.introspection.v1.PluginsRequest")
|
||||
proto.RegisterType((*PluginsResponse)(nil), "containerd.services.introspection.v1.PluginsResponse")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", fileDescriptor_1a14fda866f10715)
|
||||
}
|
||||
|
||||
var fileDescriptor_1a14fda866f10715 = []byte{
|
||||
// 487 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x40,
|
||||
0x10, 0xcd, 0x3a, 0x69, 0xdc, 0x4c, 0xca, 0x87, 0x56, 0x15, 0x58, 0x3e, 0xb8, 0x51, 0xc4, 0x21,
|
||||
0x42, 0xb0, 0x56, 0x03, 0x48, 0xb4, 0x48, 0x1c, 0x22, 0x72, 0xa8, 0xd4, 0x43, 0xe5, 0x5e, 0x10,
|
||||
0x97, 0xca, 0x71, 0x36, 0x66, 0x85, 0xeb, 0xdd, 0xee, 0xae, 0x2d, 0x72, 0xe3, 0xc6, 0x5f, 0xcb,
|
||||
0x91, 0x23, 0xa7, 0x8a, 0xfa, 0x37, 0xf0, 0x03, 0x90, 0xbd, 0x76, 0x9b, 0xdc, 0x12, 0x71, 0x9b,
|
||||
0x79, 0x7e, 0x6f, 0xe6, 0xcd, 0x93, 0x17, 0x82, 0x98, 0xe9, 0xaf, 0xd9, 0x8c, 0x44, 0xfc, 0xda,
|
||||
0x8f, 0x78, 0xaa, 0x43, 0x96, 0x52, 0x39, 0x5f, 0x2f, 0x43, 0xc1, 0x7c, 0x45, 0x65, 0xce, 0x22,
|
||||
0xaa, 0x7c, 0x96, 0x6a, 0xc9, 0x95, 0xa0, 0x91, 0x66, 0x3c, 0xf5, 0xf3, 0xe3, 0x4d, 0x80, 0x08,
|
||||
0xc9, 0x35, 0xc7, 0x2f, 0x1e, 0xd4, 0xa4, 0x51, 0x92, 0x4d, 0x62, 0x7e, 0xec, 0x9e, 0x6c, 0xb5,
|
||||
0x59, 0x2f, 0x05, 0x55, 0xbe, 0x48, 0x42, 0xbd, 0xe0, 0xf2, 0xda, 0x2c, 0x70, 0x9f, 0xc7, 0x9c,
|
||||
0xc7, 0x09, 0xf5, 0xa5, 0x88, 0x7c, 0xa5, 0x43, 0x9d, 0xa9, 0xfa, 0xc3, 0x61, 0xcc, 0x63, 0x5e,
|
||||
0x95, 0x7e, 0x59, 0x19, 0x74, 0xf8, 0xd7, 0x82, 0xee, 0x45, 0x92, 0xc5, 0x2c, 0xc5, 0x18, 0x3a,
|
||||
0xe5, 0x44, 0x07, 0x0d, 0xd0, 0xa8, 0x17, 0x54, 0x35, 0x7e, 0x06, 0x16, 0x9b, 0x3b, 0x56, 0x89,
|
||||
0x4c, 0xba, 0xc5, 0xed, 0x91, 0x75, 0xf6, 0x29, 0xb0, 0xd8, 0x1c, 0xbb, 0xb0, 0x2f, 0xe9, 0x4d,
|
||||
0xc6, 0x24, 0x55, 0x4e, 0x7b, 0xd0, 0x1e, 0xf5, 0x82, 0xfb, 0x1e, 0x7f, 0x84, 0x5e, 0xe3, 0x49,
|
||||
0x39, 0x9d, 0x41, 0x7b, 0xd4, 0x1f, 0xbb, 0x64, 0xed, 0xec, 0xca, 0x36, 0xb9, 0xa8, 0x29, 0x93,
|
||||
0xce, 0xea, 0xf6, 0xa8, 0x15, 0x3c, 0x48, 0xf0, 0x25, 0xd8, 0xf4, 0xbb, 0xe0, 0x52, 0x2b, 0x67,
|
||||
0xaf, 0x52, 0x9f, 0x90, 0x6d, 0x42, 0x23, 0xe6, 0x0c, 0x32, 0x35, 0xda, 0x69, 0xaa, 0xe5, 0x32,
|
||||
0x68, 0x26, 0xe1, 0x21, 0x1c, 0x44, 0xa1, 0x08, 0x67, 0x2c, 0x61, 0x9a, 0x51, 0xe5, 0x74, 0x2b,
|
||||
0xd3, 0x1b, 0x18, 0x7e, 0x0d, 0xfb, 0x2c, 0x65, 0xfa, 0x8a, 0x4a, 0xe9, 0xd8, 0x03, 0x34, 0xea,
|
||||
0x8f, 0x31, 0x31, 0x69, 0x12, 0x29, 0x22, 0x72, 0x59, 0xa5, 0x19, 0xd8, 0x25, 0x67, 0x2a, 0xa5,
|
||||
0x7b, 0x0a, 0x07, 0xeb, 0xbb, 0xf0, 0x53, 0x68, 0x7f, 0xa3, 0xcb, 0x3a, 0xbe, 0xb2, 0xc4, 0x87,
|
||||
0xb0, 0x97, 0x87, 0x49, 0x46, 0x4d, 0x80, 0x81, 0x69, 0x4e, 0xad, 0xf7, 0x68, 0xf8, 0x12, 0x1e,
|
||||
0x1b, 0xbb, 0x2a, 0xa0, 0x37, 0x19, 0x55, 0x1a, 0x3b, 0x60, 0x2f, 0x58, 0xa2, 0xa9, 0x54, 0x0e,
|
||||
0xaa, 0xbc, 0x35, 0xed, 0xf0, 0x0a, 0x9e, 0xdc, 0x73, 0x95, 0xe0, 0xa9, 0xa2, 0xf8, 0x1c, 0x6c,
|
||||
0x61, 0xa0, 0x8a, 0xdc, 0x1f, 0xbf, 0xda, 0x25, 0xa2, 0x3a, 0xf2, 0x66, 0xc4, 0xf8, 0x27, 0x82,
|
||||
0x47, 0x67, 0xeb, 0x54, 0x9c, 0x83, 0x5d, 0xaf, 0xc4, 0x6f, 0x77, 0x99, 0xdc, 0x5c, 0xe3, 0xbe,
|
||||
0xdb, 0x51, 0x65, 0xee, 0x9a, 0x2c, 0x56, 0x77, 0x5e, 0xeb, 0xf7, 0x9d, 0xd7, 0xfa, 0x51, 0x78,
|
||||
0x68, 0x55, 0x78, 0xe8, 0x57, 0xe1, 0xa1, 0x3f, 0x85, 0x87, 0xbe, 0x9c, 0xff, 0xdf, 0x5b, 0xfc,
|
||||
0xb0, 0x01, 0x7c, 0xb6, 0x66, 0xdd, 0xea, 0xf7, 0x7f, 0xf3, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe6,
|
||||
0x72, 0xde, 0x35, 0xe4, 0x03, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
@ -126,8 +245,9 @@ var _ grpc.ClientConn
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for Introspection service
|
||||
|
||||
// IntrospectionClient is the client API for Introspection service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type IntrospectionClient interface {
|
||||
// Plugins returns a list of plugins in containerd.
|
||||
//
|
||||
@ -146,15 +266,14 @@ func NewIntrospectionClient(cc *grpc.ClientConn) IntrospectionClient {
|
||||
|
||||
func (c *introspectionClient) Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) {
|
||||
out := new(PluginsResponse)
|
||||
err := grpc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Plugins", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Plugins", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
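// Aside for readers of this diff, not part of the vendored file: a minimal
// sketch of calling the Plugins RPC through the regenerated client. The
// filter expression is an illustrative assumption; an empty Filters slice
// returns every registered plugin.
func listGRPCPlugins(ctx context.Context, client IntrospectionClient) error {
	resp, err := client.Plugins(ctx, &PluginsRequest{
		Filters: []string{`type=="io.containerd.grpc.v1"`}, // hypothetical filter
	})
	if err != nil {
		return err
	}
	for _, p := range resp.Plugins {
		// InitErr is non-nil when a plugin registered but failed to initialize.
		fmt.Printf("%s/%s init_err=%v\n", p.Type, p.ID, p.InitErr)
	}
	return nil
}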
// Server API for Introspection service
|
||||
|
||||
// IntrospectionServer is the server API for Introspection service.
|
||||
type IntrospectionServer interface {
|
||||
// Plugins returns a list of plugins in containerd.
|
||||
//
|
||||
@ -294,6 +413,9 @@ func (m *Plugin) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
i += n1
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -327,6 +449,9 @@ func (m *PluginsRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -357,6 +482,9 @@ func (m *PluginsResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||
i += n
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -370,6 +498,9 @@ func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int {
|
||||
return offset + 1
|
||||
}
|
||||
func (m *Plugin) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Type)
|
||||
@ -410,10 +541,16 @@ func (m *Plugin) Size() (n int) {
|
||||
l = m.InitErr.Size()
|
||||
n += 1 + l + sovIntrospection(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *PluginsRequest) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Filters) > 0 {
|
||||
@ -422,10 +559,16 @@ func (m *PluginsRequest) Size() (n int) {
|
||||
n += 1 + l + sovIntrospection(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *PluginsResponse) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Plugins) > 0 {
|
||||
@ -434,6 +577,9 @@ func (m *PluginsResponse) Size() (n int) {
|
||||
n += 1 + l + sovIntrospection(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@ -458,7 +604,7 @@ func (this *Plugin) String() string {
|
||||
for k, _ := range this.Exports {
|
||||
keysForExports = append(keysForExports, k)
|
||||
}
|
||||
sortkeys.Strings(keysForExports)
|
||||
github_com_gogo_protobuf_sortkeys.Strings(keysForExports)
|
||||
mapStringForExports := "map[string]string{"
|
||||
for _, k := range keysForExports {
|
||||
mapStringForExports += fmt.Sprintf("%v: %v,", k, this.Exports[k])
|
||||
@ -468,10 +614,11 @@ func (this *Plugin) String() string {
|
||||
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
|
||||
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
|
||||
`Requires:` + fmt.Sprintf("%v", this.Requires) + `,`,
|
||||
`Platforms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Platforms), "Platform", "containerd_types.Platform", 1), `&`, ``, 1) + `,`,
|
||||
`Platforms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Platforms), "Platform", "types.Platform", 1), `&`, ``, 1) + `,`,
|
||||
`Exports:` + mapStringForExports + `,`,
|
||||
`Capabilities:` + fmt.Sprintf("%v", this.Capabilities) + `,`,
|
||||
`InitErr:` + strings.Replace(fmt.Sprintf("%v", this.InitErr), "Status", "google_rpc.Status", 1) + `,`,
|
||||
`InitErr:` + strings.Replace(fmt.Sprintf("%v", this.InitErr), "Status", "rpc.Status", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -482,6 +629,7 @@ func (this *PluginsRequest) String() string {
|
||||
}
|
||||
s := strings.Join([]string{`&PluginsRequest{`,
|
||||
`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -492,6 +640,7 @@ func (this *PluginsResponse) String() string {
|
||||
}
|
||||
s := strings.Join([]string{`&PluginsResponse{`,
|
||||
`Plugins:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Plugins), "Plugin", "Plugin", 1), `&`, ``, 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -519,7 +668,7 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -547,7 +696,7 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -557,6 +706,9 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthIntrospection
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -576,7 +728,7 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -586,6 +738,9 @@ func (m *Plugin) Unmarshal(dAtA []byte) error {
[Remainder of the regenerated introspection.pb.go diff: the Unmarshal methods of Plugin, PluginsRequest, and PluginsResponse and the skipIntrospection helper are regenerated with simplified varint decoding (e.g. stringLen |= uint64(b&0x7F) << shift), additional negative-offset length checks returning ErrInvalidLengthIntrospection, unknown fields preserved via m.XXX_unrecognized, and type references updated (containerd_types.Platform becomes types.Platform, google_rpc.Status becomes rpc.Status); the trailing proto.RegisterFile init and the 487-byte gzipped fileDescriptorIntrospection block are removed from the end of the file.]
583  vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go (generated, vendored): File diff suppressed because it is too large
797  vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go (generated, vendored): File diff suppressed because it is too large
1490  vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go (generated, vendored): File diff suppressed because it is too large
2224  vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go (generated, vendored): File diff suppressed because it is too large
177  vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go (generated, vendored)
[Regenerated version.pb.go: imports are consolidated into a single block (context, fmt, proto, types, grpc, io, math, reflect, strings), with golang.org/x/net/context replaced by the standard context package; VersionResponse gains XXX_NoUnkeyedLiteral, XXX_unrecognized, and XXX_sizecache fields plus the XXX_Unmarshal/XXX_Marshal/XXX_Merge/XXX_Size/XXX_DiscardUnknown methods; the proto.RegisterType/RegisterFile init and the 243-byte gzipped descriptor move to the top of the file under the new name fileDescriptor_128109001e578ffe while the old trailing fileDescriptorVersion block is dropped; google_protobuf.Empty becomes types.Empty and grpc.Invoke becomes c.cc.Invoke in the Version client, server interface, and handler; Marshal, Size, and String carry XXX_unrecognized, and Unmarshal/skipVersion gain negative-offset checks with simplified varint decoding.]
333  vendor/github.com/containerd/containerd/api/types/descriptor.pb.go (generated, vendored)
[Regenerated descriptor.pb.go: the package comment listing the source .proto files is dropped and the imports are consolidated, adding github.com/gogo/protobuf/sortkeys; Descriptor gains an Annotations map[string]string field (protobuf field 5) along with the XXX_NoUnkeyedLiteral/XXX_unrecognized/XXX_sizecache fields and XXX_* methods; Marshal, Size, String, and Unmarshal are extended to encode, size, print, and decode the annotations map; the descriptor is registered up front as fileDescriptor_37f958df3707db9e (311 bytes, with a registered AnnotationsEntry map type) and the old trailing 234-byte fileDescriptorDescriptor block is removed; varint decoding is simplified and negative-offset length checks are added throughout Unmarshal and skipDescriptor.]
1  vendor/github.com/containerd/containerd/api/types/descriptor.proto (generated, vendored)
@@ -15,4 +15,5 @@ message Descriptor {
 	string media_type = 1;
 	string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
 	int64 size = 3;
+	map<string, string> annotations = 5;
 }
180  vendor/github.com/containerd/containerd/api/types/metrics.pb.go (generated, vendored)
[Regenerated metrics.pb.go: imports are consolidated, with the old types1/google_protobuf1 aliases replaced by github_com_gogo_protobuf_types (StdTime helpers) and types (Any, Timestamp); Metric gains the XXX_* fields and methods, and its Timestamp and Data struct tags add proto3; the descriptor moves to the top of the file as fileDescriptor_8d594d87edf6e6bc and the old trailing fileDescriptorMetrics block is removed; Marshal, Size, String, and Unmarshal handle XXX_unrecognized, and Unmarshal/skipMetrics gain negative-offset checks with simplified varint decoding.]
150  vendor/github.com/containerd/containerd/api/types/mount.pb.go (generated, vendored)
[Regenerated mount.pb.go: imports are consolidated into a single block; the Options field tag adds proto3; Mount gains the XXX_NoUnkeyedLiteral/XXX_unrecognized/XXX_sizecache fields and XXX_* methods; the descriptor is registered up front as fileDescriptor_920196890d4a7b9f; the excerpt ends inside the 202-byte gzipped descriptor bytes.]
|
||||
0xff, 0xff, 0x82, 0x1c, 0x02, 0x18, 0x1d, 0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *Mount) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
@ -93,6 +150,9 @@ func (m *Mount) MarshalTo(dAtA []byte) (int, error) {
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -106,6 +166,9 @@ func encodeVarintMount(dAtA []byte, offset int, v uint64) int {
|
||||
return offset + 1
|
||||
}
|
||||
func (m *Mount) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Type)
|
||||
@ -126,6 +189,9 @@ func (m *Mount) Size() (n int) {
|
||||
n += 1 + l + sovMount(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@ -151,6 +217,7 @@ func (this *Mount) String() string {
|
||||
`Source:` + fmt.Sprintf("%v", this.Source) + `,`,
|
||||
`Target:` + fmt.Sprintf("%v", this.Target) + `,`,
|
||||
`Options:` + fmt.Sprintf("%v", this.Options) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -178,7 +245,7 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -206,7 +273,7 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -216,6 +283,9 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthMount
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthMount
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -235,7 +305,7 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -245,6 +315,9 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthMount
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthMount
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -264,7 +337,7 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -274,6 +347,9 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthMount
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthMount
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -293,7 +369,7 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -303,6 +379,9 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthMount
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthMount
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -317,9 +396,13 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthMount
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthMount
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -383,10 +466,13 @@ func skipMount(dAtA []byte) (n int, err error) {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthMount
|
||||
}
|
||||
iNdEx += length
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthMount
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
@ -415,6 +501,9 @@ func skipMount(dAtA []byte) (n int, err error) {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthMount
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
@ -433,24 +522,3 @@ var (
|
||||
ErrInvalidLengthMount = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowMount = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/types/mount.proto", fileDescriptorMount)
|
||||
}
|
||||
|
||||
var fileDescriptorMount = []byte{
|
||||
// 202 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9,
|
||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97,
|
||||
0xe6, 0x95, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x54, 0xe8, 0x81, 0x65, 0xa5,
|
||||
0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x2a, 0x17, 0xab,
|
||||
0x2f, 0x48, 0x9b, 0x90, 0x10, 0x17, 0x0b, 0x48, 0x9d, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10,
|
||||
0x98, 0x2d, 0x24, 0xc6, 0xc5, 0x56, 0x9c, 0x5f, 0x5a, 0x94, 0x9c, 0x2a, 0xc1, 0x04, 0x16, 0x85,
|
||||
0xf2, 0x40, 0xe2, 0x25, 0x89, 0x45, 0xe9, 0xa9, 0x25, 0x12, 0xcc, 0x10, 0x71, 0x08, 0x4f, 0x48,
|
||||
0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33,
|
||||
0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72,
|
||||
0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01,
|
||||
0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0x60, 0xb7, 0x1b, 0x03, 0x02, 0x00, 0x00,
|
||||
0xff, 0xff, 0x82, 0x1c, 0x02, 0x18, 0x1d, 0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
149 vendor/github.com/containerd/containerd/api/types/platform.pb.go generated vendored
@ -3,37 +3,94 @@
|
||||
|
||||
package types
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
|
||||
|
||||
import strings "strings"
|
||||
import reflect "reflect"
|
||||
|
||||
import io "io"
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// Platform follows the structure of the OCI platform specification, from
|
||||
// descriptors.
|
||||
type Platform struct {
|
||||
OS string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"`
|
||||
Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"`
|
||||
Variant string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"`
|
||||
OS string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"`
|
||||
Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"`
|
||||
Variant string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Platform) Reset() { *m = Platform{} }
|
||||
func (*Platform) ProtoMessage() {}
|
||||
func (*Platform) Descriptor() ([]byte, []int) { return fileDescriptorPlatform, []int{0} }
|
||||
func (m *Platform) Reset() { *m = Platform{} }
|
||||
func (*Platform) ProtoMessage() {}
|
||||
func (*Platform) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_24ba7a4b83e2367e, []int{0}
|
||||
}
|
||||
func (m *Platform) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Platform) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Platform.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Platform) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Platform.Merge(m, src)
|
||||
}
|
||||
func (m *Platform) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Platform) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Platform.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Platform proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Platform)(nil), "containerd.types.Platform")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/types/platform.proto", fileDescriptor_24ba7a4b83e2367e)
|
||||
}
|
||||
|
||||
var fileDescriptor_24ba7a4b83e2367e = []byte{
|
||||
// 205 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
|
||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x17, 0xe4, 0x24,
|
||||
0x96, 0xa4, 0xe5, 0x17, 0xe5, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x14, 0xe9,
|
||||
0x81, 0x15, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, 0x3a, 0xa5,
|
||||
0x04, 0x2e, 0x8e, 0x00, 0xa8, 0x4e, 0x21, 0x31, 0x2e, 0xa6, 0xfc, 0x62, 0x09, 0x46, 0x05, 0x46,
|
||||
0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0xfc, 0x83, 0x83, 0x98, 0xf2, 0x8b, 0x85, 0x94,
|
||||
0xb8, 0x78, 0x12, 0x8b, 0x92, 0x33, 0x32, 0x4b, 0x52, 0x93, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x98,
|
||||
0x40, 0x2a, 0x82, 0x50, 0xc4, 0x84, 0x24, 0xb8, 0xd8, 0xcb, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x4a,
|
||||
0x24, 0x98, 0xc1, 0xd2, 0x30, 0xae, 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31,
|
||||
0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
|
||||
0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x8c, 0x60, 0x48, 0x62, 0x03, 0x3b, 0xdb, 0x18,
|
||||
0x10, 0x00, 0x00, 0xff, 0xff, 0x05, 0xaa, 0xda, 0xa1, 0x1b, 0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *Platform) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
@ -67,6 +124,9 @@ func (m *Platform) MarshalTo(dAtA []byte) (int, error) {
|
||||
i = encodeVarintPlatform(dAtA, i, uint64(len(m.Variant)))
|
||||
i += copy(dAtA[i:], m.Variant)
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -80,6 +140,9 @@ func encodeVarintPlatform(dAtA []byte, offset int, v uint64) int {
|
||||
return offset + 1
|
||||
}
|
||||
func (m *Platform) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.OS)
|
||||
@ -94,6 +157,9 @@ func (m *Platform) Size() (n int) {
|
||||
if l > 0 {
|
||||
n += 1 + l + sovPlatform(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@ -118,6 +184,7 @@ func (this *Platform) String() string {
|
||||
`OS:` + fmt.Sprintf("%v", this.OS) + `,`,
|
||||
`Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`,
|
||||
`Variant:` + fmt.Sprintf("%v", this.Variant) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -145,7 +212,7 @@ func (m *Platform) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -173,7 +240,7 @@ func (m *Platform) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -183,6 +250,9 @@ func (m *Platform) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthPlatform
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthPlatform
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -202,7 +272,7 @@ func (m *Platform) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -212,6 +282,9 @@ func (m *Platform) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthPlatform
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthPlatform
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -231,7 +304,7 @@ func (m *Platform) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -241,6 +314,9 @@ func (m *Platform) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthPlatform
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthPlatform
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -255,9 +331,13 @@ func (m *Platform) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthPlatform
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthPlatform
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -321,10 +401,13 @@ func skipPlatform(dAtA []byte) (n int, err error) {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthPlatform
|
||||
}
|
||||
iNdEx += length
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthPlatform
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
@ -353,6 +436,9 @@ func skipPlatform(dAtA []byte) (n int, err error) {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthPlatform
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
@ -371,24 +457,3 @@ var (
|
||||
ErrInvalidLengthPlatform = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowPlatform = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/types/platform.proto", fileDescriptorPlatform)
|
||||
}
|
||||
|
||||
var fileDescriptorPlatform = []byte{
|
||||
// 205 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
|
||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x17, 0xe4, 0x24,
|
||||
0x96, 0xa4, 0xe5, 0x17, 0xe5, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x14, 0xe9,
|
||||
0x81, 0x15, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, 0x3a, 0xa5,
|
||||
0x04, 0x2e, 0x8e, 0x00, 0xa8, 0x4e, 0x21, 0x31, 0x2e, 0xa6, 0xfc, 0x62, 0x09, 0x46, 0x05, 0x46,
|
||||
0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0xfc, 0x83, 0x83, 0x98, 0xf2, 0x8b, 0x85, 0x94,
|
||||
0xb8, 0x78, 0x12, 0x8b, 0x92, 0x33, 0x32, 0x4b, 0x52, 0x93, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x98,
|
||||
0x40, 0x2a, 0x82, 0x50, 0xc4, 0x84, 0x24, 0xb8, 0xd8, 0xcb, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x4a,
|
||||
0x24, 0x98, 0xc1, 0xd2, 0x30, 0xae, 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31,
|
||||
0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
|
||||
0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x8c, 0x60, 0x48, 0x62, 0x03, 0x3b, 0xdb, 0x18,
|
||||
0x10, 0x00, 0x00, 0xff, 0xff, 0x05, 0xaa, 0xda, 0xa1, 0x1b, 0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
329 vendor/github.com/containerd/containerd/api/types/task/task.pb.go generated vendored
@ -1,34 +1,19 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: github.com/containerd/containerd/api/types/task/task.proto
|
||||
|
||||
/*
|
||||
Package task is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
github.com/containerd/containerd/api/types/task/task.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Process
|
||||
ProcessInfo
|
||||
*/
|
||||
package task
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
|
||||
import _ "github.com/gogo/protobuf/types"
|
||||
import google_protobuf2 "github.com/gogo/protobuf/types"
|
||||
|
||||
import time "time"
|
||||
|
||||
import types "github.com/gogo/protobuf/types"
|
||||
|
||||
import strings "strings"
|
||||
import reflect "reflect"
|
||||
|
||||
import io "io"
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
|
||||
types "github.com/gogo/protobuf/types"
|
||||
io "io"
|
||||
math "math"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
time "time"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
@ -61,6 +46,7 @@ var Status_name = map[int32]string{
|
||||
4: "PAUSED",
|
||||
5: "PAUSING",
|
||||
}
|
||||
|
||||
var Status_value = map[string]int32{
|
||||
"UNKNOWN": 0,
|
||||
"CREATED": 1,
|
||||
@ -73,24 +59,58 @@ var Status_value = map[string]int32{
|
||||
func (x Status) String() string {
|
||||
return proto.EnumName(Status_name, int32(x))
|
||||
}
|
||||
func (Status) EnumDescriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
|
||||
|
||||
type Process struct {
|
||||
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
|
||||
ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
|
||||
Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
|
||||
Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
|
||||
Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
|
||||
Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
|
||||
Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
|
||||
ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
|
||||
ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
|
||||
func (Status) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_391ef18c8ab0dc16, []int{0}
|
||||
}
|
||||
|
||||
func (m *Process) Reset() { *m = Process{} }
|
||||
func (*Process) ProtoMessage() {}
|
||||
func (*Process) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
|
||||
type Process struct {
|
||||
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
|
||||
ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
|
||||
Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
|
||||
Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
|
||||
Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
|
||||
Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
|
||||
Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
|
||||
ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
|
||||
ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Process) Reset() { *m = Process{} }
|
||||
func (*Process) ProtoMessage() {}
|
||||
func (*Process) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_391ef18c8ab0dc16, []int{0}
|
||||
}
|
||||
func (m *Process) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Process) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Process.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Process) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Process.Merge(m, src)
|
||||
}
|
||||
func (m *Process) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Process) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Process.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Process proto.InternalMessageInfo
|
||||
|
||||
type ProcessInfo struct {
|
||||
// PID is the process ID.
|
||||
@ -98,18 +118,93 @@ type ProcessInfo struct {
|
||||
// Info contains additional process information.
|
||||
//
|
||||
// Info varies by platform.
|
||||
Info *google_protobuf2.Any `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"`
|
||||
Info *types.Any `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ProcessInfo) Reset() { *m = ProcessInfo{} }
|
||||
func (*ProcessInfo) ProtoMessage() {}
|
||||
func (*ProcessInfo) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{1} }
|
||||
func (m *ProcessInfo) Reset() { *m = ProcessInfo{} }
|
||||
func (*ProcessInfo) ProtoMessage() {}
|
||||
func (*ProcessInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_391ef18c8ab0dc16, []int{1}
|
||||
}
|
||||
func (m *ProcessInfo) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ProcessInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ProcessInfo.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ProcessInfo) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ProcessInfo.Merge(m, src)
|
||||
}
|
||||
func (m *ProcessInfo) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ProcessInfo) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ProcessInfo.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ProcessInfo proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("containerd.v1.types.Status", Status_name, Status_value)
|
||||
proto.RegisterType((*Process)(nil), "containerd.v1.types.Process")
|
||||
proto.RegisterType((*ProcessInfo)(nil), "containerd.v1.types.ProcessInfo")
|
||||
proto.RegisterEnum("containerd.v1.types.Status", Status_name, Status_value)
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/types/task/task.proto", fileDescriptor_391ef18c8ab0dc16)
|
||||
}
|
||||
|
||||
var fileDescriptor_391ef18c8ab0dc16 = []byte{
|
||||
// 545 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xd3, 0x40,
|
||||
0x18, 0xc6, 0x7d, 0x6e, 0xeb, 0xa6, 0xe7, 0xb6, 0x18, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, 0x64,
|
||||
0x31, 0xd8, 0x22, 0xdd, 0xd8, 0xf2, 0x4f, 0xc8, 0x42, 0x72, 0x23, 0x27, 0x11, 0x6c, 0x91, 0x13,
|
||||
0x5f, 0xcc, 0xa9, 0xcd, 0x9d, 0x65, 0x9f, 0x81, 0x6c, 0x8c, 0xa8, 0x13, 0x5f, 0xa0, 0x13, 0x7c,
|
||||
0x0a, 0x3e, 0x41, 0x46, 0x26, 0xc4, 0x14, 0xa8, 0x3f, 0x09, 0x3a, 0xdb, 0x49, 0x23, 0x60, 0x39,
|
||||
0xbd, 0xef, 0xf3, 0x7b, 0xee, 0xbd, 0xf7, 0x1e, 0xf8, 0x22, 0xc6, 0xec, 0x6d, 0x3e, 0x75, 0x66,
|
||||
0x74, 0xe1, 0xce, 0x28, 0x61, 0x21, 0x26, 0x28, 0x8d, 0x76, 0xcb, 0x30, 0xc1, 0x2e, 0x5b, 0x26,
|
||||
0x28, 0x73, 0x59, 0x98, 0x5d, 0x95, 0x87, 0x93, 0xa4, 0x94, 0x51, 0xf5, 0xd1, 0xbd, 0xcb, 0x79,
|
||||
0xf7, 0xdc, 0x29, 0x4d, 0x7a, 0x33, 0xa6, 0x31, 0x2d, 0xb9, 0xcb, 0xab, 0xca, 0xaa, 0x9b, 0x31,
|
||||
0xa5, 0xf1, 0x35, 0x72, 0xcb, 0x6e, 0x9a, 0xcf, 0x5d, 0x86, 0x17, 0x28, 0x63, 0xe1, 0x22, 0xa9,
|
||||
0x0d, 0x8f, 0xff, 0x36, 0x84, 0x64, 0x59, 0xa1, 0xf3, 0x42, 0x84, 0x87, 0x83, 0x94, 0xce, 0x50,
|
||||
0x96, 0xa9, 0x2d, 0x78, 0xbc, 0x7d, 0x74, 0x82, 0x23, 0x0d, 0x58, 0xc0, 0x3e, 0xea, 0x3c, 0x28,
|
||||
0xd6, 0xa6, 0xdc, 0xdd, 0xe8, 0x5e, 0x2f, 0x90, 0xb7, 0x26, 0x2f, 0x52, 0xcf, 0xa0, 0x88, 0x23,
|
||||
0x4d, 0x2c, 0x9d, 0x52, 0xb1, 0x36, 0x45, 0xaf, 0x17, 0x88, 0x38, 0x52, 0x15, 0xb8, 0x97, 0xe0,
|
||||
0x48, 0xdb, 0xb3, 0x80, 0x7d, 0x12, 0xf0, 0x52, 0xbd, 0x80, 0x52, 0xc6, 0x42, 0x96, 0x67, 0xda,
|
||||
0xbe, 0x05, 0xec, 0xd3, 0xd6, 0x13, 0xe7, 0x3f, 0x3f, 0x74, 0x86, 0xa5, 0x25, 0xa8, 0xad, 0x6a,
|
||||
0x13, 0x1e, 0x64, 0x2c, 0xc2, 0x44, 0x3b, 0xe0, 0x2f, 0x04, 0x55, 0xa3, 0x9e, 0xf1, 0x51, 0x11,
|
||||
0xcd, 0x99, 0x26, 0x95, 0x72, 0xdd, 0xd5, 0x3a, 0x4a, 0x53, 0xed, 0x70, 0xab, 0xa3, 0x34, 0x55,
|
||||
0x75, 0xd8, 0x60, 0x28, 0x5d, 0x60, 0x12, 0x5e, 0x6b, 0x0d, 0x0b, 0xd8, 0x8d, 0x60, 0xdb, 0xab,
|
||||
0x26, 0x94, 0xd1, 0x07, 0xcc, 0x26, 0xf5, 0x6e, 0x47, 0xe5, 0xc2, 0x90, 0x4b, 0xd5, 0x2a, 0x6a,
|
||||
0x1b, 0x1e, 0xf1, 0x0e, 0x45, 0x93, 0x90, 0x69, 0xd0, 0x02, 0xb6, 0xdc, 0xd2, 0x9d, 0x2a, 0x50,
|
||||
0x67, 0x13, 0xa8, 0x33, 0xda, 0x24, 0xde, 0x69, 0xac, 0xd6, 0xa6, 0xf0, 0xf9, 0x97, 0x09, 0x82,
|
||||
0x46, 0x75, 0xad, 0xcd, 0xce, 0x3d, 0x28, 0xd7, 0x19, 0x7b, 0x64, 0x4e, 0x37, 0xd9, 0x80, 0xfb,
|
||||
0x6c, 0x6c, 0xb8, 0x8f, 0xc9, 0x9c, 0x96, 0x39, 0xca, 0xad, 0xe6, 0x3f, 0xe3, 0xdb, 0x64, 0x19,
|
||||
0x94, 0x8e, 0x67, 0x3f, 0x00, 0x94, 0xea, 0xc5, 0x0c, 0x78, 0x38, 0xf6, 0x5f, 0xf9, 0x97, 0xaf,
|
||||
0x7d, 0x45, 0xd0, 0x1f, 0xde, 0xdc, 0x5a, 0x27, 0x15, 0x18, 0x93, 0x2b, 0x42, 0xdf, 0x13, 0xce,
|
||||
0xbb, 0x41, 0xbf, 0x3d, 0xea, 0xf7, 0x14, 0xb0, 0xcb, 0xbb, 0x29, 0x0a, 0x19, 0x8a, 0x38, 0x0f,
|
||||
0xc6, 0xbe, 0xef, 0xf9, 0x2f, 0x15, 0x71, 0x97, 0x07, 0x39, 0x21, 0x98, 0xc4, 0x9c, 0x0f, 0x47,
|
||||
0x97, 0x83, 0x41, 0xbf, 0xa7, 0xec, 0xed, 0xf2, 0x21, 0xa3, 0x49, 0x82, 0x22, 0xf5, 0x29, 0x94,
|
||||
0x06, 0xed, 0xf1, 0xb0, 0xdf, 0x53, 0xf6, 0x75, 0xe5, 0xe6, 0xd6, 0x3a, 0xae, 0xf0, 0x20, 0xcc,
|
||||
0xb3, 0x6a, 0x3a, 0xa7, 0x7c, 0xfa, 0xc1, 0xee, 0x6d, 0x8e, 0x31, 0x89, 0xf5, 0xd3, 0x4f, 0x5f,
|
||||
0x0c, 0xe1, 0xdb, 0x57, 0xa3, 0xfe, 0x4d, 0x47, 0x5b, 0xdd, 0x19, 0xc2, 0xcf, 0x3b, 0x43, 0xf8,
|
||||
0x58, 0x18, 0x60, 0x55, 0x18, 0xe0, 0x7b, 0x61, 0x80, 0xdf, 0x85, 0x01, 0xde, 0x08, 0x53, 0xa9,
|
||||
0x0c, 0xe2, 0xe2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x32, 0xd2, 0x86, 0x50, 0x03, 0x00,
|
||||
0x00,
|
||||
}
|
||||
|
||||
func (m *Process) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
@ -182,12 +277,15 @@ func (m *Process) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
dAtA[i] = 0x52
|
||||
i++
|
||||
i = encodeVarintTask(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt)))
|
||||
n1, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
|
||||
i = encodeVarintTask(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
|
||||
n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n1
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -221,6 +319,9 @@ func (m *ProcessInfo) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
i += n2
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -234,6 +335,9 @@ func encodeVarintTask(dAtA []byte, offset int, v uint64) int {
|
||||
return offset + 1
|
||||
}
|
||||
func (m *Process) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.ContainerID)
|
||||
@ -268,12 +372,18 @@ func (m *Process) Size() (n int) {
|
||||
if m.ExitStatus != 0 {
|
||||
n += 1 + sovTask(uint64(m.ExitStatus))
|
||||
}
|
||||
l = types.SizeOfStdTime(m.ExitedAt)
|
||||
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
|
||||
n += 1 + l + sovTask(uint64(l))
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ProcessInfo) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Pid != 0 {
|
||||
@ -283,6 +393,9 @@ func (m *ProcessInfo) Size() (n int) {
|
||||
l = m.Info.Size()
|
||||
n += 1 + l + sovTask(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@ -313,7 +426,8 @@ func (this *Process) String() string {
|
||||
`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
|
||||
`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
|
||||
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
|
||||
`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf1.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||
`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -324,7 +438,8 @@ func (this *ProcessInfo) String() string {
|
||||
}
|
||||
s := strings.Join([]string{`&ProcessInfo{`,
|
||||
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
|
||||
`Info:` + strings.Replace(fmt.Sprintf("%v", this.Info), "Any", "google_protobuf2.Any", 1) + `,`,
|
||||
`Info:` + strings.Replace(fmt.Sprintf("%v", this.Info), "Any", "types.Any", 1) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -352,7 +467,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -380,7 +495,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -390,6 +505,9 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -409,7 +527,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -419,6 +537,9 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -438,7 +559,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Pid |= (uint32(b) & 0x7F) << shift
|
||||
m.Pid |= uint32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -457,7 +578,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Status |= (Status(b) & 0x7F) << shift
|
||||
m.Status |= Status(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -476,7 +597,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -486,6 +607,9 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -505,7 +629,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -515,6 +639,9 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -534,7 +661,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -544,6 +671,9 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -563,7 +693,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -583,7 +713,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.ExitStatus |= (uint32(b) & 0x7F) << shift
|
||||
m.ExitStatus |= uint32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -602,7 +732,7 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -611,10 +741,13 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
|
||||
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
@ -627,9 +760,13 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -654,7 +791,7 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -682,7 +819,7 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Pid |= (uint32(b) & 0x7F) << shift
|
||||
m.Pid |= uint32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -701,7 +838,7 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -710,11 +847,14 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Info == nil {
|
||||
m.Info = &google_protobuf2.Any{}
|
||||
m.Info = &types.Any{}
|
||||
}
|
||||
if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
@ -729,9 +869,13 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -795,10 +939,13 @@ func skipTask(dAtA []byte) (n int, err error) {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthTask
|
||||
}
|
||||
iNdEx += length
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthTask
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
@ -827,6 +974,9 @@ func skipTask(dAtA []byte) (n int, err error) {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthTask
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
@ -845,46 +995,3 @@ var (
|
||||
ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowTask = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/types/task/task.proto", fileDescriptorTask)
|
||||
}
|
||||
|
||||
var fileDescriptorTask = []byte{
|
||||
// 545 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xd3, 0x40,
|
||||
0x18, 0xc6, 0x7d, 0x6e, 0xeb, 0xa6, 0xe7, 0xb6, 0x18, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, 0x64,
|
||||
0x31, 0xd8, 0x22, 0xdd, 0xd8, 0xf2, 0x4f, 0xc8, 0x42, 0x72, 0x23, 0x27, 0x11, 0x6c, 0x91, 0x13,
|
||||
0x5f, 0xcc, 0xa9, 0xcd, 0x9d, 0x65, 0x9f, 0x81, 0x6c, 0x8c, 0xa8, 0x13, 0x5f, 0xa0, 0x13, 0x7c,
|
||||
0x0a, 0x3e, 0x41, 0x46, 0x26, 0xc4, 0x14, 0xa8, 0x3f, 0x09, 0x3a, 0xdb, 0x49, 0x23, 0x60, 0x39,
|
||||
0xbd, 0xef, 0xf3, 0x7b, 0xee, 0xbd, 0xf7, 0x1e, 0xf8, 0x22, 0xc6, 0xec, 0x6d, 0x3e, 0x75, 0x66,
|
||||
0x74, 0xe1, 0xce, 0x28, 0x61, 0x21, 0x26, 0x28, 0x8d, 0x76, 0xcb, 0x30, 0xc1, 0x2e, 0x5b, 0x26,
|
||||
0x28, 0x73, 0x59, 0x98, 0x5d, 0x95, 0x87, 0x93, 0xa4, 0x94, 0x51, 0xf5, 0xd1, 0xbd, 0xcb, 0x79,
|
||||
0xf7, 0xdc, 0x29, 0x4d, 0x7a, 0x33, 0xa6, 0x31, 0x2d, 0xb9, 0xcb, 0xab, 0xca, 0xaa, 0x9b, 0x31,
|
||||
0xa5, 0xf1, 0x35, 0x72, 0xcb, 0x6e, 0x9a, 0xcf, 0x5d, 0x86, 0x17, 0x28, 0x63, 0xe1, 0x22, 0xa9,
|
||||
0x0d, 0x8f, 0xff, 0x36, 0x84, 0x64, 0x59, 0xa1, 0xf3, 0x42, 0x84, 0x87, 0x83, 0x94, 0xce, 0x50,
|
||||
0x96, 0xa9, 0x2d, 0x78, 0xbc, 0x7d, 0x74, 0x82, 0x23, 0x0d, 0x58, 0xc0, 0x3e, 0xea, 0x3c, 0x28,
|
||||
0xd6, 0xa6, 0xdc, 0xdd, 0xe8, 0x5e, 0x2f, 0x90, 0xb7, 0x26, 0x2f, 0x52, 0xcf, 0xa0, 0x88, 0x23,
|
||||
0x4d, 0x2c, 0x9d, 0x52, 0xb1, 0x36, 0x45, 0xaf, 0x17, 0x88, 0x38, 0x52, 0x15, 0xb8, 0x97, 0xe0,
|
||||
0x48, 0xdb, 0xb3, 0x80, 0x7d, 0x12, 0xf0, 0x52, 0xbd, 0x80, 0x52, 0xc6, 0x42, 0x96, 0x67, 0xda,
|
||||
0xbe, 0x05, 0xec, 0xd3, 0xd6, 0x13, 0xe7, 0x3f, 0x3f, 0x74, 0x86, 0xa5, 0x25, 0xa8, 0xad, 0x6a,
|
||||
0x13, 0x1e, 0x64, 0x2c, 0xc2, 0x44, 0x3b, 0xe0, 0x2f, 0x04, 0x55, 0xa3, 0x9e, 0xf1, 0x51, 0x11,
|
||||
0xcd, 0x99, 0x26, 0x95, 0x72, 0xdd, 0xd5, 0x3a, 0x4a, 0x53, 0xed, 0x70, 0xab, 0xa3, 0x34, 0x55,
|
||||
0x75, 0xd8, 0x60, 0x28, 0x5d, 0x60, 0x12, 0x5e, 0x6b, 0x0d, 0x0b, 0xd8, 0x8d, 0x60, 0xdb, 0xab,
|
||||
0x26, 0x94, 0xd1, 0x07, 0xcc, 0x26, 0xf5, 0x6e, 0x47, 0xe5, 0xc2, 0x90, 0x4b, 0xd5, 0x2a, 0x6a,
|
||||
0x1b, 0x1e, 0xf1, 0x0e, 0x45, 0x93, 0x90, 0x69, 0xd0, 0x02, 0xb6, 0xdc, 0xd2, 0x9d, 0x2a, 0x50,
|
||||
0x67, 0x13, 0xa8, 0x33, 0xda, 0x24, 0xde, 0x69, 0xac, 0xd6, 0xa6, 0xf0, 0xf9, 0x97, 0x09, 0x82,
|
||||
0x46, 0x75, 0xad, 0xcd, 0xce, 0x3d, 0x28, 0xd7, 0x19, 0x7b, 0x64, 0x4e, 0x37, 0xd9, 0x80, 0xfb,
|
||||
0x6c, 0x6c, 0xb8, 0x8f, 0xc9, 0x9c, 0x96, 0x39, 0xca, 0xad, 0xe6, 0x3f, 0xe3, 0xdb, 0x64, 0x19,
|
||||
0x94, 0x8e, 0x67, 0x3f, 0x00, 0x94, 0xea, 0xc5, 0x0c, 0x78, 0x38, 0xf6, 0x5f, 0xf9, 0x97, 0xaf,
|
||||
0x7d, 0x45, 0xd0, 0x1f, 0xde, 0xdc, 0x5a, 0x27, 0x15, 0x18, 0x93, 0x2b, 0x42, 0xdf, 0x13, 0xce,
|
||||
0xbb, 0x41, 0xbf, 0x3d, 0xea, 0xf7, 0x14, 0xb0, 0xcb, 0xbb, 0x29, 0x0a, 0x19, 0x8a, 0x38, 0x0f,
|
||||
0xc6, 0xbe, 0xef, 0xf9, 0x2f, 0x15, 0x71, 0x97, 0x07, 0x39, 0x21, 0x98, 0xc4, 0x9c, 0x0f, 0x47,
|
||||
0x97, 0x83, 0x41, 0xbf, 0xa7, 0xec, 0xed, 0xf2, 0x21, 0xa3, 0x49, 0x82, 0x22, 0xf5, 0x29, 0x94,
|
||||
0x06, 0xed, 0xf1, 0xb0, 0xdf, 0x53, 0xf6, 0x75, 0xe5, 0xe6, 0xd6, 0x3a, 0xae, 0xf0, 0x20, 0xcc,
|
||||
0xb3, 0x6a, 0x3a, 0xa7, 0x7c, 0xfa, 0xc1, 0xee, 0x6d, 0x8e, 0x31, 0x89, 0xf5, 0xd3, 0x4f, 0x5f,
|
||||
0x0c, 0xe1, 0xdb, 0x57, 0xa3, 0xfe, 0x4d, 0x47, 0x5b, 0xdd, 0x19, 0xc2, 0xcf, 0x3b, 0x43, 0xf8,
|
||||
0x58, 0x18, 0x60, 0x55, 0x18, 0xe0, 0x7b, 0x61, 0x80, 0xdf, 0x85, 0x01, 0xde, 0x08, 0x53, 0xa9,
|
||||
0x0c, 0xe2, 0xe2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x32, 0xd2, 0x86, 0x50, 0x03, 0x00,
|
||||
0x00,
|
||||
}
|
||||
|
||||
35 vendor/github.com/containerd/containerd/client.go generated vendored
@ -136,6 +136,20 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
if copts.services == nil && c.conn == nil {
return nil, errors.New("no grpc connection or services is available")
}

// check namespace labels for default runtime
if copts.defaultRuntime == "" && copts.defaultns != "" {
namespaces := c.NamespaceService()
ctx := context.Background()
if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil {
if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok {
c.runtime = defaultRuntime
}
} else {
return nil, err
}
}

return c, nil
}

@ -152,6 +166,20 @@ func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) {
conn: conn,
runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS),
}

// check namespace labels for default runtime
if copts.defaultRuntime == "" && copts.defaultns != "" {
namespaces := c.NamespaceService()
ctx := context.Background()
if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil {
if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok {
c.runtime = defaultRuntime
}
} else {
return nil, err
}
}

if copts.services != nil {
c.services = *copts.services
}
@ -594,6 +622,13 @@ func (c *Client) VersionService() versionservice.VersionClient {
return versionservice.NewVersionClient(c.conn)
}

// Conn returns the underlying GRPC connection object
func (c *Client) Conn() *grpc.ClientConn {
c.connMu.Lock()
defer c.connMu.Unlock()
return c.conn
}

// Version of containerd
type Version struct {
// Version number

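The two hunks above teach New and NewWithConn to consult a namespace label for the default runtime when none was set explicitly. Below is a minimal, hypothetical sketch (not part of this changeset) of how such a label could be written with the containerd Go client so that the lookup shown above finds it; the socket path, namespace name, and runtime value are illustrative assumptions.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/defaults"
)

func main() {
	// Connect to a containerd daemon; the socket path is an assumption.
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Label the "example" namespace so that clients created with
	// WithDefaultNamespace("example") resolve this runtime by default,
	// via the labels lookup added in the hunks above.
	ctx := context.Background()
	err = client.NamespaceService().SetLabel(ctx, "example",
		defaults.DefaultRuntimeNSLabel, "io.containerd.runc.v1")
	if err != nil {
		log.Fatal(err)
	}
}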
9 vendor/github.com/containerd/containerd/container_checkpoint_opts.go generated vendored
@ -70,10 +70,11 @@ func WithCheckpointTask(ctx context.Context, client *Client, c *containers.Conta
for _, d := range task.Descriptors {
platformSpec := platforms.DefaultSpec()
index.Manifests = append(index.Manifests, imagespec.Descriptor{
MediaType: d.MediaType,
Size: d.Size_,
Digest: d.Digest,
Platform: &platformSpec,
MediaType: d.MediaType,
Size: d.Size_,
Digest: d.Digest,
Platform: &platformSpec,
Annotations: d.Annotations,
})
}
// save copts

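The hunk above starts carrying per-descriptor annotations into the checkpoint index. A brief, hypothetical sketch of taking a task checkpoint through the Go client, which is the path that exercises WithCheckpointTask; the container ID and checkpoint ref are placeholders:

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "example")

	container, err := client.LoadContainer(ctx, "example-container")
	if err != nil {
		log.Fatal(err)
	}

	// WithCheckpointTask appends the task's descriptors (now including their
	// annotations, per the hunk above) to the checkpoint's index manifest.
	checkpoint, err := container.Checkpoint(ctx, "example.com/checkpoints/example:latest",
		containerd.WithCheckpointRuntime,
		containerd.WithCheckpointTask)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("checkpoint image:", checkpoint.Name())
}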
21 vendor/github.com/containerd/containerd/container_opts.go generated vendored
@ -20,7 +20,9 @@ import (
"context"

"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/defaults"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/oci"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/snapshots"
@ -107,7 +109,7 @@ func WithSnapshotter(name string) NewContainerOpts {
// WithSnapshot uses an existing root filesystem for the container
func WithSnapshot(id string) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
setSnapshotterIfEmpty(c)
setSnapshotterIfEmpty(ctx, client, c)
// check that the snapshot exists, if not, fail on creation
if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil {
return err
@ -125,7 +127,7 @@ func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts
if err != nil {
return err
}
setSnapshotterIfEmpty(c)
setSnapshotterIfEmpty(ctx, client, c)
parent := identity.ChainID(diffIDs).String()
if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, parent, opts...); err != nil {
return err
@ -155,7 +157,7 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer
if err != nil {
return err
}
setSnapshotterIfEmpty(c)
setSnapshotterIfEmpty(ctx, client, c)
parent := identity.ChainID(diffIDs).String()
if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, parent, opts...); err != nil {
return err
@ -166,9 +168,18 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer
}
}

func setSnapshotterIfEmpty(c *containers.Container) {
func setSnapshotterIfEmpty(ctx context.Context, client *Client, c *containers.Container) {
if c.Snapshotter == "" {
c.Snapshotter = DefaultSnapshotter
defaultSnapshotter := DefaultSnapshotter
namespaceService := client.NamespaceService()
if ns, err := namespaces.NamespaceRequired(ctx); err == nil {
if labels, err := namespaceService.Labels(ctx, ns); err == nil {
if snapshotLabel, ok := labels[defaults.DefaultSnapshotterNSLabel]; ok {
defaultSnapshotter = snapshotLabel
}
}
}
c.Snapshotter = defaultSnapshotter
}
}

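With setSnapshotterIfEmpty reworked as above, a container whose spec leaves Snapshotter empty now falls back to the namespace's defaults.DefaultSnapshotterNSLabel value before the built-in DefaultSnapshotter. A rough consumer-side sketch, assuming the namespace has already been labelled and that the socket path, image ref, and IDs are placeholders:

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/oci"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Work inside a namespace that carries the snapshotter label.
	ctx := namespaces.WithNamespace(context.Background(), "example")

	image, err := client.GetImage(ctx, "docker.io/library/alpine:latest")
	if err != nil {
		log.Fatal(err)
	}

	// No WithSnapshotter option here: WithNewSnapshot calls setSnapshotterIfEmpty,
	// which now consults the namespace label before falling back to DefaultSnapshotter.
	container, err := client.NewContainer(ctx, "example-container",
		containerd.WithNewSnapshot("example-snapshot", image),
		containerd.WithNewSpec(oci.WithImageConfig(image)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer container.Delete(ctx, containerd.WithSnapshotCleanup)
}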
2 vendor/github.com/containerd/containerd/container_opts_unix.go generated vendored
@ -50,7 +50,7 @@ func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool
return err
}

setSnapshotterIfEmpty(c)
setSnapshotterIfEmpty(ctx, client, c)

var (
snapshotter = client.SnapshotService(c.Snapshotter)

78 vendor/github.com/containerd/containerd/content/local/store.go generated vendored
@ -33,6 +33,7 @@ import (
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/filters"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/containerd/continuity"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
@ -477,6 +478,35 @@ func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.
|
||||
return w, nil // lock is now held by w.
|
||||
}
|
||||
|
||||
func (s *store) resumeStatus(ref string, total int64, digester digest.Digester) (content.Status, error) {
|
||||
path, _, data := s.ingestPaths(ref)
|
||||
status, err := s.status(path)
|
||||
if err != nil {
|
||||
return status, errors.Wrap(err, "failed reading status of resume write")
|
||||
}
|
||||
if ref != status.Ref {
|
||||
// NOTE(stevvooe): This is fairly catastrophic. Either we have some
|
||||
// layout corruption or a hash collision for the ref key.
|
||||
return status, errors.Wrapf(err, "ref key does not match: %v != %v", ref, status.Ref)
|
||||
}
|
||||
|
||||
if total > 0 && status.Total > 0 && total != status.Total {
|
||||
return status, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
|
||||
}
|
||||
|
||||
// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
|
||||
fp, err := os.Open(data)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
|
||||
p := bufPool.Get().(*[]byte)
|
||||
status.Offset, err = io.CopyBuffer(digester.Hash(), fp, *p)
|
||||
bufPool.Put(p)
|
||||
fp.Close()
|
||||
return status, err
|
||||
}
|
||||
|
||||
// writer provides the main implementation of the Writer method. The caller
|
||||
// must hold the lock correctly and release on error if there is a problem.
|
||||
func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) {
|
||||
@@ -498,45 +528,25 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
updatedAt time.Time
)

foundValidIngest := false
// ensure that the ingest path has been created.
if err := os.Mkdir(path, 0755); err != nil {
if !os.IsExist(err) {
return nil, err
}

status, err := s.status(path)
if err != nil {
return nil, errors.Wrap(err, "failed reading status of resume write")
status, err := s.resumeStatus(ref, total, digester)
if err == nil {
foundValidIngest = true
updatedAt = status.UpdatedAt
startedAt = status.StartedAt
total = status.Total
offset = status.Offset
} else {
logrus.Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error())
}
}

if ref != status.Ref {
// NOTE(stevvooe): This is fairly catastrophic. Either we have some
// layout corruption or a hash collision for the ref key.
return nil, errors.Wrapf(err, "ref key does not match: %v != %v", ref, status.Ref)
}

if total > 0 && status.Total > 0 && total != status.Total {
return nil, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
}

// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
fp, err := os.Open(data)
if err != nil {
return nil, err
}

p := bufPool.Get().(*[]byte)
offset, err = io.CopyBuffer(digester.Hash(), fp, *p)
bufPool.Put(p)
fp.Close()
if err != nil {
return nil, err
}

updatedAt = status.UpdatedAt
startedAt = status.StartedAt
total = status.Total
} else {
if !foundValidIngest {
startedAt = time.Now()
updatedAt = startedAt

@@ -546,11 +556,11 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
return nil, err
}

if writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil {
if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil {
return nil, err
}

if writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil {
if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil {
return nil, err
}

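The last hunk above fixes a subtle Go pitfall: `if writeTimestampFile(...); err != nil` runs the call as the init statement, throws its error away, and then tests an unrelated outer err. A small sketch of the broken and fixed forms, using a simplified stand-in for the upstream writeTimestampFile:

package contentsketch

import (
	"io/ioutil"
	"path/filepath"
	"time"
)

// writeTimestampFile is a simplified stand-in for the vendored helper.
func writeTimestampFile(p string, t time.Time) error {
	b, err := t.UTC().MarshalText()
	if err != nil {
		return err
	}
	return ioutil.WriteFile(p, b, 0666)
}

func writeIngestTimestamps(path string, startedAt time.Time) error {
	// Broken form (removed by the diff): the call's error is discarded and the
	// condition reads a stale outer err, so a failed write goes unnoticed.
	//
	//   if writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil {
	//
	// Fixed form: bind the error in the if's init statement.
	if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil {
		return err
	}
	if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil {
		return err
	}
	return nil
}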
5 vendor/github.com/containerd/containerd/content/local/writer.go generated vendored
@@ -74,6 +74,9 @@ func (w *writer) Write(p []byte) (n int, err error) {
}

func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
// Ensure even on error the writer is fully closed
defer unlock(w.ref)

var base content.Info
for _, opt := range opts {
if err := opt(&base); err != nil {
@@ -81,8 +84,6 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
}
}

// Ensure even on error the writer is fully closed
defer unlock(w.ref)
fp := w.fp
w.fp = nil

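Moving `defer unlock(w.ref)` to the top of Commit means the ref lock is released even when one of the option callbacks fails before the old defer was reached. A minimal sketch of the pattern; unlock, option and commit are illustrative names, not the vendored API:

package contentsketch

import "sync"

var (
	inflightMu sync.Mutex
	inflight   = map[string]struct{}{}
)

// unlock is an illustrative ref-lock release, loosely modelled on the store's.
func unlock(ref string) {
	inflightMu.Lock()
	delete(inflight, ref)
	inflightMu.Unlock()
}

type option func() error

// commit registers the unlock before any early return, mirroring the fix: a
// failing option no longer leaves the ref permanently locked.
func commit(ref string, opts ...option) error {
	defer unlock(ref)

	for _, opt := range opts {
		if err := opt(); err != nil {
			return err // the deferred unlock still runs
		}
	}
	return nil
}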
6 vendor/github.com/containerd/containerd/defaults/defaults.go generated vendored
@@ -23,4 +23,10 @@ const (
// DefaultMaxSendMsgSize defines the default maximum message size for
// sending protobufs passed over the GRPC API.
DefaultMaxSendMsgSize = 16 << 20
// DefaultRuntimeNSLabel defines the namespace label to check for
// default runtime
DefaultRuntimeNSLabel = "containerd.io/defaults/runtime"
// DefaultSnapshotterNSLabel defines the namespances label to check for
// default snapshotter
DefaultSnapshotterNSLabel = "containerd.io/defaults/snapshotter"
)

4 vendor/github.com/containerd/containerd/defaults/defaults_windows.go generated vendored
@@ -26,10 +26,10 @@ import (
var (
// DefaultRootDir is the default location used by containerd to store
// persistent data
DefaultRootDir = filepath.Join(os.Getenv("programfiles"), "containerd", "root")
DefaultRootDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "root")
// DefaultStateDir is the default location used by containerd to store
// transient data
DefaultStateDir = filepath.Join(os.Getenv("programfiles"), "containerd", "state")
DefaultStateDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "state")
)

const (

14 vendor/github.com/containerd/containerd/diff.go generated vendored
@@ -80,17 +80,19 @@ func (r *diffRemote) Compare(ctx context.Context, a, b []mount.Mount, opts ...di

func toDescriptor(d *types.Descriptor) ocispec.Descriptor {
return ocispec.Descriptor{
MediaType: d.MediaType,
Digest: d.Digest,
Size: d.Size_,
MediaType: d.MediaType,
Digest: d.Digest,
Size: d.Size_,
Annotations: d.Annotations,
}
}

func fromDescriptor(d ocispec.Descriptor) *types.Descriptor {
return &types.Descriptor{
MediaType: d.MediaType,
Digest: d.Digest,
Size_: d.Size,
MediaType: d.MediaType,
Digest: d.Digest,
Size_: d.Size,
Annotations: d.Annotations,
}
}

14 vendor/github.com/containerd/containerd/image_store.go generated vendored
@@ -137,16 +137,18 @@ func imagesFromProto(imagespb []imagesapi.Image) []images.Image {

func descFromProto(desc *types.Descriptor) ocispec.Descriptor {
return ocispec.Descriptor{
MediaType: desc.MediaType,
Size: desc.Size_,
Digest: desc.Digest,
MediaType: desc.MediaType,
Size: desc.Size_,
Digest: desc.Digest,
Annotations: desc.Annotations,
}
}

func descToProto(desc *ocispec.Descriptor) types.Descriptor {
return types.Descriptor{
MediaType: desc.MediaType,
Size_: desc.Size,
Digest: desc.Digest,
MediaType: desc.MediaType,
Size_: desc.Size,
Digest: desc.Digest,
Annotations: desc.Annotations,
}
}

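Both files above now copy Annotations in every descriptor conversion, so OCI annotations survive the round trip through the containerd API types instead of being silently dropped. A compact sketch of the same round trip, assuming the vendored api/types package; toProto and fromProto are illustrative names:

package descsketch

import (
	"github.com/containerd/containerd/api/types"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// toProto and fromProto mirror the fixed conversions: every field, including
// Annotations, is carried in both directions.
func toProto(d ocispec.Descriptor) *types.Descriptor {
	return &types.Descriptor{
		MediaType:   d.MediaType,
		Digest:      d.Digest,
		Size_:       d.Size,
		Annotations: d.Annotations,
	}
}

func fromProto(d *types.Descriptor) ocispec.Descriptor {
	return ocispec.Descriptor{
		MediaType:   d.MediaType,
		Digest:      d.Digest,
		Size:        d.Size_,
		Annotations: d.Annotations,
	}
}

With both directions patched, fromProto(toProto(d)) preserves d.Annotations, which matters for images whose manifests carry annotation metadata.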
26 vendor/github.com/containerd/containerd/mount/mountinfo_linux.go generated vendored
@@ -25,6 +25,8 @@ import (
"os"
"strconv"
"strings"

"github.com/pkg/errors"
)

// Self retrieves a list of mounts for the current running process.
@@ -41,13 +43,15 @@ func Self() ([]Info, error) {
func parseInfoFile(r io.Reader) ([]Info, error) {
s := bufio.NewScanner(r)
out := []Info{}

var err error
for s.Scan() {
if err := s.Err(); err != nil {
if err = s.Err(); err != nil {
return nil, err
}

/*
See http://man7.org/linux/man-pages/man5/proc.5.html

36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
(1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
(1) mount ID: unique identifier of the mount (may be reused after umount)
@@ -68,7 +72,7 @@ func parseInfoFile(r io.Reader) ([]Info, error) {
numFields := len(fields)
if numFields < 10 {
// should be at least 10 fields
return nil, fmt.Errorf("parsing '%s' failed: not enough fields (%d)", text, numFields)
return nil, errors.Errorf("parsing '%s' failed: not enough fields (%d)", text, numFields)
}
p := Info{}
// ignore any numbers parsing errors, as there should not be any
@@ -76,13 +80,19 @@ func parseInfoFile(r io.Reader) ([]Info, error) {
p.Parent, _ = strconv.Atoi(fields[1])
mm := strings.Split(fields[2], ":")
if len(mm) != 2 {
return nil, fmt.Errorf("parsing '%s' failed: unexpected minor:major pair %s", text, mm)
return nil, errors.Errorf("parsing '%s' failed: unexpected minor:major pair %s", text, mm)
}
p.Major, _ = strconv.Atoi(mm[0])
p.Minor, _ = strconv.Atoi(mm[1])

p.Root = fields[3]
p.Mountpoint = fields[4]
p.Root, err = strconv.Unquote(`"` + fields[3] + `"`)
if err != nil {
return nil, errors.Wrapf(err, "parsing '%s' failed: unable to unquote root field", fields[3])
}
p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`)
if err != nil {
return nil, errors.Wrapf(err, "parsing '%s' failed: unable to unquote mount point field", fields[4])
}
p.Options = fields[5]

// one or more optional fields, when a separator (-)
@@ -101,11 +111,11 @@ func parseInfoFile(r io.Reader) ([]Info, error) {
}
}
if i == numFields {
return nil, fmt.Errorf("parsing '%s' failed: missing separator ('-')", text)
return nil, errors.Errorf("parsing '%s' failed: missing separator ('-')", text)
}
// There should be 3 fields after the separator...
if i+4 > numFields {
return nil, fmt.Errorf("parsing '%s' failed: not enough fields after a separator", text)
return nil, errors.Errorf("parsing '%s' failed: not enough fields after a separator", text)
}
// ... but in Linux <= 3.9 mounting a cifs with spaces in a share name
// (like "//serv/My Documents") _may_ end up having a space in the last field

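The parser now runs the root and mount point fields through strconv.Unquote because /proc/self/mountinfo octal-escapes spaces, tabs and newlines in paths. A small sketch of why that works; unquoteField is an illustrative helper, not the vendored API:

package mountsketch

import (
	"fmt"
	"strconv"
)

// unquoteField decodes mountinfo octal escapes such as \040 (space) by
// wrapping the raw field in quotes and letting strconv.Unquote interpret it.
func unquoteField(field string) (string, error) {
	return strconv.Unquote(`"` + field + `"`)
}

func demo() {
	p, err := unquoteField(`/mnt/with\040space`)
	fmt.Println(p, err) // prints: /mnt/with space <nil>
}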
11 vendor/github.com/containerd/containerd/oci/spec_opts.go generated vendored
@@ -741,7 +741,9 @@ func WithCapabilities(caps []string) SpecOpts {
}

// WithAllCapabilities sets all linux capabilities for the process
var WithAllCapabilities = WithCapabilities(GetAllCapabilities())
var WithAllCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error {
return WithCapabilities(GetAllCapabilities())(ctx, client, c, s)
}

// GetAllCapabilities returns all caps up to CAP_LAST_CAP
// or CAP_BLOCK_SUSPEND on RHEL6
@@ -771,11 +773,14 @@ func capsContain(caps []string, s string) bool {
}

func removeCap(caps *[]string, s string) {
for i, c := range *caps {
var newcaps []string
for _, c := range *caps {
if c == s {
*caps = append((*caps)[:i], (*caps)[i+1:]...)
continue
}
newcaps = append(newcaps, c)
}
*caps = newcaps
}

// WithAddedCapabilities adds the provided capabilities

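The rewritten removeCap builds a fresh slice instead of splicing the one it is ranging over; splicing in place shifts the next element into the current index and the loop then skips it, so a repeated capability could survive removal. The same filtering pattern in isolation:

package capsketch

// removeAll drops every occurrence of s from caps. Appending survivors to a
// new slice sidesteps the skip-after-delete bug of mutating caps mid-range.
func removeAll(caps []string, s string) []string {
	var out []string
	for _, c := range caps {
		if c == s {
			continue
		}
		out = append(out, c)
	}
	return out
}

For example, removeAll([]string{"CAP_SYS_ADMIN", "CAP_SYS_ADMIN", "CAP_CHOWN"}, "CAP_SYS_ADMIN") returns only CAP_CHOWN, whereas the old in-place splice would have kept the second copy.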
11 vendor/github.com/containerd/containerd/plugin/plugin.go generated vendored
@@ -20,6 +20,7 @@ import (
"fmt"
"sync"

"github.com/containerd/ttrpc"
"github.com/pkg/errors"
"google.golang.org/grpc"
)
@@ -123,6 +124,16 @@ type Service interface {
Register(*grpc.Server) error
}

// TTRPCService allows TTRPC services to be registered with the underlying server
type TTRPCService interface {
RegisterTTRPC(*ttrpc.Server) error
}

// TCPService allows GRPC services to be registered with the underlying tcp server
type TCPService interface {
RegisterTCP(*grpc.Server) error
}

var register = struct {
sync.RWMutex
r []*Registration

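A plugin that wants to serve the same API over both GRPC and the new TTRPC path simply implements both interfaces. The sketch below is purely illustrative: the service type and the register* helpers stand in for protoc-generated registration functions and are not part of the vendored package.

package pluginsketch

import (
	"github.com/containerd/ttrpc"
	"google.golang.org/grpc"
)

// service is a hypothetical plugin service exposed over GRPC and TTRPC.
type service struct{}

// Register satisfies the GRPC-side Service interface.
func (s *service) Register(srv *grpc.Server) error {
	registerSketchGRPC(srv, s)
	return nil
}

// RegisterTTRPC satisfies the TTRPCService interface added above.
func (s *service) RegisterTTRPC(srv *ttrpc.Server) error {
	registerSketchTTRPC(srv, s)
	return nil
}

// Stand-ins for generated registration helpers (assumed, not real APIs).
func registerSketchGRPC(*grpc.Server, *service)   {}
func registerSketchTTRPC(*ttrpc.Server, *service) {}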
9 vendor/github.com/containerd/containerd/process.go generated vendored
@@ -52,6 +52,15 @@ type Process interface {
Status(context.Context) (Status, error)
}

// NewExitStatus populates an ExitStatus
func NewExitStatus(code uint32, t time.Time, err error) *ExitStatus {
return &ExitStatus{
code: code,
exitedAt: t,
err: err,
}
}

// ExitStatus encapsulates a process' exit status.
// It is used by `Wait()` to return either a process exit code or an error
type ExitStatus struct {

10 vendor/github.com/containerd/containerd/remotes/docker/fetcher.go generated vendored
@@ -18,6 +18,7 @@ package docker

import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
@@ -28,6 +29,7 @@ import (
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
"github.com/docker/distribution/registry/api/errcode"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -101,12 +103,16 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int
// really distinguish between a 206 and a 200. In the case of 200, we
// can discard the bytes, hiding the seek behavior from the
// implementation.
defer resp.Body.Close()

resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", u)
}
return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
var registryErr errcode.Errors
if err := json.NewDecoder(resp.Body).Decode(&registryErr); err != nil || registryErr.Len() < 1 {
return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
}
return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", u, resp.Status, registryErr.Error())
}
if offset > 0 {
cr := resp.Header.Get("content-range")

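Keeping the response body open long enough to decode the registry's structured error payload turns an opaque "unexpected status code" into the server's own message. A condensed sketch of that flow; explainStatus is an illustrative name and it skips the errdefs wrapping the vendored code applies to 404s:

package fetchsketch

import (
	"encoding/json"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
	"github.com/pkg/errors"
)

// explainStatus mirrors the new error path: try to decode errcode.Errors from
// the body and fall back to the bare status line when that fails.
func explainStatus(u string, resp *http.Response) error {
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound {
		return errors.Errorf("content at %v not found", u)
	}
	var registryErr errcode.Errors
	if err := json.NewDecoder(resp.Body).Decode(&registryErr); err != nil || registryErr.Len() < 1 {
		return errors.Errorf("unexpected status code %v: %v", u, resp.Status)
	}
	return errors.Errorf("unexpected status code %v: %s - Server message: %s", u, resp.Status, registryErr.Error())
}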
6 vendor/github.com/containerd/containerd/remotes/resolver.go generated vendored
@@ -72,9 +72,9 @@ func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.Re

// PusherFunc allows package users to implement a Pusher with just a
// function.
type PusherFunc func(ctx context.Context, desc ocispec.Descriptor, r io.Reader) error
type PusherFunc func(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error)

// Push content
func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor, r io.Reader) error {
return fn(ctx, desc, r)
func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
return fn(ctx, desc)
}

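With the new signature a PusherFunc hands back a content.Writer instead of consuming an io.Reader, so the caller streams and commits the content itself. A sketch of adapting a content.Store to the new shape; the ref naming is arbitrary, and WithRef/WithDescriptor are assumed to be the writer options exposed by the vendored content package:

package pushsketch

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/remotes"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// storePusher satisfies remotes.Pusher by returning a writer into the store;
// the caller copies the blob into it and calls Commit with the expected digest.
func storePusher(store content.Store) remotes.Pusher {
	return remotes.PusherFunc(func(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
		return store.Writer(ctx,
			content.WithRef("push-"+desc.Digest.String()),
			content.WithDescriptor(desc),
		)
	})
}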
490 vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go generated vendored
@ -1,30 +1,16 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: github.com/containerd/containerd/runtime/linux/runctypes/runc.proto
|
||||
|
||||
/*
|
||||
Package runctypes is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
github.com/containerd/containerd/runtime/linux/runctypes/runc.proto
|
||||
|
||||
It has these top-level messages:
|
||||
RuncOptions
|
||||
CreateOptions
|
||||
CheckpointOptions
|
||||
ProcessDetails
|
||||
*/
|
||||
package runctypes
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
|
||||
|
||||
import strings "strings"
|
||||
import reflect "reflect"
|
||||
|
||||
import io "io"
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
@ -38,59 +24,183 @@ var _ = math.Inf
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type RuncOptions struct {
|
||||
Runtime string `protobuf:"bytes,1,opt,name=runtime,proto3" json:"runtime,omitempty"`
|
||||
RuntimeRoot string `protobuf:"bytes,2,opt,name=runtime_root,json=runtimeRoot,proto3" json:"runtime_root,omitempty"`
|
||||
CriuPath string `protobuf:"bytes,3,opt,name=criu_path,json=criuPath,proto3" json:"criu_path,omitempty"`
|
||||
SystemdCgroup bool `protobuf:"varint,4,opt,name=systemd_cgroup,json=systemdCgroup,proto3" json:"systemd_cgroup,omitempty"`
|
||||
Runtime string `protobuf:"bytes,1,opt,name=runtime,proto3" json:"runtime,omitempty"`
|
||||
RuntimeRoot string `protobuf:"bytes,2,opt,name=runtime_root,json=runtimeRoot,proto3" json:"runtime_root,omitempty"`
|
||||
CriuPath string `protobuf:"bytes,3,opt,name=criu_path,json=criuPath,proto3" json:"criu_path,omitempty"`
|
||||
SystemdCgroup bool `protobuf:"varint,4,opt,name=systemd_cgroup,json=systemdCgroup,proto3" json:"systemd_cgroup,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *RuncOptions) Reset() { *m = RuncOptions{} }
|
||||
func (*RuncOptions) ProtoMessage() {}
|
||||
func (*RuncOptions) Descriptor() ([]byte, []int) { return fileDescriptorRunc, []int{0} }
|
||||
func (m *RuncOptions) Reset() { *m = RuncOptions{} }
|
||||
func (*RuncOptions) ProtoMessage() {}
|
||||
func (*RuncOptions) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d20e2ba8b3cc58b9, []int{0}
|
||||
}
|
||||
func (m *RuncOptions) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *RuncOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_RuncOptions.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *RuncOptions) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_RuncOptions.Merge(m, src)
|
||||
}
|
||||
func (m *RuncOptions) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *RuncOptions) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_RuncOptions.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_RuncOptions proto.InternalMessageInfo
|
||||
|
||||
type CreateOptions struct {
|
||||
NoPivotRoot bool `protobuf:"varint,1,opt,name=no_pivot_root,json=noPivotRoot,proto3" json:"no_pivot_root,omitempty"`
|
||||
OpenTcp bool `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"`
|
||||
ExternalUnixSockets bool `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"`
|
||||
Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"`
|
||||
FileLocks bool `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"`
|
||||
EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces" json:"empty_namespaces,omitempty"`
|
||||
CgroupsMode string `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"`
|
||||
NoNewKeyring bool `protobuf:"varint,8,opt,name=no_new_keyring,json=noNewKeyring,proto3" json:"no_new_keyring,omitempty"`
|
||||
ShimCgroup string `protobuf:"bytes,9,opt,name=shim_cgroup,json=shimCgroup,proto3" json:"shim_cgroup,omitempty"`
|
||||
IoUid uint32 `protobuf:"varint,10,opt,name=io_uid,json=ioUid,proto3" json:"io_uid,omitempty"`
|
||||
IoGid uint32 `protobuf:"varint,11,opt,name=io_gid,json=ioGid,proto3" json:"io_gid,omitempty"`
|
||||
CriuWorkPath string `protobuf:"bytes,12,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"`
|
||||
CriuImagePath string `protobuf:"bytes,13,opt,name=criu_image_path,json=criuImagePath,proto3" json:"criu_image_path,omitempty"`
|
||||
NoPivotRoot bool `protobuf:"varint,1,opt,name=no_pivot_root,json=noPivotRoot,proto3" json:"no_pivot_root,omitempty"`
|
||||
OpenTcp bool `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"`
|
||||
ExternalUnixSockets bool `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"`
|
||||
Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"`
|
||||
FileLocks bool `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"`
|
||||
EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces,proto3" json:"empty_namespaces,omitempty"`
|
||||
CgroupsMode string `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"`
|
||||
NoNewKeyring bool `protobuf:"varint,8,opt,name=no_new_keyring,json=noNewKeyring,proto3" json:"no_new_keyring,omitempty"`
|
||||
ShimCgroup string `protobuf:"bytes,9,opt,name=shim_cgroup,json=shimCgroup,proto3" json:"shim_cgroup,omitempty"`
|
||||
IoUid uint32 `protobuf:"varint,10,opt,name=io_uid,json=ioUid,proto3" json:"io_uid,omitempty"`
|
||||
IoGid uint32 `protobuf:"varint,11,opt,name=io_gid,json=ioGid,proto3" json:"io_gid,omitempty"`
|
||||
CriuWorkPath string `protobuf:"bytes,12,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"`
|
||||
CriuImagePath string `protobuf:"bytes,13,opt,name=criu_image_path,json=criuImagePath,proto3" json:"criu_image_path,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *CreateOptions) Reset() { *m = CreateOptions{} }
|
||||
func (*CreateOptions) ProtoMessage() {}
|
||||
func (*CreateOptions) Descriptor() ([]byte, []int) { return fileDescriptorRunc, []int{1} }
|
||||
func (m *CreateOptions) Reset() { *m = CreateOptions{} }
|
||||
func (*CreateOptions) ProtoMessage() {}
|
||||
func (*CreateOptions) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d20e2ba8b3cc58b9, []int{1}
|
||||
}
|
||||
func (m *CreateOptions) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *CreateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_CreateOptions.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *CreateOptions) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_CreateOptions.Merge(m, src)
|
||||
}
|
||||
func (m *CreateOptions) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *CreateOptions) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_CreateOptions.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_CreateOptions proto.InternalMessageInfo
|
||||
|
||||
type CheckpointOptions struct {
|
||||
Exit bool `protobuf:"varint,1,opt,name=exit,proto3" json:"exit,omitempty"`
|
||||
OpenTcp bool `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"`
|
||||
ExternalUnixSockets bool `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"`
|
||||
Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"`
|
||||
FileLocks bool `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"`
|
||||
EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces" json:"empty_namespaces,omitempty"`
|
||||
CgroupsMode string `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"`
|
||||
WorkPath string `protobuf:"bytes,8,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"`
|
||||
ImagePath string `protobuf:"bytes,9,opt,name=image_path,json=imagePath,proto3" json:"image_path,omitempty"`
|
||||
Exit bool `protobuf:"varint,1,opt,name=exit,proto3" json:"exit,omitempty"`
|
||||
OpenTcp bool `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"`
|
||||
ExternalUnixSockets bool `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"`
|
||||
Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"`
|
||||
FileLocks bool `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"`
|
||||
EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces,proto3" json:"empty_namespaces,omitempty"`
|
||||
CgroupsMode string `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"`
|
||||
WorkPath string `protobuf:"bytes,8,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"`
|
||||
ImagePath string `protobuf:"bytes,9,opt,name=image_path,json=imagePath,proto3" json:"image_path,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *CheckpointOptions) Reset() { *m = CheckpointOptions{} }
|
||||
func (*CheckpointOptions) ProtoMessage() {}
|
||||
func (*CheckpointOptions) Descriptor() ([]byte, []int) { return fileDescriptorRunc, []int{2} }
|
||||
func (m *CheckpointOptions) Reset() { *m = CheckpointOptions{} }
|
||||
func (*CheckpointOptions) ProtoMessage() {}
|
||||
func (*CheckpointOptions) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d20e2ba8b3cc58b9, []int{2}
|
||||
}
|
||||
func (m *CheckpointOptions) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *CheckpointOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_CheckpointOptions.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *CheckpointOptions) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_CheckpointOptions.Merge(m, src)
|
||||
}
|
||||
func (m *CheckpointOptions) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *CheckpointOptions) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_CheckpointOptions.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_CheckpointOptions proto.InternalMessageInfo
|
||||
|
||||
type ProcessDetails struct {
|
||||
ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
|
||||
ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ProcessDetails) Reset() { *m = ProcessDetails{} }
|
||||
func (*ProcessDetails) ProtoMessage() {}
|
||||
func (*ProcessDetails) Descriptor() ([]byte, []int) { return fileDescriptorRunc, []int{3} }
|
||||
func (m *ProcessDetails) Reset() { *m = ProcessDetails{} }
|
||||
func (*ProcessDetails) ProtoMessage() {}
|
||||
func (*ProcessDetails) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d20e2ba8b3cc58b9, []int{3}
|
||||
}
|
||||
func (m *ProcessDetails) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ProcessDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ProcessDetails.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ProcessDetails) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ProcessDetails.Merge(m, src)
|
||||
}
|
||||
func (m *ProcessDetails) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ProcessDetails) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ProcessDetails.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ProcessDetails proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*RuncOptions)(nil), "containerd.linux.runc.RuncOptions")
|
||||
@ -98,6 +208,53 @@ func init() {
|
||||
proto.RegisterType((*CheckpointOptions)(nil), "containerd.linux.runc.CheckpointOptions")
|
||||
proto.RegisterType((*ProcessDetails)(nil), "containerd.linux.runc.ProcessDetails")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/runtime/linux/runctypes/runc.proto", fileDescriptor_d20e2ba8b3cc58b9)
|
||||
}
|
||||
|
||||
var fileDescriptor_d20e2ba8b3cc58b9 = []byte{
|
||||
// 604 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x94, 0xcf, 0x6e, 0xd3, 0x40,
|
||||
0x10, 0xc6, 0xeb, 0xfe, 0x49, 0x9c, 0x49, 0xd2, 0xc2, 0x42, 0x25, 0xd3, 0xaa, 0x69, 0x08, 0x7f,
|
||||
0x14, 0x2e, 0xa9, 0x04, 0xe2, 0xc4, 0xad, 0x29, 0x42, 0x15, 0x50, 0x2a, 0x43, 0x05, 0x42, 0x48,
|
||||
0x2b, 0x77, 0x3d, 0x24, 0xab, 0xc4, 0x3b, 0x96, 0x77, 0x4d, 0x92, 0x1b, 0x4f, 0xc0, 0x0b, 0xf1,
|
||||
0x02, 0x3d, 0x21, 0x8e, 0x9c, 0x10, 0xcd, 0x93, 0xa0, 0x5d, 0xc7, 0x69, 0xcf, 0x1c, 0xb9, 0xcd,
|
||||
0xfc, 0xe6, 0xb3, 0x67, 0xf4, 0x7d, 0xb2, 0xa1, 0x3f, 0x90, 0x66, 0x98, 0x9f, 0xf7, 0x04, 0x25,
|
||||
0x07, 0x82, 0x94, 0x89, 0xa4, 0xc2, 0x2c, 0xbe, 0x5e, 0x66, 0xb9, 0x32, 0x32, 0xc1, 0x83, 0xb1,
|
||||
0x54, 0xf9, 0xd4, 0x76, 0xc2, 0xcc, 0x52, 0xd4, 0xae, 0xea, 0xa5, 0x19, 0x19, 0x62, 0xdb, 0x57,
|
||||
0xf2, 0x9e, 0x93, 0xf5, 0xec, 0x70, 0xe7, 0xf6, 0x80, 0x06, 0xe4, 0x14, 0x07, 0xb6, 0x2a, 0xc4,
|
||||
0x9d, 0x6f, 0x1e, 0xd4, 0xc3, 0x5c, 0x89, 0x37, 0xa9, 0x91, 0xa4, 0x34, 0x0b, 0xa0, 0xba, 0x58,
|
||||
0x11, 0x78, 0x6d, 0xaf, 0x5b, 0x0b, 0xcb, 0x96, 0xdd, 0x85, 0xc6, 0xa2, 0xe4, 0x19, 0x91, 0x09,
|
||||
0x56, 0xdd, 0xb8, 0xbe, 0x60, 0x21, 0x91, 0x61, 0xbb, 0x50, 0x13, 0x99, 0xcc, 0x79, 0x1a, 0x99,
|
||||
0x61, 0xb0, 0xe6, 0xe6, 0xbe, 0x05, 0xa7, 0x91, 0x19, 0xb2, 0x07, 0xb0, 0xa9, 0x67, 0xda, 0x60,
|
||||
0x12, 0x73, 0x31, 0xc8, 0x28, 0x4f, 0x83, 0xf5, 0xb6, 0xd7, 0xf5, 0xc3, 0xe6, 0x82, 0xf6, 0x1d,
|
||||
0xec, 0xfc, 0x58, 0x83, 0x66, 0x3f, 0xc3, 0xc8, 0x60, 0x79, 0x52, 0x07, 0x9a, 0x8a, 0x78, 0x2a,
|
||||
0xbf, 0x90, 0x29, 0x36, 0x7b, 0xee, 0xb9, 0xba, 0xa2, 0x53, 0xcb, 0xdc, 0xe6, 0x3b, 0xe0, 0x53,
|
||||
0x8a, 0x8a, 0x1b, 0x91, 0xba, 0xc3, 0xfc, 0xb0, 0x6a, 0xfb, 0x77, 0x22, 0x65, 0x8f, 0x61, 0x1b,
|
||||
0xa7, 0x06, 0x33, 0x15, 0x8d, 0x79, 0xae, 0xe4, 0x94, 0x6b, 0x12, 0x23, 0x34, 0xda, 0x1d, 0xe8,
|
||||
0x87, 0xb7, 0xca, 0xe1, 0x99, 0x92, 0xd3, 0xb7, 0xc5, 0x88, 0xed, 0x80, 0x6f, 0x30, 0x4b, 0xa4,
|
||||
0x8a, 0xc6, 0x8b, 0x2b, 0x97, 0x3d, 0xdb, 0x03, 0xf8, 0x2c, 0xc7, 0xc8, 0xc7, 0x24, 0x46, 0x3a,
|
||||
0xd8, 0x70, 0xd3, 0x9a, 0x25, 0xaf, 0x2c, 0x60, 0x8f, 0xe0, 0x06, 0x26, 0xa9, 0x99, 0x71, 0x15,
|
||||
0x25, 0xa8, 0xd3, 0x48, 0xa0, 0x0e, 0x2a, 0xed, 0xb5, 0x6e, 0x2d, 0xdc, 0x72, 0xfc, 0x64, 0x89,
|
||||
0xad, 0xa3, 0x85, 0x13, 0x9a, 0x27, 0x14, 0x63, 0x50, 0x2d, 0x1c, 0x5d, 0xb0, 0xd7, 0x14, 0x23,
|
||||
0xbb, 0x0f, 0x9b, 0x8a, 0xb8, 0xc2, 0x09, 0x1f, 0xe1, 0x2c, 0x93, 0x6a, 0x10, 0xf8, 0x6e, 0x61,
|
||||
0x43, 0xd1, 0x09, 0x4e, 0x5e, 0x16, 0x8c, 0xed, 0x43, 0x5d, 0x0f, 0x65, 0x52, 0xfa, 0x5a, 0x73,
|
||||
0xef, 0x01, 0x8b, 0x0a, 0x53, 0xd9, 0x36, 0x54, 0x24, 0xf1, 0x5c, 0xc6, 0x01, 0xb4, 0xbd, 0x6e,
|
||||
0x33, 0xdc, 0x90, 0x74, 0x26, 0xe3, 0x05, 0x1e, 0xc8, 0x38, 0xa8, 0x97, 0xf8, 0x85, 0x8c, 0xed,
|
||||
0x52, 0x17, 0xe3, 0x84, 0xb2, 0x51, 0x91, 0x65, 0xc3, 0xbd, 0xb1, 0x61, 0xe9, 0x7b, 0xca, 0x46,
|
||||
0x2e, 0xcf, 0x87, 0xb0, 0xe5, 0x54, 0x32, 0x89, 0x06, 0x58, 0xc8, 0x9a, 0x4e, 0xd6, 0xb4, 0xf8,
|
||||
0xd8, 0x52, 0xab, 0xeb, 0x7c, 0x5f, 0x85, 0x9b, 0xfd, 0x21, 0x8a, 0x51, 0x4a, 0x52, 0x99, 0x32,
|
||||
0x54, 0x06, 0xeb, 0x38, 0x95, 0x65, 0x96, 0xae, 0xfe, 0x6f, 0x43, 0xdc, 0x85, 0xda, 0x95, 0x95,
|
||||
0x7e, 0xf1, 0x59, 0x4c, 0x4a, 0x1b, 0xf7, 0x00, 0xae, 0x39, 0x58, 0x44, 0x57, 0x93, 0x4b, 0xf7,
|
||||
0x9e, 0xc2, 0xe6, 0x69, 0x46, 0x02, 0xb5, 0x3e, 0x42, 0x13, 0xc9, 0xb1, 0x66, 0xf7, 0xa0, 0x8a,
|
||||
0x53, 0x14, 0x5c, 0xc6, 0xc5, 0x17, 0x7a, 0x08, 0xf3, 0xdf, 0xfb, 0x95, 0xe7, 0x53, 0x14, 0xc7,
|
||||
0x47, 0x61, 0xc5, 0x8e, 0x8e, 0xe3, 0xc3, 0x4f, 0x17, 0x97, 0xad, 0x95, 0x5f, 0x97, 0xad, 0x95,
|
||||
0xaf, 0xf3, 0x96, 0x77, 0x31, 0x6f, 0x79, 0x3f, 0xe7, 0x2d, 0xef, 0xcf, 0xbc, 0xe5, 0x7d, 0x3c,
|
||||
0xfc, 0xd7, 0x5f, 0xcc, 0xb3, 0x65, 0xf5, 0x61, 0xe5, 0xbc, 0xe2, 0xfe, 0x1e, 0x4f, 0xfe, 0x06,
|
||||
0x00, 0x00, 0xff, 0xff, 0x7f, 0x24, 0x6f, 0x2e, 0xb1, 0x04, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *RuncOptions) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
@ -141,6 +298,9 @@ func (m *RuncOptions) MarshalTo(dAtA []byte) (int, error) {
|
||||
}
|
||||
i++
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -268,6 +428,9 @@ func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) {
|
||||
i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuImagePath)))
|
||||
i += copy(dAtA[i:], m.CriuImagePath)
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -369,6 +532,9 @@ func (m *CheckpointOptions) MarshalTo(dAtA []byte) (int, error) {
|
||||
i = encodeVarintRunc(dAtA, i, uint64(len(m.ImagePath)))
|
||||
i += copy(dAtA[i:], m.ImagePath)
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -393,6 +559,9 @@ func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) {
|
||||
i = encodeVarintRunc(dAtA, i, uint64(len(m.ExecID)))
|
||||
i += copy(dAtA[i:], m.ExecID)
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -406,6 +575,9 @@ func encodeVarintRunc(dAtA []byte, offset int, v uint64) int {
|
||||
return offset + 1
|
||||
}
|
||||
func (m *RuncOptions) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Runtime)
|
||||
@ -423,10 +595,16 @@ func (m *RuncOptions) Size() (n int) {
|
||||
if m.SystemdCgroup {
|
||||
n += 2
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *CreateOptions) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.NoPivotRoot {
|
||||
@ -475,10 +653,16 @@ func (m *CreateOptions) Size() (n int) {
|
||||
if l > 0 {
|
||||
n += 1 + l + sovRunc(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *CheckpointOptions) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Exit {
|
||||
@ -514,16 +698,25 @@ func (m *CheckpointOptions) Size() (n int) {
|
||||
if l > 0 {
|
||||
n += 1 + l + sovRunc(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ProcessDetails) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.ExecID)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovRunc(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@ -549,6 +742,7 @@ func (this *RuncOptions) String() string {
|
||||
`RuntimeRoot:` + fmt.Sprintf("%v", this.RuntimeRoot) + `,`,
|
||||
`CriuPath:` + fmt.Sprintf("%v", this.CriuPath) + `,`,
|
||||
`SystemdCgroup:` + fmt.Sprintf("%v", this.SystemdCgroup) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -571,6 +765,7 @@ func (this *CreateOptions) String() string {
|
||||
`IoGid:` + fmt.Sprintf("%v", this.IoGid) + `,`,
|
||||
`CriuWorkPath:` + fmt.Sprintf("%v", this.CriuWorkPath) + `,`,
|
||||
`CriuImagePath:` + fmt.Sprintf("%v", this.CriuImagePath) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -589,6 +784,7 @@ func (this *CheckpointOptions) String() string {
|
||||
`CgroupsMode:` + fmt.Sprintf("%v", this.CgroupsMode) + `,`,
|
||||
`WorkPath:` + fmt.Sprintf("%v", this.WorkPath) + `,`,
|
||||
`ImagePath:` + fmt.Sprintf("%v", this.ImagePath) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -599,6 +795,7 @@ func (this *ProcessDetails) String() string {
|
||||
}
|
||||
s := strings.Join([]string{`&ProcessDetails{`,
|
||||
`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -626,7 +823,7 @@ func (m *RuncOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -654,7 +851,7 @@ func (m *RuncOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -664,6 +861,9 @@ func (m *RuncOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -683,7 +883,7 @@ func (m *RuncOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -693,6 +893,9 @@ func (m *RuncOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -712,7 +915,7 @@ func (m *RuncOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -722,6 +925,9 @@ func (m *RuncOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -741,7 +947,7 @@ func (m *RuncOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -756,9 +962,13 @@ func (m *RuncOptions) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -783,7 +993,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -811,7 +1021,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -831,7 +1041,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -851,7 +1061,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -871,7 +1081,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -891,7 +1101,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -911,7 +1121,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -921,6 +1131,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -940,7 +1153,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -950,6 +1163,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -969,7 +1185,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -989,7 +1205,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -999,6 +1215,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -1018,7 +1237,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.IoUid |= (uint32(b) & 0x7F) << shift
|
||||
m.IoUid |= uint32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1037,7 +1256,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.IoGid |= (uint32(b) & 0x7F) << shift
|
||||
m.IoGid |= uint32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1056,7 +1275,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1066,6 +1285,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -1085,7 +1307,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1095,6 +1317,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -1109,9 +1334,13 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -1136,7 +1365,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1164,7 +1393,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1184,7 +1413,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1204,7 +1433,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1224,7 +1453,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1244,7 +1473,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1264,7 +1493,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1274,6 +1503,9 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -1293,7 +1525,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1303,6 +1535,9 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -1322,7 +1557,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1332,6 +1567,9 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -1351,7 +1589,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1361,6 +1599,9 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -1375,9 +1616,13 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -1402,7 +1647,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1430,7 +1675,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1440,6 +1685,9 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -1454,9 +1702,13 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -1520,10 +1772,13 @@ func skipRunc(dAtA []byte) (n int, err error) {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthRunc
|
||||
}
|
||||
iNdEx += length
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthRunc
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
@ -1552,6 +1807,9 @@ func skipRunc(dAtA []byte) (n int, err error) {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthRunc
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
@ -1570,49 +1828,3 @@ var (
|
||||
ErrInvalidLengthRunc = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowRunc = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/runtime/linux/runctypes/runc.proto", fileDescriptorRunc)
|
||||
}
|
||||
|
||||
var fileDescriptorRunc = []byte{
|
||||
// 604 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x94, 0xcf, 0x6e, 0xd3, 0x40,
|
||||
0x10, 0xc6, 0xeb, 0xfe, 0x49, 0x9c, 0x49, 0xd2, 0xc2, 0x42, 0x25, 0xd3, 0xaa, 0x69, 0x08, 0x7f,
|
||||
0x14, 0x2e, 0xa9, 0x04, 0xe2, 0xc4, 0xad, 0x29, 0x42, 0x15, 0x50, 0x2a, 0x43, 0x05, 0x42, 0x48,
|
||||
0x2b, 0x77, 0x3d, 0x24, 0xab, 0xc4, 0x3b, 0x96, 0x77, 0x4d, 0x92, 0x1b, 0x4f, 0xc0, 0x0b, 0xf1,
|
||||
0x02, 0x3d, 0x21, 0x8e, 0x9c, 0x10, 0xcd, 0x93, 0xa0, 0x5d, 0xc7, 0x69, 0xcf, 0x1c, 0xb9, 0xcd,
|
||||
0xfc, 0xe6, 0xb3, 0x67, 0xf4, 0x7d, 0xb2, 0xa1, 0x3f, 0x90, 0x66, 0x98, 0x9f, 0xf7, 0x04, 0x25,
|
||||
0x07, 0x82, 0x94, 0x89, 0xa4, 0xc2, 0x2c, 0xbe, 0x5e, 0x66, 0xb9, 0x32, 0x32, 0xc1, 0x83, 0xb1,
|
||||
0x54, 0xf9, 0xd4, 0x76, 0xc2, 0xcc, 0x52, 0xd4, 0xae, 0xea, 0xa5, 0x19, 0x19, 0x62, 0xdb, 0x57,
|
||||
0xf2, 0x9e, 0x93, 0xf5, 0xec, 0x70, 0xe7, 0xf6, 0x80, 0x06, 0xe4, 0x14, 0x07, 0xb6, 0x2a, 0xc4,
|
||||
0x9d, 0x6f, 0x1e, 0xd4, 0xc3, 0x5c, 0x89, 0x37, 0xa9, 0x91, 0xa4, 0x34, 0x0b, 0xa0, 0xba, 0x58,
|
||||
0x11, 0x78, 0x6d, 0xaf, 0x5b, 0x0b, 0xcb, 0x96, 0xdd, 0x85, 0xc6, 0xa2, 0xe4, 0x19, 0x91, 0x09,
|
||||
0x56, 0xdd, 0xb8, 0xbe, 0x60, 0x21, 0x91, 0x61, 0xbb, 0x50, 0x13, 0x99, 0xcc, 0x79, 0x1a, 0x99,
|
||||
0x61, 0xb0, 0xe6, 0xe6, 0xbe, 0x05, 0xa7, 0x91, 0x19, 0xb2, 0x07, 0xb0, 0xa9, 0x67, 0xda, 0x60,
|
||||
0x12, 0x73, 0x31, 0xc8, 0x28, 0x4f, 0x83, 0xf5, 0xb6, 0xd7, 0xf5, 0xc3, 0xe6, 0x82, 0xf6, 0x1d,
|
||||
0xec, 0xfc, 0x58, 0x83, 0x66, 0x3f, 0xc3, 0xc8, 0x60, 0x79, 0x52, 0x07, 0x9a, 0x8a, 0x78, 0x2a,
|
||||
0xbf, 0x90, 0x29, 0x36, 0x7b, 0xee, 0xb9, 0xba, 0xa2, 0x53, 0xcb, 0xdc, 0xe6, 0x3b, 0xe0, 0x53,
|
||||
0x8a, 0x8a, 0x1b, 0x91, 0xba, 0xc3, 0xfc, 0xb0, 0x6a, 0xfb, 0x77, 0x22, 0x65, 0x8f, 0x61, 0x1b,
|
||||
0xa7, 0x06, 0x33, 0x15, 0x8d, 0x79, 0xae, 0xe4, 0x94, 0x6b, 0x12, 0x23, 0x34, 0xda, 0x1d, 0xe8,
|
||||
0x87, 0xb7, 0xca, 0xe1, 0x99, 0x92, 0xd3, 0xb7, 0xc5, 0x88, 0xed, 0x80, 0x6f, 0x30, 0x4b, 0xa4,
|
||||
0x8a, 0xc6, 0x8b, 0x2b, 0x97, 0x3d, 0xdb, 0x03, 0xf8, 0x2c, 0xc7, 0xc8, 0xc7, 0x24, 0x46, 0x3a,
|
||||
0xd8, 0x70, 0xd3, 0x9a, 0x25, 0xaf, 0x2c, 0x60, 0x8f, 0xe0, 0x06, 0x26, 0xa9, 0x99, 0x71, 0x15,
|
||||
0x25, 0xa8, 0xd3, 0x48, 0xa0, 0x0e, 0x2a, 0xed, 0xb5, 0x6e, 0x2d, 0xdc, 0x72, 0xfc, 0x64, 0x89,
|
||||
0xad, 0xa3, 0x85, 0x13, 0x9a, 0x27, 0x14, 0x63, 0x50, 0x2d, 0x1c, 0x5d, 0xb0, 0xd7, 0x14, 0x23,
|
||||
0xbb, 0x0f, 0x9b, 0x8a, 0xb8, 0xc2, 0x09, 0x1f, 0xe1, 0x2c, 0x93, 0x6a, 0x10, 0xf8, 0x6e, 0x61,
|
||||
0x43, 0xd1, 0x09, 0x4e, 0x5e, 0x16, 0x8c, 0xed, 0x43, 0x5d, 0x0f, 0x65, 0x52, 0xfa, 0x5a, 0x73,
|
||||
0xef, 0x01, 0x8b, 0x0a, 0x53, 0xd9, 0x36, 0x54, 0x24, 0xf1, 0x5c, 0xc6, 0x01, 0xb4, 0xbd, 0x6e,
|
||||
0x33, 0xdc, 0x90, 0x74, 0x26, 0xe3, 0x05, 0x1e, 0xc8, 0x38, 0xa8, 0x97, 0xf8, 0x85, 0x8c, 0xed,
|
||||
0x52, 0x17, 0xe3, 0x84, 0xb2, 0x51, 0x91, 0x65, 0xc3, 0xbd, 0xb1, 0x61, 0xe9, 0x7b, 0xca, 0x46,
|
||||
0x2e, 0xcf, 0x87, 0xb0, 0xe5, 0x54, 0x32, 0x89, 0x06, 0x58, 0xc8, 0x9a, 0x4e, 0xd6, 0xb4, 0xf8,
|
||||
0xd8, 0x52, 0xab, 0xeb, 0x7c, 0x5f, 0x85, 0x9b, 0xfd, 0x21, 0x8a, 0x51, 0x4a, 0x52, 0x99, 0x32,
|
||||
0x54, 0x06, 0xeb, 0x38, 0x95, 0x65, 0x96, 0xae, 0xfe, 0x6f, 0x43, 0xdc, 0x85, 0xda, 0x95, 0x95,
|
||||
0x7e, 0xf1, 0x59, 0x4c, 0x4a, 0x1b, 0xf7, 0x00, 0xae, 0x39, 0x58, 0x44, 0x57, 0x93, 0x4b, 0xf7,
|
||||
0x9e, 0xc2, 0xe6, 0x69, 0x46, 0x02, 0xb5, 0x3e, 0x42, 0x13, 0xc9, 0xb1, 0x66, 0xf7, 0xa0, 0x8a,
|
||||
0x53, 0x14, 0x5c, 0xc6, 0xc5, 0x17, 0x7a, 0x08, 0xf3, 0xdf, 0xfb, 0x95, 0xe7, 0x53, 0x14, 0xc7,
|
||||
0x47, 0x61, 0xc5, 0x8e, 0x8e, 0xe3, 0xc3, 0x4f, 0x17, 0x97, 0xad, 0x95, 0x5f, 0x97, 0xad, 0x95,
|
||||
0xaf, 0xf3, 0x96, 0x77, 0x31, 0x6f, 0x79, 0x3f, 0xe7, 0x2d, 0xef, 0xcf, 0xbc, 0xe5, 0x7d, 0x3c,
|
||||
0xfc, 0xd7, 0x5f, 0xcc, 0xb3, 0x65, 0xf5, 0x61, 0xe5, 0xbc, 0xe2, 0xfe, 0x1e, 0x4f, 0xfe, 0x06,
|
||||
0x00, 0x00, 0xff, 0xff, 0x7f, 0x24, 0x6f, 0x2e, 0xb1, 0x04, 0x00, 0x00,
|
||||
}
|
||||
|
||||
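Most of the churn in this regenerated file is mechanical: the varint decoding drops the redundant parentheses (wire |= uint64(b&0x7F) << shift) and every length computation gains a negative-index guard so a crafted length cannot wrap around the bounds check. A standalone sketch of both patterns; the function names are illustrative, not part of the generated code:

package varintsketch

import "errors"

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// decodeUvarint shows the shape of the generated decoding after this change.
func decodeUvarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("proto: integer overflow")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return value, n, nil
}

// boundsCheck mirrors the added guards: an end index that wraps negative is
// rejected before it is compared against the buffer length.
func boundsCheck(index int, length uint64, bufLen int) error {
	postIndex := index + int(length)
	if postIndex < 0 {
		return errInvalidLength
	}
	if postIndex > bufLen {
		return errors.New("unexpected EOF")
	}
	return nil
}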
370 vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go generated vendored
@ -1,29 +1,16 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: github.com/containerd/containerd/runtime/v2/runc/options/oci.proto
|
||||
|
||||
/*
|
||||
Package options is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
github.com/containerd/containerd/runtime/v2/runc/options/oci.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Options
|
||||
CheckpointOptions
|
||||
ProcessDetails
|
||||
*/
|
||||
package options
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
|
||||
|
||||
import strings "strings"
|
||||
import reflect "reflect"
|
||||
|
||||
import io "io"
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
@ -58,12 +45,43 @@ type Options struct {
|
||||
// criu image path
|
||||
CriuImagePath string `protobuf:"bytes,10,opt,name=criu_image_path,json=criuImagePath,proto3" json:"criu_image_path,omitempty"`
|
||||
// criu work path
|
||||
CriuWorkPath string `protobuf:"bytes,11,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"`
|
||||
CriuWorkPath string `protobuf:"bytes,11,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Options) Reset() { *m = Options{} }
|
||||
func (*Options) ProtoMessage() {}
|
||||
func (*Options) Descriptor() ([]byte, []int) { return fileDescriptorOci, []int{0} }
|
||||
func (m *Options) Reset() { *m = Options{} }
|
||||
func (*Options) ProtoMessage() {}
|
||||
func (*Options) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_4e5440d739e9a863, []int{0}
|
||||
}
|
||||
func (m *Options) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Options) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Options.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Options) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Options.Merge(m, src)
|
||||
}
|
||||
func (m *Options) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Options) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Options.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Options proto.InternalMessageInfo
|
||||
|
||||
type CheckpointOptions struct {
|
||||
// exit the container after a checkpoint
|
||||
@ -77,33 +95,141 @@ type CheckpointOptions struct {
|
||||
// allow checkpointing of file locks
|
||||
FileLocks bool `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"`
|
||||
// restore provided namespaces as empty namespaces
|
||||
EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces" json:"empty_namespaces,omitempty"`
|
||||
EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces,proto3" json:"empty_namespaces,omitempty"`
|
||||
// set the cgroups mode, soft, full, strict
|
||||
CgroupsMode string `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"`
|
||||
// checkpoint image path
|
||||
ImagePath string `protobuf:"bytes,8,opt,name=image_path,json=imagePath,proto3" json:"image_path,omitempty"`
|
||||
// checkpoint work path
|
||||
WorkPath string `protobuf:"bytes,9,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"`
|
||||
WorkPath string `protobuf:"bytes,9,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *CheckpointOptions) Reset() { *m = CheckpointOptions{} }
|
||||
func (*CheckpointOptions) ProtoMessage() {}
|
||||
func (*CheckpointOptions) Descriptor() ([]byte, []int) { return fileDescriptorOci, []int{1} }
|
||||
func (m *CheckpointOptions) Reset() { *m = CheckpointOptions{} }
|
||||
func (*CheckpointOptions) ProtoMessage() {}
|
||||
func (*CheckpointOptions) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_4e5440d739e9a863, []int{1}
|
||||
}
|
||||
func (m *CheckpointOptions) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *CheckpointOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_CheckpointOptions.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *CheckpointOptions) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_CheckpointOptions.Merge(m, src)
|
||||
}
|
||||
func (m *CheckpointOptions) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *CheckpointOptions) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_CheckpointOptions.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_CheckpointOptions proto.InternalMessageInfo
|
||||
|
||||
type ProcessDetails struct {
|
||||
// exec process id if the process is managed by a shim
|
||||
ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
|
||||
ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ProcessDetails) Reset() { *m = ProcessDetails{} }
|
||||
func (*ProcessDetails) ProtoMessage() {}
|
||||
func (*ProcessDetails) Descriptor() ([]byte, []int) { return fileDescriptorOci, []int{2} }
|
||||
func (m *ProcessDetails) Reset() { *m = ProcessDetails{} }
|
||||
func (*ProcessDetails) ProtoMessage() {}
|
||||
func (*ProcessDetails) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_4e5440d739e9a863, []int{2}
|
||||
}
|
||||
func (m *ProcessDetails) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ProcessDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ProcessDetails.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalTo(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ProcessDetails) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ProcessDetails.Merge(m, src)
|
||||
}
|
||||
func (m *ProcessDetails) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ProcessDetails) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ProcessDetails.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ProcessDetails proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Options)(nil), "containerd.runc.v1.Options")
|
||||
proto.RegisterType((*CheckpointOptions)(nil), "containerd.runc.v1.CheckpointOptions")
|
||||
proto.RegisterType((*ProcessDetails)(nil), "containerd.runc.v1.ProcessDetails")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/runtime/v2/runc/options/oci.proto", fileDescriptor_4e5440d739e9a863)
|
||||
}
|
||||
|
||||
var fileDescriptor_4e5440d739e9a863 = []byte{
|
||||
// 587 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6e, 0xd3, 0x40,
|
||||
0x10, 0x87, 0xeb, 0xfe, 0x49, 0xec, 0x4d, 0x93, 0xc2, 0x42, 0x25, 0xd3, 0x8a, 0x34, 0x94, 0x82,
|
||||
0xc2, 0x25, 0x11, 0x45, 0x9c, 0xb8, 0xa0, 0xb6, 0x08, 0x55, 0x40, 0xa9, 0x0c, 0x15, 0xa8, 0x97,
|
||||
0x95, 0xbb, 0x1e, 0x9c, 0x51, 0xe2, 0x1d, 0xcb, 0xbb, 0x69, 0xd2, 0x1b, 0xef, 0xc5, 0x0b, 0xf4,
|
||||
0xc8, 0x91, 0x13, 0xa2, 0xb9, 0xf1, 0x16, 0x68, 0xd7, 0x4e, 0xdb, 0x33, 0x27, 0xcf, 0x7e, 0xf3,
|
||||
0xf3, 0x78, 0xfd, 0xad, 0x96, 0xed, 0xa5, 0x68, 0x06, 0xe3, 0xb3, 0x9e, 0xa4, 0xac, 0x2f, 0x49,
|
||||
0x99, 0x18, 0x15, 0x14, 0xc9, 0xed, 0xb2, 0x18, 0x2b, 0x83, 0x19, 0xf4, 0xcf, 0x77, 0x6d, 0x29,
|
||||
0xfb, 0x94, 0x1b, 0x24, 0xa5, 0xfb, 0x24, 0xb1, 0x97, 0x17, 0x64, 0x88, 0xf3, 0x9b, 0x74, 0xcf,
|
||||
0x46, 0x7a, 0xe7, 0xcf, 0x37, 0xee, 0xa7, 0x94, 0x92, 0x6b, 0xf7, 0x6d, 0x55, 0x26, 0xb7, 0xff,
|
||||
0x2e, 0xb2, 0xfa, 0xc7, 0xf2, 0x7d, 0xbe, 0xcd, 0x9a, 0x8a, 0x44, 0x8e, 0xe7, 0x64, 0x44, 0x41,
|
||||
0x64, 0x42, 0xaf, 0xe3, 0x75, 0xfd, 0xa8, 0xa1, 0xe8, 0xd8, 0xb2, 0x88, 0xc8, 0xf0, 0x1d, 0xd6,
|
||||
0x52, 0x24, 0x14, 0x4c, 0xc4, 0x10, 0x2e, 0x0a, 0x54, 0x69, 0xb8, 0xe8, 0x42, 0xab, 0x8a, 0x8e,
|
||||
0x60, 0xf2, 0xae, 0x64, 0x7c, 0x8b, 0x35, 0xf4, 0x00, 0x33, 0x21, 0xd3, 0x82, 0xc6, 0x79, 0xb8,
|
||||
0xd4, 0xf1, 0xba, 0x41, 0xc4, 0x2c, 0xda, 0x77, 0x84, 0xaf, 0xb3, 0x1a, 0x92, 0x18, 0x63, 0x12,
|
||||
0x2e, 0x77, 0xbc, 0x6e, 0x33, 0x5a, 0x41, 0x3a, 0xc1, 0xa4, 0xc2, 0x29, 0x26, 0xe1, 0xca, 0x1c,
|
||||
0xbf, 0xc5, 0xc4, 0x8e, 0x3b, 0x43, 0x15, 0x17, 0x17, 0x42, 0xc5, 0x19, 0x84, 0xb5, 0x72, 0x5c,
|
||||
0x89, 0x8e, 0xe2, 0x0c, 0x38, 0x67, 0xcb, 0x6e, 0xc3, 0x75, 0xd7, 0x71, 0x35, 0xdf, 0x64, 0x81,
|
||||
0x2c, 0x70, 0x2c, 0xf2, 0xd8, 0x0c, 0x42, 0xdf, 0x35, 0x7c, 0x0b, 0x8e, 0x63, 0x33, 0xe0, 0x4f,
|
||||
0x58, 0x4b, 0x5f, 0x68, 0x03, 0x59, 0x32, 0xdf, 0x63, 0xe0, 0x7e, 0xa3, 0x59, 0xd1, 0x6a, 0x9b,
|
||||
0x4f, 0xd9, 0x9a, 0x9b, 0x81, 0x59, 0x9c, 0x42, 0x39, 0x89, 0xb9, 0x49, 0x4d, 0x8b, 0x0f, 0x2d,
|
||||
0x75, 0xe3, 0x76, 0x58, 0xcb, 0xe5, 0x26, 0x54, 0x0c, 0xcb, 0x58, 0xc3, 0xc5, 0x56, 0x2d, 0xfd,
|
||||
0x42, 0xc5, 0xd0, 0xa6, 0xb6, 0x7f, 0x2c, 0xb2, 0xbb, 0xfb, 0x03, 0x90, 0xc3, 0x9c, 0x50, 0x99,
|
||||
0xb9, 0x75, 0xce, 0x96, 0x61, 0x8a, 0x73, 0xd9, 0xae, 0xe6, 0x0f, 0x98, 0x4f, 0x39, 0x28, 0x61,
|
||||
0x64, 0x5e, 0xf9, 0xad, 0xdb, 0xf5, 0x67, 0x99, 0xf3, 0x5d, 0xb6, 0x0e, 0x53, 0x03, 0x85, 0x8a,
|
||||
0x47, 0x62, 0xac, 0x70, 0x2a, 0x34, 0xc9, 0x21, 0x18, 0xed, 0x24, 0xfb, 0xd1, 0xbd, 0x79, 0xf3,
|
||||
0x44, 0xe1, 0xf4, 0x53, 0xd9, 0xe2, 0x1b, 0xcc, 0x37, 0x50, 0x64, 0xa8, 0xe2, 0x91, 0xf3, 0xed,
|
||||
0x47, 0xd7, 0x6b, 0xfe, 0x90, 0xb1, 0x6f, 0x38, 0x02, 0x31, 0x22, 0x39, 0xd4, 0x4e, 0xbb, 0x1f,
|
||||
0x05, 0x96, 0xbc, 0xb7, 0x80, 0x3f, 0x63, 0x77, 0x20, 0xcb, 0x4d, 0x69, 0x5e, 0xe7, 0xb1, 0x04,
|
||||
0x1d, 0xd6, 0x3a, 0x4b, 0xdd, 0x20, 0x5a, 0x73, 0xfc, 0xe8, 0x1a, 0xf3, 0x47, 0x6c, 0xb5, 0x74,
|
||||
0xa9, 0x45, 0x46, 0x09, 0x54, 0x87, 0xd1, 0xa8, 0xd8, 0x07, 0x4a, 0xc0, 0x7e, 0xec, 0x96, 0xca,
|
||||
0xf2, 0x50, 0x02, 0xbc, 0xd6, 0xb8, 0xc9, 0x82, 0x1b, 0x83, 0x41, 0x79, 0x64, 0x93, 0xb9, 0xbd,
|
||||
0x97, 0xac, 0x75, 0x5c, 0x90, 0x04, 0xad, 0x0f, 0xc0, 0xc4, 0x38, 0xd2, 0xfc, 0x31, 0xab, 0xc3,
|
||||
0x14, 0xa4, 0xc0, 0xc4, 0xc9, 0x0b, 0xf6, 0xd8, 0xec, 0xf7, 0x56, 0xed, 0xcd, 0x14, 0xe4, 0xe1,
|
||||
0x41, 0x54, 0xb3, 0xad, 0xc3, 0x64, 0xef, 0xf4, 0xf2, 0xaa, 0xbd, 0xf0, 0xeb, 0xaa, 0xbd, 0xf0,
|
||||
0x7d, 0xd6, 0xf6, 0x2e, 0x67, 0x6d, 0xef, 0xe7, 0xac, 0xed, 0xfd, 0x99, 0xb5, 0xbd, 0xd3, 0xd7,
|
||||
0xff, 0x7b, 0xd1, 0x5e, 0x55, 0xcf, 0xaf, 0x0b, 0x67, 0x35, 0x77, 0x8b, 0x5e, 0xfc, 0x0b, 0x00,
|
||||
0x00, 0xff, 0xff, 0x90, 0x50, 0x79, 0xf2, 0xb5, 0x03, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *Options) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
@ -195,6 +321,9 @@ func (m *Options) MarshalTo(dAtA []byte) (int, error) {
|
||||
i = encodeVarintOci(dAtA, i, uint64(len(m.CriuWorkPath)))
|
||||
i += copy(dAtA[i:], m.CriuWorkPath)
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -296,6 +425,9 @@ func (m *CheckpointOptions) MarshalTo(dAtA []byte) (int, error) {
|
||||
i = encodeVarintOci(dAtA, i, uint64(len(m.WorkPath)))
|
||||
i += copy(dAtA[i:], m.WorkPath)
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -320,6 +452,9 @@ func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) {
|
||||
i = encodeVarintOci(dAtA, i, uint64(len(m.ExecID)))
|
||||
i += copy(dAtA[i:], m.ExecID)
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -333,6 +468,9 @@ func encodeVarintOci(dAtA []byte, offset int, v uint64) int {
|
||||
return offset + 1
|
||||
}
|
||||
func (m *Options) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.NoPivotRoot {
|
||||
@ -374,10 +512,16 @@ func (m *Options) Size() (n int) {
|
||||
if l > 0 {
|
||||
n += 1 + l + sovOci(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *CheckpointOptions) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Exit {
|
||||
@ -413,16 +557,25 @@ func (m *CheckpointOptions) Size() (n int) {
|
||||
if l > 0 {
|
||||
n += 1 + l + sovOci(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ProcessDetails) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.ExecID)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovOci(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@ -455,6 +608,7 @@ func (this *Options) String() string {
|
||||
`SystemdCgroup:` + fmt.Sprintf("%v", this.SystemdCgroup) + `,`,
|
||||
`CriuImagePath:` + fmt.Sprintf("%v", this.CriuImagePath) + `,`,
|
||||
`CriuWorkPath:` + fmt.Sprintf("%v", this.CriuWorkPath) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -473,6 +627,7 @@ func (this *CheckpointOptions) String() string {
|
||||
`CgroupsMode:` + fmt.Sprintf("%v", this.CgroupsMode) + `,`,
|
||||
`ImagePath:` + fmt.Sprintf("%v", this.ImagePath) + `,`,
|
||||
`WorkPath:` + fmt.Sprintf("%v", this.WorkPath) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -483,6 +638,7 @@ func (this *ProcessDetails) String() string {
|
||||
}
|
||||
s := strings.Join([]string{`&ProcessDetails{`,
|
||||
`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@ -510,7 +666,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -538,7 +694,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -558,7 +714,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -578,7 +734,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -588,6 +744,9 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -607,7 +766,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.IoUid |= (uint32(b) & 0x7F) << shift
|
||||
m.IoUid |= uint32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -626,7 +785,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.IoGid |= (uint32(b) & 0x7F) << shift
|
||||
m.IoGid |= uint32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -645,7 +804,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -655,6 +814,9 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -674,7 +836,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -684,6 +846,9 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -703,7 +868,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -713,6 +878,9 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -732,7 +900,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -752,7 +920,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -762,6 +930,9 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -781,7 +952,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -791,6 +962,9 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -805,9 +979,13 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -832,7 +1010,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -860,7 +1038,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -880,7 +1058,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -900,7 +1078,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -920,7 +1098,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -940,7 +1118,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -960,7 +1138,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -970,6 +1148,9 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -989,7 +1170,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -999,6 +1180,9 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -1018,7 +1202,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1028,6 +1212,9 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -1047,7 +1234,7 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1057,6 +1244,9 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -1071,9 +1261,13 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -1098,7 +1292,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1126,7 +1320,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
@ -1136,6 +1330,9 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
@ -1150,9 +1347,13 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthOci
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
@ -1216,10 +1417,13 @@ func skipOci(dAtA []byte) (n int, err error) {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthOci
|
||||
}
|
||||
iNdEx += length
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthOci
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
@ -1248,6 +1452,9 @@ func skipOci(dAtA []byte) (n int, err error) {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthOci
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
@ -1266,48 +1473,3 @@ var (
|
||||
ErrInvalidLengthOci = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowOci = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/runtime/v2/runc/options/oci.proto", fileDescriptorOci)
|
||||
}
|
||||
|
||||
var fileDescriptorOci = []byte{
|
||||
// 587 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6e, 0xd3, 0x40,
|
||||
0x10, 0x87, 0xeb, 0xfe, 0x49, 0xec, 0x4d, 0x93, 0xc2, 0x42, 0x25, 0xd3, 0x8a, 0x34, 0x94, 0x82,
|
||||
0xc2, 0x25, 0x11, 0x45, 0x9c, 0xb8, 0xa0, 0xb6, 0x08, 0x55, 0x40, 0xa9, 0x0c, 0x15, 0xa8, 0x97,
|
||||
0x95, 0xbb, 0x1e, 0x9c, 0x51, 0xe2, 0x1d, 0xcb, 0xbb, 0x69, 0xd2, 0x1b, 0xef, 0xc5, 0x0b, 0xf4,
|
||||
0xc8, 0x91, 0x13, 0xa2, 0xb9, 0xf1, 0x16, 0x68, 0xd7, 0x4e, 0xdb, 0x33, 0x27, 0xcf, 0x7e, 0xf3,
|
||||
0xf3, 0x78, 0xfd, 0xad, 0x96, 0xed, 0xa5, 0x68, 0x06, 0xe3, 0xb3, 0x9e, 0xa4, 0xac, 0x2f, 0x49,
|
||||
0x99, 0x18, 0x15, 0x14, 0xc9, 0xed, 0xb2, 0x18, 0x2b, 0x83, 0x19, 0xf4, 0xcf, 0x77, 0x6d, 0x29,
|
||||
0xfb, 0x94, 0x1b, 0x24, 0xa5, 0xfb, 0x24, 0xb1, 0x97, 0x17, 0x64, 0x88, 0xf3, 0x9b, 0x74, 0xcf,
|
||||
0x46, 0x7a, 0xe7, 0xcf, 0x37, 0xee, 0xa7, 0x94, 0x92, 0x6b, 0xf7, 0x6d, 0x55, 0x26, 0xb7, 0xff,
|
||||
0x2e, 0xb2, 0xfa, 0xc7, 0xf2, 0x7d, 0xbe, 0xcd, 0x9a, 0x8a, 0x44, 0x8e, 0xe7, 0x64, 0x44, 0x41,
|
||||
0x64, 0x42, 0xaf, 0xe3, 0x75, 0xfd, 0xa8, 0xa1, 0xe8, 0xd8, 0xb2, 0x88, 0xc8, 0xf0, 0x1d, 0xd6,
|
||||
0x52, 0x24, 0x14, 0x4c, 0xc4, 0x10, 0x2e, 0x0a, 0x54, 0x69, 0xb8, 0xe8, 0x42, 0xab, 0x8a, 0x8e,
|
||||
0x60, 0xf2, 0xae, 0x64, 0x7c, 0x8b, 0x35, 0xf4, 0x00, 0x33, 0x21, 0xd3, 0x82, 0xc6, 0x79, 0xb8,
|
||||
0xd4, 0xf1, 0xba, 0x41, 0xc4, 0x2c, 0xda, 0x77, 0x84, 0xaf, 0xb3, 0x1a, 0x92, 0x18, 0x63, 0x12,
|
||||
0x2e, 0x77, 0xbc, 0x6e, 0x33, 0x5a, 0x41, 0x3a, 0xc1, 0xa4, 0xc2, 0x29, 0x26, 0xe1, 0xca, 0x1c,
|
||||
0xbf, 0xc5, 0xc4, 0x8e, 0x3b, 0x43, 0x15, 0x17, 0x17, 0x42, 0xc5, 0x19, 0x84, 0xb5, 0x72, 0x5c,
|
||||
0x89, 0x8e, 0xe2, 0x0c, 0x38, 0x67, 0xcb, 0x6e, 0xc3, 0x75, 0xd7, 0x71, 0x35, 0xdf, 0x64, 0x81,
|
||||
0x2c, 0x70, 0x2c, 0xf2, 0xd8, 0x0c, 0x42, 0xdf, 0x35, 0x7c, 0x0b, 0x8e, 0x63, 0x33, 0xe0, 0x4f,
|
||||
0x58, 0x4b, 0x5f, 0x68, 0x03, 0x59, 0x32, 0xdf, 0x63, 0xe0, 0x7e, 0xa3, 0x59, 0xd1, 0x6a, 0x9b,
|
||||
0x4f, 0xd9, 0x9a, 0x9b, 0x81, 0x59, 0x9c, 0x42, 0x39, 0x89, 0xb9, 0x49, 0x4d, 0x8b, 0x0f, 0x2d,
|
||||
0x75, 0xe3, 0x76, 0x58, 0xcb, 0xe5, 0x26, 0x54, 0x0c, 0xcb, 0x58, 0xc3, 0xc5, 0x56, 0x2d, 0xfd,
|
||||
0x42, 0xc5, 0xd0, 0xa6, 0xb6, 0x7f, 0x2c, 0xb2, 0xbb, 0xfb, 0x03, 0x90, 0xc3, 0x9c, 0x50, 0x99,
|
||||
0xb9, 0x75, 0xce, 0x96, 0x61, 0x8a, 0x73, 0xd9, 0xae, 0xe6, 0x0f, 0x98, 0x4f, 0x39, 0x28, 0x61,
|
||||
0x64, 0x5e, 0xf9, 0xad, 0xdb, 0xf5, 0x67, 0x99, 0xf3, 0x5d, 0xb6, 0x0e, 0x53, 0x03, 0x85, 0x8a,
|
||||
0x47, 0x62, 0xac, 0x70, 0x2a, 0x34, 0xc9, 0x21, 0x18, 0xed, 0x24, 0xfb, 0xd1, 0xbd, 0x79, 0xf3,
|
||||
0x44, 0xe1, 0xf4, 0x53, 0xd9, 0xe2, 0x1b, 0xcc, 0x37, 0x50, 0x64, 0xa8, 0xe2, 0x91, 0xf3, 0xed,
|
||||
0x47, 0xd7, 0x6b, 0xfe, 0x90, 0xb1, 0x6f, 0x38, 0x02, 0x31, 0x22, 0x39, 0xd4, 0x4e, 0xbb, 0x1f,
|
||||
0x05, 0x96, 0xbc, 0xb7, 0x80, 0x3f, 0x63, 0x77, 0x20, 0xcb, 0x4d, 0x69, 0x5e, 0xe7, 0xb1, 0x04,
|
||||
0x1d, 0xd6, 0x3a, 0x4b, 0xdd, 0x20, 0x5a, 0x73, 0xfc, 0xe8, 0x1a, 0xf3, 0x47, 0x6c, 0xb5, 0x74,
|
||||
0xa9, 0x45, 0x46, 0x09, 0x54, 0x87, 0xd1, 0xa8, 0xd8, 0x07, 0x4a, 0xc0, 0x7e, 0xec, 0x96, 0xca,
|
||||
0xf2, 0x50, 0x02, 0xbc, 0xd6, 0xb8, 0xc9, 0x82, 0x1b, 0x83, 0x41, 0x79, 0x64, 0x93, 0xb9, 0xbd,
|
||||
0x97, 0xac, 0x75, 0x5c, 0x90, 0x04, 0xad, 0x0f, 0xc0, 0xc4, 0x38, 0xd2, 0xfc, 0x31, 0xab, 0xc3,
|
||||
0x14, 0xa4, 0xc0, 0xc4, 0xc9, 0x0b, 0xf6, 0xd8, 0xec, 0xf7, 0x56, 0xed, 0xcd, 0x14, 0xe4, 0xe1,
|
||||
0x41, 0x54, 0xb3, 0xad, 0xc3, 0x64, 0xef, 0xf4, 0xf2, 0xaa, 0xbd, 0xf0, 0xeb, 0xaa, 0xbd, 0xf0,
|
||||
0x7d, 0xd6, 0xf6, 0x2e, 0x67, 0x6d, 0xef, 0xe7, 0xac, 0xed, 0xfd, 0x99, 0xb5, 0xbd, 0xd3, 0xd7,
|
||||
0xff, 0x7b, 0xd1, 0x5e, 0x55, 0xcf, 0xaf, 0x0b, 0x67, 0x35, 0x77, 0x8b, 0x5e, 0xfc, 0x0b, 0x00,
|
||||
0x00, 0xff, 0xff, 0x90, 0x50, 0x79, 0xf2, 0xb5, 0x03, 0x00, 0x00,
|
||||
}
|
||||
|
||||
5 vendor/github.com/containerd/containerd/sys/filesys_unix.go generated vendored
@ -24,3 +24,8 @@ import "os"
|
||||
func ForceRemoveAll(path string) error {
|
||||
return os.RemoveAll(path)
|
||||
}
|
||||
|
||||
// MkdirAllWithACL is a wrapper for os.MkdirAll on Unix systems.
|
||||
func MkdirAllWithACL(path string, perm os.FileMode) error {
|
||||
return os.MkdirAll(path, perm)
|
||||
}
|
||||
|
||||
10 vendor/github.com/containerd/containerd/sys/filesys_windows.go generated vendored
@ -30,6 +30,11 @@ import (
|
||||
"github.com/Microsoft/hcsshim"
|
||||
)
|
||||
|
||||
const (
|
||||
// SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System
|
||||
SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
|
||||
)
|
||||
|
||||
// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
|
||||
// ACL'd for Builtin Administrators and Local System.
|
||||
func MkdirAllWithACL(path string, perm os.FileMode) error {
|
||||
@ -78,7 +83,7 @@ func mkdirall(path string, adminAndLocalSystem bool) error {
|
||||
|
||||
if j > 1 {
|
||||
// Create parent
|
||||
err = mkdirall(path[0:j-1], false)
|
||||
err = mkdirall(path[0:j-1], adminAndLocalSystem)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -112,8 +117,7 @@ func mkdirall(path string, adminAndLocalSystem bool) error {
|
||||
// and Local System.
|
||||
func mkdirWithACL(name string) error {
|
||||
sa := syscall.SecurityAttributes{Length: 0}
|
||||
sddl := "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
|
||||
sd, err := winio.SddlToSecurityDescriptor(sddl)
|
||||
sd, err := winio.SddlToSecurityDescriptor(SddlAdministratorsLocalSystem)
|
||||
if err != nil {
|
||||
return &os.PathError{Op: "mkdir", Path: name, Err: err}
|
||||
}
|
||||
|
||||
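The two filesys diffs above give `MkdirAllWithACL` the same signature on Unix and Windows: the Unix variant is a thin wrapper over `os.MkdirAll`, while the Windows variant creates the directory ACL'd with the `SddlAdministratorsLocalSystem` descriptor shown above. A minimal, hypothetical usage sketch (the path is made up for illustration):

```go
package main

import (
	"log"

	"github.com/containerd/containerd/sys"
)

func main() {
	// Hypothetical state directory. On Windows this is created ACL'd for
	// BUILTIN\Administrators and NT AUTHORITY\SYSTEM; on Unix it falls back
	// to a plain os.MkdirAll with the given mode.
	if err := sys.MkdirAllWithACL("/var/lib/example-shim", 0700); err != nil {
		log.Fatalf("mkdir with ACL: %v", err)
	}
}
```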
12 vendor/github.com/containerd/containerd/sys/oom_unix.go generated vendored
@ -20,8 +20,10 @@ package sys
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/system"
|
||||
)
|
||||
@ -45,3 +47,13 @@ func SetOOMScore(pid, score int) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetOOMScoreAdj gets the oom score for a process
|
||||
func GetOOMScoreAdj(pid int) (int, error) {
|
||||
path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return strconv.Atoi(strings.TrimSpace(string(data)))
|
||||
}
|
||||
|
||||
7 vendor/github.com/containerd/containerd/sys/oom_windows.go generated vendored
@ -22,3 +22,10 @@ package sys
|
||||
func SetOOMScore(pid, score int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetOOMScoreAdj gets the oom score for a process
|
||||
//
|
||||
// Not implemented on Windows
|
||||
func GetOOMScoreAdj(pid int) (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
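The oom_unix.go hunk adds `GetOOMScoreAdj` alongside the existing `SetOOMScore`, reading the value back from `/proc/<pid>/oom_score_adj`, while the Windows variants remain no-ops. A small sketch of how a caller might round-trip the value on Linux (sketch only; lowering a score usually needs elevated privileges):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/containerd/containerd/sys"
)

func main() {
	pid := os.Getpid()

	// Make this process a slightly more attractive OOM-kill target.
	if err := sys.SetOOMScore(pid, 100); err != nil {
		log.Fatalf("set oom score: %v", err)
	}

	// Read back /proc/<pid>/oom_score_adj through the new helper.
	adj, err := sys.GetOOMScoreAdj(pid)
	if err != nil {
		log.Fatalf("get oom score adj: %v", err)
	}
	fmt.Println("oom_score_adj:", adj)
}
```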
4 vendor/github.com/containerd/containerd/task.go generated vendored
@ -521,6 +521,9 @@ func (t *task) Update(ctx context.Context, opts ...UpdateTaskOpts) error {
|
||||
}
|
||||
|
||||
func (t *task) LoadProcess(ctx context.Context, id string, ioAttach cio.Attach) (Process, error) {
|
||||
if id == t.id && ioAttach == nil {
|
||||
return t, nil
|
||||
}
|
||||
response, err := t.client.TaskService().Get(ctx, &tasks.GetRequest{
|
||||
ContainerID: t.id,
|
||||
ExecID: id,
|
||||
@ -582,6 +585,7 @@ func (t *task) checkpointTask(ctx context.Context, index *v1.Index, request *tas
|
||||
OS: goruntime.GOOS,
|
||||
Architecture: goruntime.GOARCH,
|
||||
},
|
||||
Annotations: d.Annotations,
|
||||
})
|
||||
}
|
||||
return nil
|
||||
|
||||
7 vendor/github.com/containerd/containerd/task_opts.go generated vendored
@ -59,9 +59,10 @@ func WithTaskCheckpoint(im Image) NewTaskOpts {
|
||||
for _, m := range index.Manifests {
|
||||
if m.MediaType == images.MediaTypeContainerd1Checkpoint {
|
||||
info.Checkpoint = &types.Descriptor{
|
||||
MediaType: m.MediaType,
|
||||
Size_: m.Size,
|
||||
Digest: m.Digest,
|
||||
MediaType: m.MediaType,
|
||||
Size_: m.Size,
|
||||
Digest: m.Digest,
|
||||
Annotations: m.Annotations,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
65 vendor/github.com/containerd/containerd/vendor.conf generated vendored
@ -1,52 +1,51 @@
|
||||
github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
|
||||
github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
|
||||
github.com/containerd/cgroups dbea6f2bd41658b84b00417ceefa416b979cbf10
|
||||
github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f
|
||||
github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1
|
||||
github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
|
||||
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
|
||||
github.com/containerd/btrfs 2e1aa0ddf94f91fa282b6ed87c23bf0d64911244
|
||||
github.com/containerd/btrfs af5082808c833de0e79c1e72eea9fea239364877
|
||||
github.com/containerd/continuity bd77b46c8352f74eb12c85bdc01f4b90f69d66b4
|
||||
github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6
|
||||
github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098
|
||||
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
||||
github.com/docker/go-units v0.3.1
|
||||
github.com/docker/go-units v0.4.0
|
||||
github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f
|
||||
github.com/prometheus/client_golang f4fb1b73fb099f396a7f0036bf86aa8def4ed823
|
||||
github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
|
||||
github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
|
||||
github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
|
||||
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.0
|
||||
github.com/gogo/protobuf v1.0.0
|
||||
github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
|
||||
github.com/golang/protobuf v1.1.0
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1
|
||||
github.com/gogo/protobuf v1.2.1
|
||||
github.com/gogo/googleapis v1.2.0
|
||||
github.com/golang/protobuf v1.2.0
|
||||
github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db
|
||||
github.com/opencontainers/runc 2b18fe1d885ee5083ef9f0838fee39b62d653e30
|
||||
github.com/opencontainers/runc 029124da7af7360afa781a0234d1b083550f797c
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1
|
||||
github.com/sirupsen/logrus v1.3.0
|
||||
github.com/sirupsen/logrus v1.4.1
|
||||
github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
|
||||
golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac
|
||||
google.golang.org/grpc v1.12.0
|
||||
github.com/pkg/errors v0.8.0
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
|
||||
golang.org/x/sys d455e41777fca6e8a5a79e34a14b8368bc11d9ba https://github.com/golang/sys
|
||||
github.com/opencontainers/image-spec v1.0.1
|
||||
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
|
||||
github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
|
||||
github.com/BurntSushi/toml v0.3.1
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
|
||||
github.com/Microsoft/go-winio v0.4.12
|
||||
github.com/Microsoft/hcsshim v0.8.5
|
||||
github.com/Microsoft/go-winio 84b4ab48a50763fe7b3abcef38e5205c12027fac
|
||||
github.com/Microsoft/hcsshim 8abdbb8205e4192c68b5f84c31197156f31be517
|
||||
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
|
||||
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
|
||||
github.com/containerd/ttrpc f02858b1457c5ca3aaec3a0803eb0d59f96e41d6
|
||||
github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
|
||||
gotest.tools v2.1.0
|
||||
github.com/google/go-cmp v0.1.0
|
||||
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
|
||||
gotest.tools v2.3.0
|
||||
github.com/google/go-cmp v0.2.0
|
||||
go.etcd.io/bbolt v1.3.2
|
||||
|
||||
# cri dependencies
|
||||
github.com/containerd/cri 4dd6735020f5596dd41738f8c4f5cb07fa804c5e # master
|
||||
github.com/containerd/cri 6d353571e64417d80c9478ffaea793714dd539d0 # master
|
||||
github.com/containerd/go-cni 40bcf8ec8acd7372be1d77031d585d5d8e561c90
|
||||
github.com/blang/semver v3.1.0
|
||||
github.com/containernetworking/cni v0.6.0
|
||||
github.com/containernetworking/plugins v0.7.0
|
||||
github.com/davecgh/go-spew v1.1.0
|
||||
@ -60,31 +59,27 @@ github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
|
||||
github.com/json-iterator/go 1.1.5
|
||||
github.com/modern-go/reflect2 1.0.1
|
||||
github.com/modern-go/concurrent 1.0.3
|
||||
github.com/opencontainers/runtime-tools v0.6.0
|
||||
github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a
|
||||
github.com/opencontainers/selinux v1.2.1
|
||||
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
|
||||
github.com/tchap/go-patricia v2.2.6
|
||||
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
|
||||
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
|
||||
github.com/xeipuuv/gojsonschema 1d523034197ff1f222f6429836dd36a2457a1874
|
||||
golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067
|
||||
golang.org/x/crypto 88737f569e3a9c7ab309cdc09a07fe7fc87233c3
|
||||
golang.org/x/oauth2 a6bd8cefa1811bd24b86f8902872e4e8225f74c4
|
||||
golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
|
||||
gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
|
||||
gopkg.in/yaml.v2 v2.2.1
|
||||
k8s.io/api kubernetes-1.13.0
|
||||
k8s.io/apimachinery kubernetes-1.13.0
|
||||
k8s.io/apiserver kubernetes-1.13.0
|
||||
k8s.io/client-go kubernetes-1.13.0
|
||||
k8s.io/api kubernetes-1.15.0-alpha.0
|
||||
k8s.io/apimachinery kubernetes-1.15.0-alpha.0
|
||||
k8s.io/apiserver kubernetes-1.15.0-alpha.0
|
||||
k8s.io/client-go kubernetes-1.15.0-alpha.0
|
||||
k8s.io/klog 8139d8cb77af419532b33dfa7dd09fbc5f1d344f
|
||||
k8s.io/kubernetes v1.13.0
|
||||
k8s.io/utils 0d26856f57b32ec3398579285e5c8a2bfe8c5243
|
||||
k8s.io/kubernetes v1.15.0-alpha.0
|
||||
k8s.io/utils c2654d5206da6b7b6ace12841e8f359bb89b443c
|
||||
sigs.k8s.io/yaml v1.1.0
|
||||
|
||||
# zfs dependencies
|
||||
github.com/containerd/zfs 9f6ef3b1fe5144bd91fe5855b4eba81bc0d17d03
|
||||
github.com/mistifyio/go-zfs 166add352731e515512690329794ee593f1aaff2
|
||||
github.com/pborman/uuid c65b2f87fee37d1c7854c9164a450713c28d50cd
|
||||
github.com/containerd/zfs 31af176f2ae84fe142ef2655bf7bb2aa618b3b1f
|
||||
github.com/mistifyio/go-zfs f784269be439d704d3dfa1906f45dd848fed2beb
|
||||
github.com/google/uuid v1.1.1
|
||||
|
||||
# aufs dependencies
|
||||
github.com/containerd/aufs da3cf16bfbe68ba8f114f1536a05c01528a25434
|
||||
github.com/containerd/aufs f894a800659b6e11c1a13084abd1712f346e349c
|
||||
|
||||
33 vendor/github.com/containerd/continuity/fs/path.go generated vendored
@ -22,7 +22,6 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@ -47,9 +46,8 @@ func pathChange(lower, upper *currentPath) (ChangeKind, string) {
|
||||
if upper == nil {
|
||||
return ChangeKindDelete, lower.path
|
||||
}
|
||||
// TODO: compare by directory
|
||||
|
||||
switch i := strings.Compare(lower.path, upper.path); {
|
||||
switch i := directoryCompare(lower.path, upper.path); {
|
||||
case i < 0:
|
||||
// File in lower that is not in upper
|
||||
return ChangeKindDelete, lower.path
|
||||
@ -61,6 +59,35 @@ func pathChange(lower, upper *currentPath) (ChangeKind, string) {
|
||||
}
|
||||
}
|
||||
|
||||
func directoryCompare(a, b string) int {
|
||||
l := len(a)
|
||||
if len(b) < l {
|
||||
l = len(b)
|
||||
}
|
||||
for i := 0; i < l; i++ {
|
||||
c1, c2 := a[i], b[i]
|
||||
if c1 == filepath.Separator {
|
||||
c1 = byte(0)
|
||||
}
|
||||
if c2 == filepath.Separator {
|
||||
c2 = byte(0)
|
||||
}
|
||||
if c1 < c2 {
|
||||
return -1
|
||||
}
|
||||
if c1 > c2 {
|
||||
return +1
|
||||
}
|
||||
}
|
||||
if len(a) < len(b) {
|
||||
return -1
|
||||
}
|
||||
if len(a) > len(b) {
|
||||
return +1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func sameFile(f1, f2 *currentPath) (bool, error) {
|
||||
if os.SameFile(f1.f, f2.f) {
|
||||
return true, nil
|
||||
|
||||
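The `directoryCompare` helper in the diff above replaces `strings.Compare` so that the path separator sorts before every other byte; without that, a sibling such as `a-file` would sort between a directory `a` and its children and could throw the lower/upper walks out of step. The helper is unexported, so the sketch below simply copies its logic to show how the ordering differs (assumes a Unix `/` separator):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// Copy of the directoryCompare logic shown in the diff above: treat the
// path separator as byte 0 so children of "a" sort before "a-file".
func directoryCompare(a, b string) int {
	l := len(a)
	if len(b) < l {
		l = len(b)
	}
	for i := 0; i < l; i++ {
		c1, c2 := a[i], b[i]
		if c1 == filepath.Separator {
			c1 = 0
		}
		if c2 == filepath.Separator {
			c2 = 0
		}
		if c1 < c2 {
			return -1
		}
		if c1 > c2 {
			return +1
		}
	}
	if len(a) < len(b) {
		return -1
	}
	if len(a) > len(b) {
		return +1
	}
	return 0
}

func main() {
	fmt.Println(strings.Compare("a-file", "a/child"))  // -1: '-' < '/'
	fmt.Println(directoryCompare("a-file", "a/child")) // +1: separator sorts first
}
```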
201 vendor/github.com/containerd/ttrpc/LICENSE generated vendored Normal file
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
62 vendor/github.com/containerd/ttrpc/README.md generated vendored Normal file
@ -0,0 +1,62 @@
|
||||
# ttrpc
|
||||
|
||||
[](https://travis-ci.org/containerd/ttrpc)
|
||||
|
||||
GRPC for low-memory environments.
|
||||
|
||||
The existing grpc-go project requires a lot of memory overhead for importing
|
||||
packages and at runtime. While this is great for many services with low density
|
||||
requirements, this can be a problem when running a large number of services on
|
||||
a single machine or on a machine with a small amount of memory.
|
||||
|
||||
Using the same GRPC definitions, this project reduces the binary size and
|
||||
protocol overhead required. We do this by eliding the `net/http`, `net/http2`
|
||||
and `grpc` packages used by grpc, replacing them with a lightweight framing
protocol. The result is smaller binaries that use less resident memory with
|
||||
the same ease of use as GRPC.
|
||||
|
||||
Please note that while this project supports generating either end of the
|
||||
protocol, the generated service definitions will be incompatible with regular
|
||||
GRPC services, as they do not speak the same protocol.
|
||||
|
||||
# Usage
|
||||
|
||||
Create a gogo vanity binary (see
|
||||
[`cmd/protoc-gen-gogottrpc/main.go`](cmd/protoc-gen-gogottrpc/main.go) for an
|
||||
example with the ttrpc plugin enabled).
|
||||
|
||||
It's recommended to use [`protobuild`](https://github.com/stevvooe/protobuild)
|
||||
to build the protobufs for this project, but this will work with protoc
|
||||
directly, if required.
|
||||
|
||||
# Differences from GRPC
|
||||
|
||||
- The protocol stack has been replaced with a lighter protocol that doesn't
|
||||
require http, http2 and tls.
|
||||
- The client and server interfaces are identical, whereas in GRPC there are
separate client and server interfaces.
|
||||
- The Go stdlib context package is used instead.
|
||||
- No support for streams yet.
|
||||
|
||||
# Status
|
||||
|
||||
Very new. YMMV.
|
||||
|
||||
TODO:
|
||||
|
||||
- [X] Plumb error codes and GRPC status
|
||||
- [X] Remove use of any type and dependency on typeurl package
|
||||
- [X] Ensure that protocol can support streaming in the future
|
||||
- [ ] Document protocol layout
|
||||
- [ ] Add testing under concurrent load to ensure
|
||||
- [ ] Verify connection error handling
|
||||
|
||||
# Project details
|
||||
|
||||
ttrpc is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
|
||||
As a containerd sub-project, you will find the:
|
||||
* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
|
||||
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
|
||||
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
|
||||
|
||||
information in our [`containerd/project`](https://github.com/containerd/project) repository.
|
||||
154 vendor/github.com/containerd/ttrpc/channel.go generated vendored Normal file
@@ -0,0 +1,154 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package ttrpc

import (
	"bufio"
	"context"
	"encoding/binary"
	"io"
	"net"
	"sync"

	"github.com/pkg/errors"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

const (
	messageHeaderLength = 10
	messageLengthMax    = 4 << 20
)

type messageType uint8

const (
	messageTypeRequest  messageType = 0x1
	messageTypeResponse messageType = 0x2
)

// messageHeader represents the fixed-length message header of 10 bytes sent
// with every request.
type messageHeader struct {
	Length   uint32      // length excluding this header. b[:4]
	StreamID uint32      // identifies which request stream message is a part of. b[4:8]
	Type     messageType // message type b[8]
	Flags    uint8       // reserved b[9]
}

func readMessageHeader(p []byte, r io.Reader) (messageHeader, error) {
	_, err := io.ReadFull(r, p[:messageHeaderLength])
	if err != nil {
		return messageHeader{}, err
	}

	return messageHeader{
		Length:   binary.BigEndian.Uint32(p[:4]),
		StreamID: binary.BigEndian.Uint32(p[4:8]),
		Type:     messageType(p[8]),
		Flags:    p[9],
	}, nil
}

func writeMessageHeader(w io.Writer, p []byte, mh messageHeader) error {
	binary.BigEndian.PutUint32(p[:4], mh.Length)
	binary.BigEndian.PutUint32(p[4:8], mh.StreamID)
	p[8] = byte(mh.Type)
	p[9] = mh.Flags

	_, err := w.Write(p[:])
	return err
}

var buffers sync.Pool

type channel struct {
	conn  net.Conn
	bw    *bufio.Writer
	br    *bufio.Reader
	hrbuf [messageHeaderLength]byte // avoid alloc when reading header
	hwbuf [messageHeaderLength]byte
}

func newChannel(conn net.Conn) *channel {
	return &channel{
		conn: conn,
		bw:   bufio.NewWriter(conn),
		br:   bufio.NewReader(conn),
	}
}

// recv a message from the channel. The returned buffer contains the message.
//
// If a valid grpc status is returned, the message header
// returned will be valid and caller should send that along to
// the correct consumer. The bytes on the underlying channel
// will be discarded.
func (ch *channel) recv(ctx context.Context) (messageHeader, []byte, error) {
	mh, err := readMessageHeader(ch.hrbuf[:], ch.br)
	if err != nil {
		return messageHeader{}, nil, err
	}

	if mh.Length > uint32(messageLengthMax) {
		if _, err := ch.br.Discard(int(mh.Length)); err != nil {
			return mh, nil, errors.Wrapf(err, "failed to discard after receiving oversized message")
		}

		return mh, nil, status.Errorf(codes.ResourceExhausted, "message length %v exceeds maximum message size of %v", mh.Length, messageLengthMax)
	}

	p := ch.getmbuf(int(mh.Length))
	if _, err := io.ReadFull(ch.br, p); err != nil {
		return messageHeader{}, nil, errors.Wrapf(err, "failed reading message")
	}

	return mh, p, nil
}

func (ch *channel) send(ctx context.Context, streamID uint32, t messageType, p []byte) error {
	if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t}); err != nil {
		return err
	}

	_, err := ch.bw.Write(p)
	if err != nil {
		return err
	}

	return ch.bw.Flush()
}

func (ch *channel) getmbuf(size int) []byte {
	// we can't use the standard New method on pool because we want to allocate
	// based on size.
	b, ok := buffers.Get().(*[]byte)
	if !ok || cap(*b) < size {
		// TODO(stevvooe): It may be better to allocate these in fixed length
		// buckets to reduce fragmentation but it's not clear that would help
		// with performance. An ilogb approach or similar would work well.
		bb := make([]byte, size)
		b = &bb
	} else {
		*b = (*b)[:size]
	}
	return *b
}

func (ch *channel) putmbuf(p []byte) {
	buffers.Put(&p)
}
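As an editor-added illustration (not part of the vendored file), the following self-contained program lays out the 10-byte header exactly as writeMessageHeader does: a big-endian payload length, a big-endian stream ID, one type byte, and one reserved flags byte.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var hdr [10]byte

	// Payload length that follows the header (the header itself is excluded).
	binary.BigEndian.PutUint32(hdr[0:4], 42)
	// Stream ID; the client starts at 1 and keeps request IDs odd.
	binary.BigEndian.PutUint32(hdr[4:8], 1)
	hdr[8] = 0x1 // message type: request
	hdr[9] = 0x0 // flags: reserved

	// Prints: 00 00 00 2a 00 00 00 01 01 00
	fmt.Printf("% x\n", hdr)
}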
290 vendor/github.com/containerd/ttrpc/client.go generated vendored Normal file
@@ -0,0 +1,290 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package ttrpc

import (
	"context"
	"io"
	"net"
	"os"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"google.golang.org/grpc/status"
)

// ErrClosed is returned by client methods when the underlying connection is
// closed.
var ErrClosed = errors.New("ttrpc: closed")

type Client struct {
	codec   codec
	conn    net.Conn
	channel *channel
	calls   chan *callRequest

	closed    chan struct{}
	closeOnce sync.Once
	closeFunc func()
	done      chan struct{}
	err       error
}

func NewClient(conn net.Conn) *Client {
	c := &Client{
		codec:     codec{},
		conn:      conn,
		channel:   newChannel(conn),
		calls:     make(chan *callRequest),
		closed:    make(chan struct{}),
		done:      make(chan struct{}),
		closeFunc: func() {},
	}

	go c.run()
	return c
}

type callRequest struct {
	ctx  context.Context
	req  *Request
	resp *Response  // response will be written back here
	errs chan error // error written here on completion
}

func (c *Client) Call(ctx context.Context, service, method string, req, resp interface{}) error {
	payload, err := c.codec.Marshal(req)
	if err != nil {
		return err
	}

	var (
		creq = &Request{
			Service: service,
			Method:  method,
			Payload: payload,
		}

		cresp = &Response{}
	)

	if dl, ok := ctx.Deadline(); ok {
		creq.TimeoutNano = dl.Sub(time.Now()).Nanoseconds()
	}

	if err := c.dispatch(ctx, creq, cresp); err != nil {
		return err
	}

	if err := c.codec.Unmarshal(cresp.Payload, resp); err != nil {
		return err
	}

	if cresp.Status == nil {
		return errors.New("no status provided on response")
	}

	return status.ErrorProto(cresp.Status)
}

func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error {
	errs := make(chan error, 1)
	call := &callRequest{
		ctx:  ctx,
		req:  req,
		resp: resp,
		errs: errs,
	}

	select {
	case <-ctx.Done():
		return ctx.Err()
	case c.calls <- call:
	case <-c.done:
		return c.err
	}

	select {
	case <-ctx.Done():
		return ctx.Err()
	case err := <-errs:
		return filterCloseErr(err)
	case <-c.done:
		return c.err
	}
}

func (c *Client) Close() error {
	c.closeOnce.Do(func() {
		close(c.closed)
	})

	return nil
}

// OnClose allows a close func to be called when the server is closed
func (c *Client) OnClose(closer func()) {
	c.closeFunc = closer
}

type message struct {
	messageHeader
	p   []byte
	err error
}

func (c *Client) run() {
	var (
		streamID    uint32 = 1
		waiters            = make(map[uint32]*callRequest)
		calls               = c.calls
		incoming            = make(chan *message)
		shutdown            = make(chan struct{})
		shutdownErr error
	)

	go func() {
		defer close(shutdown)

		// start one more goroutine to recv messages without blocking.
		for {
			mh, p, err := c.channel.recv(context.TODO())
			if err != nil {
				_, ok := status.FromError(err)
				if !ok {
					// treat all errors that are not an rpc status as terminal.
					// all others poison the connection.
					shutdownErr = err
					return
				}
			}
			select {
			case incoming <- &message{
				messageHeader: mh,
				p:             p[:mh.Length],
				err:           err,
			}:
			case <-c.done:
				return
			}
		}
	}()

	defer c.conn.Close()
	defer close(c.done)
	defer c.closeFunc()

	for {
		select {
		case call := <-calls:
			if err := c.send(call.ctx, streamID, messageTypeRequest, call.req); err != nil {
				call.errs <- err
				continue
			}

			waiters[streamID] = call
			streamID += 2 // enforce odd client initiated request ids
		case msg := <-incoming:
			call, ok := waiters[msg.StreamID]
			if !ok {
				logrus.Errorf("ttrpc: received message for unknown channel %v", msg.StreamID)
				continue
			}

			call.errs <- c.recv(call.resp, msg)
			delete(waiters, msg.StreamID)
		case <-shutdown:
			if shutdownErr != nil {
				shutdownErr = filterCloseErr(shutdownErr)
			} else {
				shutdownErr = ErrClosed
			}

			shutdownErr = errors.Wrapf(shutdownErr, "ttrpc: client shutting down")

			c.err = shutdownErr
			for _, waiter := range waiters {
				waiter.errs <- shutdownErr
			}
			c.Close()
			return
		case <-c.closed:
			if c.err == nil {
				c.err = ErrClosed
			}
			// broadcast the shutdown error to the remaining waiters.
			for _, waiter := range waiters {
				waiter.errs <- c.err
			}
			return
		}
	}
}

func (c *Client) send(ctx context.Context, streamID uint32, mtype messageType, msg interface{}) error {
	p, err := c.codec.Marshal(msg)
	if err != nil {
		return err
	}

	return c.channel.send(ctx, streamID, mtype, p)
}

func (c *Client) recv(resp *Response, msg *message) error {
	if msg.err != nil {
		return msg.err
	}

	if msg.Type != messageTypeResponse {
		return errors.New("unknown message type received")
	}

	defer c.channel.putmbuf(msg.p)
	return proto.Unmarshal(msg.p, resp)
}

// filterCloseErr rewrites EOF and EPIPE errors to ErrClosed. Use when
// returning from call or handling errors from main read loop.
//
// This purposely ignores errors with a wrapped cause.
func filterCloseErr(err error) error {
	if err == nil {
		return nil
	}

	if err == io.EOF {
		return ErrClosed
	}

	if strings.Contains(err.Error(), "use of closed network connection") {
		return ErrClosed
	}

	// if we have an epipe on a write, we cast to errclosed
	if oerr, ok := err.(*net.OpError); ok && oerr.Op == "write" {
		if serr, ok := oerr.Err.(*os.SyscallError); ok && serr.Err == syscall.EPIPE {
			return ErrClosed
		}
	}

	return err
}
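One practical note on the error handling above, added by the editor rather than taken from the vendored file: Call can surface ErrClosed either directly (via filterCloseErr) or wrapped by pkg/errors during client shutdown, so a caller that wants to detect a closed client should unwrap with errors.Cause. A minimal sketch:

package example

import (
	"github.com/containerd/ttrpc"
	"github.com/pkg/errors"
)

// isClientClosed reports whether err means the ttrpc connection is gone,
// covering both the bare ttrpc.ErrClosed and the wrapped form produced when
// the client shuts down.
func isClientClosed(err error) bool {
	return errors.Cause(err) == ttrpc.ErrClosed
}

A caller could then re-dial and rebuild the client when isClientClosed reports true after a failed Call.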
42 vendor/github.com/containerd/ttrpc/codec.go generated vendored Normal file
@@ -0,0 +1,42 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package ttrpc

import (
	"github.com/gogo/protobuf/proto"
	"github.com/pkg/errors"
)

type codec struct{}

func (c codec) Marshal(msg interface{}) ([]byte, error) {
	switch v := msg.(type) {
	case proto.Message:
		return proto.Marshal(v)
	default:
		return nil, errors.Errorf("ttrpc: cannot marshal unknown type: %T", msg)
	}
}

func (c codec) Unmarshal(p []byte, msg interface{}) error {
	switch v := msg.(type) {
	case proto.Message:
		return proto.Unmarshal(p, v)
	default:
		return errors.Errorf("ttrpc: cannot unmarshal into unknown type: %T", msg)
	}
}
39 vendor/github.com/containerd/ttrpc/config.go generated vendored Normal file
@@ -0,0 +1,39 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package ttrpc

import "github.com/pkg/errors"

type serverConfig struct {
	handshaker Handshaker
}

type ServerOpt func(*serverConfig) error

// WithServerHandshaker can be passed to NewServer to ensure that the
// handshaker is called before every connection attempt.
//
// Only one handshaker is allowed per server.
func WithServerHandshaker(handshaker Handshaker) ServerOpt {
	return func(c *serverConfig) error {
		if c.handshaker != nil {
			return errors.New("only one handshaker allowed per server")
		}
		c.handshaker = handshaker
		return nil
	}
}
50 vendor/github.com/containerd/ttrpc/handshake.go generated vendored Normal file
@@ -0,0 +1,50 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package ttrpc

import (
	"context"
	"net"
)

// Handshaker defines the interface for connection handshakes performed on the
// server or client when first connecting.
type Handshaker interface {
	// Handshake should confirm or decorate a connection that may be incoming
	// to a server or outgoing from a client.
	//
	// If this returns without an error, the caller should use the connection
	// in place of the original connection.
	//
	// The second return value can contain credential specific data, such as
	// unix socket credentials or TLS information.
	//
	// While we currently only have implementations on the server-side, this
	// interface should be sufficient to implement similar handshakes on the
	// client-side.
	Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error)
}

type handshakerFunc func(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error)

func (fn handshakerFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
	return fn(ctx, conn)
}

func noopHandshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
	return conn, nil, nil
}
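To show how this interface fits together with WithServerHandshaker from config.go, here is an editor-added sketch of a custom handshaker; it relies only on the Handshaker interface above, and wiring it into a server (whose constructor is not part of this diff) is noted in prose afterwards.

package example

import (
	"context"
	"log"
	"net"

	"github.com/containerd/ttrpc"
)

// loggingHandshaker accepts every incoming connection unchanged and records
// the remote address before the server starts reading from it.
type loggingHandshaker struct{}

func (loggingHandshaker) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
	log.Printf("ttrpc connection from %v", conn.RemoteAddr())
	return conn, nil, nil
}

// Compile-time check that loggingHandshaker satisfies ttrpc.Handshaker.
var _ ttrpc.Handshaker = loggingHandshaker{}

Such a value would be handed to the server through ttrpc.WithServerHandshaker(loggingHandshaker{}) when the server is constructed; the server type itself is outside the files shown here.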
Some files were not shown because too many files have changed in this diff.