Compare commits
51 Commits
v20.10.5...v18.06.1-c
| SHA1 |
|---|
| 2749843368 |
| c31d0ea160 |
| 3c6ad54e95 |
| b3f1fb5418 |
| 37890ac58d |
| df806ee61c |
| ee7705035c |
| 46d424b49b |
| c2fa77357a |
| 3dbd9eaf78 |
| 35d05c2de6 |
| 911e86cb9f |
| 050f334818 |
| 2c9ca7465d |
| 11720451eb |
| 65a42c8236 |
| ddad2f519a |
| 4e6dbe6a5c |
| 2cb2e84287 |
| 97dd580c86 |
| deacc39445 |
| e1ee48ab04 |
| 254566169d |
| 9cb345caae |
| a92b4dc752 |
| 6f5a828403 |
| a33c562cf3 |
| 7178075fda |
| 744938f0b9 |
| 1d5e206fc2 |
| 31d6292458 |
| ebd85b10e2 |
| 8f5f3adf49 |
| 9fbab758a9 |
| b3d8fd5261 |
| a7c8c474b9 |
| 5f42140bab |
| ad80af43d0 |
| 359d5c8a76 |
| 08479b0776 |
| 00affb1dd5 |
| 0627568d60 |
| aeceff447c |
| 15695813a4 |
| 2ea275157d |
| c6c52ae29a |
| 8600d82bd7 |
| 74b30e7d58 |
| 547b9a4aba |
| 9bea855d07 |
| fe4d20bbb3 |
.github/CODEOWNERS (vendored, 4 changes)
@@ -1,8 +1,8 @@
# GitHub code owners
# See https://github.com/blog/2392-introducing-code-owners

cli/command/stack/** @vdemeester
cli/command/stack/** @vdemeester @silvin-lubecki
cli/compose/** @vdemeester
contrib/completion/bash/** @albers
contrib/completion/zsh/** @sdurrheimer
docs/** @mistyhacks @vdemeester @thaJeztah
docs/** @vdemeester @thaJeztah

@@ -41,7 +41,6 @@
# TODO Describe the docs maintainers role.

people = [
"misty",
"thajeztah"
]

@@ -95,11 +94,6 @@
Email = "justin.cormack@docker.com"
GitHub = "justincormack"

[people.misty]
Name = "Misty Stanley-Jones"
Email = "misty@docker.com"
GitHub = "mistyhacks"

[people.programmerq]
Name = "Jeff Anderson"
Email = "jeff@docker.com"

@@ -13,6 +13,7 @@ import (
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"

"github.com/docker/cli/cli"
@@ -176,8 +177,14 @@ func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error {

// nolint: gocyclo
func runBuild(dockerCli command.Cli, options buildOptions) error {
if os.Getenv("DOCKER_BUILDKIT") != "" {
return runBuildBuildKit(dockerCli, options)
if buildkitEnv := os.Getenv("DOCKER_BUILDKIT"); buildkitEnv != "" {
enableBuildkit, err := strconv.ParseBool(buildkitEnv)
if err != nil {
return errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
}
if enableBuildkit {
return runBuildBuildKit(dockerCli, options)
}
}

var (
@@ -1,12 +1,15 @@
package image

import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"

"github.com/containerd/console"
"github.com/docker/cli/cli"
@@ -43,6 +46,13 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
return errors.Errorf("buildkit not supported by daemon")
}

if options.imageIDFile != "" {
// Avoid leaving a stale file if we eventually fail
if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "removing image ID file")
}
}

var (
remote string
body io.Reader
@@ -159,6 +169,7 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
return eg.Wait()
}

//nolint: gocyclo
func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, options buildOptions, buildOptions types.ImageBuildOptions) (finalErr error) {
response, err := dockerCli.Client().ImageBuild(context.Background(), nil, buildOptions)
if err != nil {
@@ -180,9 +191,8 @@ func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, opt
t := newTracer()
ssArr := []*client.SolveStatus{}

displayStatus := func(displayCh chan *client.SolveStatus) {
displayStatus := func(out *os.File, displayCh chan *client.SolveStatus) {
var c console.Console
out := os.Stderr
// TODO: Handle interactive output in non-interactive environment.
consoleOpt := options.console.Value()
if cons, err := console.ConsoleFromFile(out); err == nil && (consoleOpt == nil || *consoleOpt) {
@@ -210,15 +220,31 @@ func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, opt
}
close(displayCh)
}()
displayStatus(displayCh)
displayStatus(os.Stderr, displayCh)
}
return nil
})
} else {
displayStatus(t.displayCh)
displayStatus(os.Stdout, t.displayCh)
}
defer close(t.displayCh)
err = jsonmessage.DisplayJSONMessagesStream(response.Body, os.Stdout, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), t.write)

buf := bytes.NewBuffer(nil)

imageID := ""
writeAux := func(msg jsonmessage.JSONMessage) {
if msg.ID == "moby.image.id" {
var result types.BuildResult
if err := json.Unmarshal(*msg.Aux, &result); err != nil {
fmt.Fprintf(dockerCli.Err(), "failed to parse aux message: %v", err)
}
imageID = result.ID
return
}
t.write(msg)
}

err = jsonmessage.DisplayJSONMessagesStream(response.Body, buf, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), writeAux)
if err != nil {
if jerr, ok := err.(*jsonmessage.JSONError); ok {
// If no error code is set, default to 1
@@ -228,6 +254,26 @@ func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, opt
return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
}
}

// Everything worked so if -q was provided the output from the daemon
// should be just the image ID and we'll print that to stdout.
//
// TODO: we may want to use Aux messages with ID "moby.image.id" regardless of options.quiet (i.e. don't send HTTP param q=1)
// instead of assuming that output is image ID if options.quiet.
if options.quiet {
imageID = buf.String()
fmt.Fprint(dockerCli.Out(), imageID)
}

if options.imageIDFile != "" {
if imageID == "" {
return errors.Errorf("cannot write %s because server did not provide an image ID", options.imageIDFile)
}
imageID = strings.TrimSpace(imageID)
if err := ioutil.WriteFile(options.imageIDFile, []byte(imageID), 0666); err != nil {
return errors.Wrap(err, "cannot write image ID file")
}
}
return err
}
@@ -6,6 +6,7 @@ import (
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/manifest/store"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -64,20 +65,23 @@ func runManifestAnnotate(dockerCli command.Cli, opts annotateOptions) error {
}

// Update the mf
if imageManifest.Descriptor.Platform == nil {
imageManifest.Descriptor.Platform = new(ocispec.Platform)
}
if opts.os != "" {
imageManifest.Platform.OS = opts.os
imageManifest.Descriptor.Platform.OS = opts.os
}
if opts.arch != "" {
imageManifest.Platform.Architecture = opts.arch
imageManifest.Descriptor.Platform.Architecture = opts.arch
}
for _, osFeature := range opts.osFeatures {
imageManifest.Platform.OSFeatures = appendIfUnique(imageManifest.Platform.OSFeatures, osFeature)
imageManifest.Descriptor.Platform.OSFeatures = appendIfUnique(imageManifest.Descriptor.Platform.OSFeatures, osFeature)
}
if opts.variant != "" {
imageManifest.Platform.Variant = opts.variant
imageManifest.Descriptor.Platform.Variant = opts.variant
}

if !isValidOSArch(imageManifest.Platform.OS, imageManifest.Platform.Architecture) {
if !isValidOSArch(imageManifest.Descriptor.Platform.OS, imageManifest.Descriptor.Platform.Architecture) {
return errors.Errorf("manifest entry for image has unsupported os/arch combination: %s/%s", opts.os, opts.arch)
}
return manifestStore.Save(targetRef, imgRef, imageManifest)
@@ -12,6 +12,7 @@ import (
"github.com/docker/distribution/manifest/manifestlist"
"github.com/docker/distribution/reference"
"github.com/docker/docker/registry"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)

@@ -123,7 +124,7 @@ func printManifestList(dockerCli command.Cli, namedRef reference.Named, list []t
for _, img := range list {
mfd, err := buildManifestDescriptor(targetRepo, img)
if err != nil {
return fmt.Errorf("error assembling ManifestDescriptor")
return errors.Wrap(err, "failed to assemble ManifestDescriptor")
}
manifests = append(manifests, mfd)
}
@@ -14,6 +14,7 @@ import (
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/reference"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
@@ -50,8 +51,22 @@ func fullImageManifest(t *testing.T, ref reference.Named) types.ImageManifest {
},
})
assert.NilError(t, err)

// TODO: include image data for verbose inspect
return types.NewImageManifest(ref, digest.Digest("sha256:7328f6f8b41890597575cbaadc884e7386ae0acc53b747401ebce5cf0d62abcd"), types.Image{OS: "linux", Architecture: "amd64"}, man)
mt, raw, err := man.Payload()
assert.NilError(t, err)

desc := ocispec.Descriptor{
Digest: digest.FromBytes(raw),
Size: int64(len(raw)),
MediaType: mt,
Platform: &ocispec.Platform{
Architecture: "amd64",
OS: "linux",
},
}

return types.NewImageManifest(ref, desc, man)
}

func TestInspectCommandLocalManifestNotFound(t *testing.T) {
@@ -10,6 +10,7 @@ import (
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/manifest/types"
registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/manifestlist"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/reference"
@@ -141,7 +142,9 @@ func buildManifestList(manifests []types.ImageManifest, targetRef reference.Name

descriptors := []manifestlist.ManifestDescriptor{}
for _, imageManifest := range manifests {
if imageManifest.Platform.Architecture == "" || imageManifest.Platform.OS == "" {
if imageManifest.Descriptor.Platform == nil ||
imageManifest.Descriptor.Platform.Architecture == "" ||
imageManifest.Descriptor.Platform.OS == "" {
return nil, errors.Errorf(
"manifest %s must have an OS and Architecture to be pushed to a registry", imageManifest.Ref)
}
@@ -167,17 +170,18 @@ func buildManifestDescriptor(targetRepo *registry.RepositoryInfo, imageManifest
return manifestlist.ManifestDescriptor{}, errors.Errorf("cannot use source images from a different registry than the target image: %s != %s", manifestRepoHostname, targetRepoHostname)
}

mediaType, raw, err := imageManifest.Payload()
if err != nil {
return manifestlist.ManifestDescriptor{}, err
manifest := manifestlist.ManifestDescriptor{
Descriptor: distribution.Descriptor{
Digest: imageManifest.Descriptor.Digest,
Size: imageManifest.Descriptor.Size,
MediaType: imageManifest.Descriptor.MediaType,
},
}

manifest := manifestlist.ManifestDescriptor{
Platform: imageManifest.Platform,
platform := types.PlatformSpecFromOCI(imageManifest.Descriptor.Platform)
if platform != nil {
manifest.Platform = *platform
}
manifest.Descriptor.Digest = imageManifest.Digest
manifest.Size = int64(len(raw))
manifest.MediaType = mediaType

if err = manifest.Descriptor.Digest.Validate(); err != nil {
return manifestlist.ManifestDescriptor{}, errors.Wrapf(err,
@@ -195,7 +199,11 @@ func buildBlobRequestList(imageManifest types.ImageManifest, repoName reference.
if err != nil {
return nil, err
}
blobReqs = append(blobReqs, manifestBlob{canonical: canonical, os: imageManifest.Platform.OS})
var os string
if imageManifest.Descriptor.Platform != nil {
os = imageManifest.Descriptor.Platform.OS
}
blobReqs = append(blobReqs, manifestBlob{canonical: canonical, os: os})
}
return blobReqs, nil
}
@@ -206,7 +214,7 @@ func buildPutManifestRequest(imageManifest types.ImageManifest, targetRef refere
if err != nil {
return mountRequest{}, err
}
mountRef, err := reference.WithDigest(refWithoutTag, imageManifest.Digest)
mountRef, err := reference.WithDigest(refWithoutTag, imageManifest.Descriptor.Digest)
if err != nil {
return mountRequest{}, err
}
@@ -1,6 +1,18 @@
{
"Ref": "example.com/alpine:3.0",
"Digest": "sha256:7328f6f8b41890597575cbaadc884e7386ae0acc53b747401ebce5cf0d62abcd",
"Descriptor": {
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe",
"size": 528,
"platform": {
"architecture": "arm",
"os": "freebsd",
"os.features": [
"feature1"
],
"variant": "v7"
}
},
"SchemaV2Manifest": {
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
@@ -16,13 +28,5 @@
"digest": "sha256:88286f41530e93dffd4b964e1db22ce4939fffa4a4c665dab8591fbab03d4926"
}
]
},
"Platform": {
"architecture": "arm",
"os": "freebsd",
"os.features": [
"feature1"
],
"variant": "v7"
}
}
@@ -4,8 +4,8 @@
"manifests": [
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 428,
"digest": "sha256:7328f6f8b41890597575cbaadc884e7386ae0acc53b747401ebce5cf0d62abcd",
"size": 528,
"digest": "sha256:1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe",
"platform": {
"architecture": "amd64",
"os": "linux"
@@ -13,8 +13,8 @@
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 428,
"digest": "sha256:7328f6f8b41890597575cbaadc884e7386ae0acc53b747401ebce5cf0d62abcd",
"size": 528,
"digest": "sha256:1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe",
"platform": {
"architecture": "amd64",
"os": "linux"
@@ -8,6 +8,7 @@ import (

"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
cliconfig "github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/config/configfile"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@@ -27,7 +28,11 @@ func NewStackCommand(dockerCli command.Cli) *cobra.Command {
Short: "Manage Docker stacks",
Args: cli.NoArgs,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
orchestrator, err := getOrchestrator(dockerCli.ConfigFile(), cmd, dockerCli.Err())
configFile := dockerCli.ConfigFile()
if configFile == nil {
configFile = cliconfig.LoadDefaultConfigFile(dockerCli.Err())
}
orchestrator, err := getOrchestrator(configFile, cmd, dockerCli.Err())
if err != nil {
return err
}
@@ -42,9 +47,13 @@ func NewStackCommand(dockerCli command.Cli) *cobra.Command {
},
}
defaultHelpFunc := cmd.HelpFunc()
cmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
hideOrchestrationFlags(cmd, opts.orchestrator)
defaultHelpFunc(cmd, args)
cmd.SetHelpFunc(func(c *cobra.Command, args []string) {
if err := cmd.PersistentPreRunE(c, args); err != nil {
fmt.Fprintln(dockerCli.Err(), err)
return
}
hideOrchestrationFlags(c, opts.orchestrator)
defaultHelpFunc(c, args)
})
cmd.AddCommand(
newDeployCommand(dockerCli, &opts),
@@ -58,7 +58,7 @@ func newDeployCommand(dockerCli command.Cli, common *commonOptions) *cobra.Comma
flags.StringVar(&opts.Bundlefile, "bundle-file", "", "Path to a Distributed Application Bundle file")
flags.SetAnnotation("bundle-file", "experimental", nil)
flags.SetAnnotation("bundle-file", "swarm", nil)
flags.StringSliceVarP(&opts.Composefiles, "compose-file", "c", []string{}, "Path to a Compose file")
flags.StringSliceVarP(&opts.Composefiles, "compose-file", "c", []string{}, `Path to a Compose file, or "-" to read from stdin`)
flags.SetAnnotation("compose-file", "version", []string{"1.25"})
flags.BoolVar(&opts.SendRegistryAuth, "with-registry-auth", false, "Send registry authentication details to Swarm agents")
flags.SetAnnotation("with-registry-auth", "swarm", nil)
@@ -8,16 +8,11 @@ import (
"github.com/docker/cli/cli/command/stack/options"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/morikuni/aec"
"github.com/pkg/errors"
)

// RunDeploy is the kubernetes implementation of docker stack deploy
func RunDeploy(dockerCli *KubeCli, opts options.Deploy, cfg *composetypes.Config) error {
cmdOut := dockerCli.Out()
// Check arguments
if len(opts.Composefiles) == 0 {
return errors.Errorf("Please specify only one compose file (with --compose-file).")
}

// Initialize clients
composeClient, err := dockerCli.composeClient()
@@ -68,6 +68,17 @@ func runCA(dockerCli command.Cli, flags *pflag.FlagSet, opts caOptions) error {
return displayTrustRoot(dockerCli.Out(), swarmInspect)
}

if flags.Changed(flagExternalCA) && len(opts.externalCA.Value()) > 0 && !flags.Changed(flagCACert) {
return fmt.Errorf(
"rotating to an external CA requires the `--%s` flag to specify the external CA's cert - "+
"to add an external CA with the current root CA certificate, use the `update` command instead", flagCACert)
}

if flags.Changed(flagCACert) && len(opts.externalCA.Value()) == 0 && !flags.Changed(flagCAKey) {
return fmt.Errorf("the --%s flag requires that a --%s flag and/or --%s flag be provided as well",
flagCACert, flagCAKey, flagExternalCA)
}

updateSwarmSpec(&swarmInspect.Spec, flags, opts)
if err := client.SwarmUpdate(ctx, swarmInspect.Version, swarmInspect.Spec, swarm.UpdateFlags{}); err != nil {
return err
@@ -80,20 +91,15 @@ func runCA(dockerCli command.Cli, flags *pflag.FlagSet, opts caOptions) error {
}

func updateSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet, opts caOptions) {
opts.mergeSwarmSpecCAFlags(spec, flags)
caCert := opts.rootCACert.Contents()
caKey := opts.rootCAKey.Contents()
opts.mergeSwarmSpecCAFlags(spec, flags, caCert)

spec.CAConfig.SigningCACert = caCert
spec.CAConfig.SigningCAKey = caKey

if caCert != "" {
spec.CAConfig.SigningCACert = caCert
}
if caKey != "" {
spec.CAConfig.SigningCAKey = caKey
}
if caKey == "" && caCert == "" {
spec.CAConfig.ForceRotate++
spec.CAConfig.SigningCACert = ""
spec.CAConfig.SigningCAKey = ""
}
}
@ -13,6 +13,28 @@ import (
|
||||
is "gotest.tools/assert/cmp"
|
||||
)
|
||||
|
||||
const (
|
||||
cert = `
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIBuDCCAV4CCQDOqUYOWdqMdjAKBggqhkjOPQQDAzBjMQswCQYDVQQGEwJVUzEL
|
||||
MAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRv
|
||||
Y2tlcjEPMA0GA1UECwwGRG9ja2VyMQ0wCwYDVQQDDARUZXN0MCAXDTE4MDcwMjIx
|
||||
MjkxOFoYDzMwMTcxMTAyMjEyOTE4WjBjMQswCQYDVQQGEwJVUzELMAkGA1UECAwC
|
||||
Q0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRvY2tlcjEPMA0G
|
||||
A1UECwwGRG9ja2VyMQ0wCwYDVQQDDARUZXN0MFkwEwYHKoZIzj0CAQYIKoZIzj0D
|
||||
AQcDQgAEgvvZl5Vqpr1e+g5IhoU6TZHgRau+BZETVFTmqyWYajA/mooRQ1MZTozu
|
||||
s9ZZZA8tzUhIqS36gsFuyIZ4YiAlyjAKBggqhkjOPQQDAwNIADBFAiBQ7pCPQrj8
|
||||
8zaItMf0pk8j1NU5XrFqFEZICzvjzUJQBAIhAKq2gFwoTn8KH+cAAXZpAGJPmOsT
|
||||
zsBT8gBAOHhNA6/2
|
||||
-----END CERTIFICATE-----`
|
||||
key = `
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
MHcCAQEEICyheZpw70pbgO4hEuwhZTETWyTpNJmJ3TyFaWT6WTRkoAoGCCqGSM49
|
||||
AwEHoUQDQgAEgvvZl5Vqpr1e+g5IhoU6TZHgRau+BZETVFTmqyWYajA/mooRQ1MZ
|
||||
Tozus9ZZZA8tzUhIqS36gsFuyIZ4YiAlyg==
|
||||
-----END EC PRIVATE KEY-----`
|
||||
)
|
||||
|
||||
func swarmSpecWithFullCAConfig() *swarm.Spec {
|
||||
return &swarm.Spec{
|
||||
CAConfig: swarm.CAConfig{
|
||||
@ -37,51 +59,79 @@ func TestDisplayTrustRootNoRoot(t *testing.T) {
|
||||
assert.Error(t, err, "No CA information available")
|
||||
}
|
||||
|
||||
type invalidCATestCases struct {
|
||||
args []string
|
||||
errorMsg string
|
||||
}
|
||||
|
||||
func writeFile(data string) (string, error) {
|
||||
tmpfile, err := ioutil.TempFile("", "testfile")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
_, err = tmpfile.Write([]byte(data))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
tmpfile.Close()
|
||||
return tmpfile.Name(), nil
|
||||
}
|
||||
|
||||
func TestDisplayTrustRootInvalidFlags(t *testing.T) {
|
||||
// we need an actual PEMfile to test
|
||||
tmpfile, err := ioutil.TempFile("", "pemfile")
|
||||
tmpfile, err := writeFile(cert)
|
||||
assert.NilError(t, err)
|
||||
defer os.Remove(tmpfile.Name())
|
||||
tmpfile.Write([]byte(`
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIBajCCARCgAwIBAgIUe0+jYWhxN8fFOByC7yveIYgvx1kwCgYIKoZIzj0EAwIw
|
||||
EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNjI3MTUxNDAwWhcNMzcwNjIyMTUx
|
||||
NDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH
|
||||
A0IABGgbOZLd7b4b262+6m4ignIecbAZKim6djNiIS1Kl5IHciXYn7gnSpsayjn7
|
||||
GQABpgkdPeM9TEQowmtR1qSnORujQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
|
||||
Af8EBTADAQH/MB0GA1UdDgQWBBQ6Rtcn823/fxRZyheRDFpDzuBMpTAKBggqhkjO
|
||||
PQQDAgNIADBFAiEAqD3Kb2rgsy6NoTk+zEgcUi/aGBCsvQDG3vML1PXN8j0CIBjj
|
||||
4nDj+GmHXcnKa8wXx70Z8OZEpRQIiKDDLmcXuslp
|
||||
-----END CERTIFICATE-----
|
||||
`))
|
||||
tmpfile.Close()
|
||||
defer os.Remove(tmpfile)
|
||||
|
||||
errorTestCases := [][]string{
|
||||
errorTestCases := []invalidCATestCases{
|
||||
{
|
||||
"--ca-cert=" + tmpfile.Name(),
|
||||
args: []string{"--ca-cert=" + tmpfile},
|
||||
errorMsg: "flag requires the `--rotate` flag to update the CA",
|
||||
},
|
||||
{
|
||||
"--ca-key=" + tmpfile.Name(),
|
||||
args: []string{"--ca-key=" + tmpfile},
|
||||
errorMsg: "flag requires the `--rotate` flag to update the CA",
|
||||
},
|
||||
{ // to make sure we're not erroring because we didn't provide a CA key along with the CA cert
|
||||
|
||||
"--ca-cert=" + tmpfile.Name(),
|
||||
"--ca-key=" + tmpfile.Name(),
|
||||
args: []string{
|
||||
"--ca-cert=" + tmpfile,
|
||||
"--ca-key=" + tmpfile,
|
||||
},
|
||||
errorMsg: "flag requires the `--rotate` flag to update the CA",
|
||||
},
|
||||
{
|
||||
"--cert-expiry=2160h0m0s",
|
||||
args: []string{"--cert-expiry=2160h0m0s"},
|
||||
errorMsg: "flag requires the `--rotate` flag to update the CA",
|
||||
},
|
||||
{
|
||||
"--external-ca=protocol=cfssl,url=https://some.com/https/url",
|
||||
args: []string{"--external-ca=protocol=cfssl,url=https://some.com/https/url"},
|
||||
errorMsg: "flag requires the `--rotate` flag to update the CA",
|
||||
},
|
||||
{ // to make sure we're not erroring because we didn't provide a CA cert and external CA
|
||||
|
||||
"--ca-cert=" + tmpfile.Name(),
|
||||
"--external-ca=protocol=cfssl,url=https://some.com/https/url",
|
||||
args: []string{
|
||||
"--ca-cert=" + tmpfile,
|
||||
"--external-ca=protocol=cfssl,url=https://some.com/https/url",
|
||||
},
|
||||
errorMsg: "flag requires the `--rotate` flag to update the CA",
|
||||
},
|
||||
{
|
||||
args: []string{
|
||||
"--rotate",
|
||||
"--external-ca=protocol=cfssl,url=https://some.com/https/url",
|
||||
},
|
||||
errorMsg: "rotating to an external CA requires the `--ca-cert` flag to specify the external CA's cert - " +
|
||||
"to add an external CA with the current root CA certificate, use the `update` command instead",
|
||||
},
|
||||
{
|
||||
args: []string{
|
||||
"--rotate",
|
||||
"--ca-cert=" + tmpfile,
|
||||
},
|
||||
errorMsg: "the --ca-cert flag requires that a --ca-key flag and/or --external-ca flag be provided as well",
|
||||
},
|
||||
}
|
||||
|
||||
for _, args := range errorTestCases {
|
||||
for _, testCase := range errorTestCases {
|
||||
cmd := newCACommand(
|
||||
test.NewFakeCli(&fakeClient{
|
||||
swarmInspectFunc: func() (swarm.Swarm, error) {
|
||||
@ -94,9 +144,9 @@ PQQDAgNIADBFAiEAqD3Kb2rgsy6NoTk+zEgcUi/aGBCsvQDG3vML1PXN8j0CIBjj
|
||||
}, nil
|
||||
},
|
||||
}))
|
||||
assert.Check(t, cmd.Flags().Parse(args))
|
||||
assert.Check(t, cmd.Flags().Parse(testCase.args))
|
||||
cmd.SetOutput(ioutil.Discard)
|
||||
assert.ErrorContains(t, cmd.Execute(), "flag requires the `--rotate` flag to update the CA")
|
||||
assert.ErrorContains(t, cmd.Execute(), testCase.errorMsg)
|
||||
}
|
||||
}
|
||||
|
||||
@ -112,43 +162,139 @@ func TestDisplayTrustRoot(t *testing.T) {
|
||||
assert.Check(t, is.Equal(trustRoot+"\n", buffer.String()))
|
||||
}
|
||||
|
||||
type swarmUpdateRecorder struct {
|
||||
spec swarm.Spec
|
||||
}
|
||||
|
||||
func (s *swarmUpdateRecorder) swarmUpdate(sp swarm.Spec, _ swarm.UpdateFlags) error {
|
||||
s.spec = sp
|
||||
return nil
|
||||
}
|
||||
|
||||
func swarmInspectFuncWithFullCAConfig() (swarm.Swarm, error) {
|
||||
return swarm.Swarm{
|
||||
ClusterInfo: swarm.ClusterInfo{
|
||||
Spec: *swarmSpecWithFullCAConfig(),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func TestUpdateSwarmSpecDefaultRotate(t *testing.T) {
|
||||
spec := swarmSpecWithFullCAConfig()
|
||||
flags := newCACommand(nil).Flags()
|
||||
updateSwarmSpec(spec, flags, caOptions{})
|
||||
s := &swarmUpdateRecorder{}
|
||||
cli := test.NewFakeCli(&fakeClient{
|
||||
swarmInspectFunc: swarmInspectFuncWithFullCAConfig,
|
||||
swarmUpdateFunc: s.swarmUpdate,
|
||||
})
|
||||
cmd := newCACommand(cli)
|
||||
cmd.SetArgs([]string{"--rotate", "--detach"})
|
||||
cmd.SetOutput(cli.OutBuffer())
|
||||
assert.NilError(t, cmd.Execute())
|
||||
|
||||
expected := swarmSpecWithFullCAConfig()
|
||||
expected.CAConfig.ForceRotate = 2
|
||||
expected.CAConfig.SigningCACert = ""
|
||||
expected.CAConfig.SigningCAKey = ""
|
||||
assert.Check(t, is.DeepEqual(expected, spec))
|
||||
assert.Check(t, is.DeepEqual(*expected, s.spec))
|
||||
}
|
||||
|
||||
func TestUpdateSwarmSpecPartial(t *testing.T) {
|
||||
spec := swarmSpecWithFullCAConfig()
|
||||
flags := newCACommand(nil).Flags()
|
||||
updateSwarmSpec(spec, flags, caOptions{
|
||||
rootCACert: PEMFile{contents: "cacert"},
|
||||
func TestUpdateSwarmSpecCertAndKey(t *testing.T) {
|
||||
certfile, err := writeFile(cert)
|
||||
assert.NilError(t, err)
|
||||
defer os.Remove(certfile)
|
||||
|
||||
keyfile, err := writeFile(key)
|
||||
assert.NilError(t, err)
|
||||
defer os.Remove(keyfile)
|
||||
|
||||
s := &swarmUpdateRecorder{}
|
||||
cli := test.NewFakeCli(&fakeClient{
|
||||
swarmInspectFunc: swarmInspectFuncWithFullCAConfig,
|
||||
swarmUpdateFunc: s.swarmUpdate,
|
||||
})
|
||||
cmd := newCACommand(cli)
|
||||
cmd.SetArgs([]string{
|
||||
"--rotate",
|
||||
"--detach",
|
||||
"--ca-cert=" + certfile,
|
||||
"--ca-key=" + keyfile,
|
||||
"--cert-expiry=3m"})
|
||||
cmd.SetOutput(cli.OutBuffer())
|
||||
assert.NilError(t, cmd.Execute())
|
||||
|
||||
expected := swarmSpecWithFullCAConfig()
|
||||
expected.CAConfig.SigningCACert = "cacert"
|
||||
assert.Check(t, is.DeepEqual(expected, spec))
|
||||
}
|
||||
|
||||
func TestUpdateSwarmSpecFullFlags(t *testing.T) {
|
||||
flags := newCACommand(nil).Flags()
|
||||
flags.Lookup(flagCertExpiry).Changed = true
|
||||
spec := swarmSpecWithFullCAConfig()
|
||||
updateSwarmSpec(spec, flags, caOptions{
|
||||
rootCACert: PEMFile{contents: "cacert"},
|
||||
rootCAKey: PEMFile{contents: "cakey"},
|
||||
swarmCAOptions: swarmCAOptions{nodeCertExpiry: 3 * time.Minute},
|
||||
})
|
||||
|
||||
expected := swarmSpecWithFullCAConfig()
|
||||
expected.CAConfig.SigningCACert = "cacert"
|
||||
expected.CAConfig.SigningCAKey = "cakey"
|
||||
expected.CAConfig.SigningCACert = cert
|
||||
expected.CAConfig.SigningCAKey = key
|
||||
expected.CAConfig.NodeCertExpiry = 3 * time.Minute
|
||||
assert.Check(t, is.DeepEqual(expected, spec))
|
||||
assert.Check(t, is.DeepEqual(*expected, s.spec))
|
||||
}
|
||||
|
||||
func TestUpdateSwarmSpecCertAndExternalCA(t *testing.T) {
|
||||
certfile, err := writeFile(cert)
|
||||
assert.NilError(t, err)
|
||||
defer os.Remove(certfile)
|
||||
|
||||
s := &swarmUpdateRecorder{}
|
||||
cli := test.NewFakeCli(&fakeClient{
|
||||
swarmInspectFunc: swarmInspectFuncWithFullCAConfig,
|
||||
swarmUpdateFunc: s.swarmUpdate,
|
||||
})
|
||||
cmd := newCACommand(cli)
|
||||
cmd.SetArgs([]string{
|
||||
"--rotate",
|
||||
"--detach",
|
||||
"--ca-cert=" + certfile,
|
||||
"--external-ca=protocol=cfssl,url=https://some.external.ca"})
|
||||
cmd.SetOutput(cli.OutBuffer())
|
||||
assert.NilError(t, cmd.Execute())
|
||||
|
||||
expected := swarmSpecWithFullCAConfig()
|
||||
expected.CAConfig.SigningCACert = cert
|
||||
expected.CAConfig.SigningCAKey = ""
|
||||
expected.CAConfig.ExternalCAs = []*swarm.ExternalCA{
|
||||
{
|
||||
Protocol: swarm.ExternalCAProtocolCFSSL,
|
||||
URL: "https://some.external.ca",
|
||||
CACert: cert,
|
||||
Options: make(map[string]string),
|
||||
},
|
||||
}
|
||||
assert.Check(t, is.DeepEqual(*expected, s.spec))
|
||||
}
|
||||
|
||||
func TestUpdateSwarmSpecCertAndKeyAndExternalCA(t *testing.T) {
|
||||
certfile, err := writeFile(cert)
|
||||
assert.NilError(t, err)
|
||||
defer os.Remove(certfile)
|
||||
|
||||
keyfile, err := writeFile(key)
|
||||
assert.NilError(t, err)
|
||||
defer os.Remove(keyfile)
|
||||
|
||||
s := &swarmUpdateRecorder{}
|
||||
cli := test.NewFakeCli(&fakeClient{
|
||||
swarmInspectFunc: swarmInspectFuncWithFullCAConfig,
|
||||
swarmUpdateFunc: s.swarmUpdate,
|
||||
})
|
||||
cmd := newCACommand(cli)
|
||||
cmd.SetArgs([]string{
|
||||
"--rotate",
|
||||
"--detach",
|
||||
"--ca-cert=" + certfile,
|
||||
"--ca-key=" + keyfile,
|
||||
"--external-ca=protocol=cfssl,url=https://some.external.ca"})
|
||||
cmd.SetOutput(cli.OutBuffer())
|
||||
assert.NilError(t, cmd.Execute())
|
||||
|
||||
expected := swarmSpecWithFullCAConfig()
|
||||
expected.CAConfig.SigningCACert = cert
|
||||
expected.CAConfig.SigningCAKey = key
|
||||
expected.CAConfig.ExternalCAs = []*swarm.ExternalCA{
|
||||
{
|
||||
Protocol: swarm.ExternalCAProtocolCFSSL,
|
||||
URL: "https://some.external.ca",
|
||||
CACert: cert,
|
||||
Options: make(map[string]string),
|
||||
},
|
||||
}
|
||||
assert.Check(t, is.DeepEqual(*expected, s.spec))
|
||||
}
|
||||
|
||||
@ -230,7 +230,7 @@ func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) {
|
||||
addSwarmCAFlags(flags, &opts.swarmCAOptions)
|
||||
}
|
||||
|
||||
func (opts *swarmOptions) mergeSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet) {
|
||||
func (opts *swarmOptions) mergeSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet, caCert string) {
|
||||
if flags.Changed(flagTaskHistoryLimit) {
|
||||
spec.Orchestration.TaskHistoryRetentionLimit = &opts.taskHistoryLimit
|
||||
}
|
||||
@ -246,7 +246,7 @@ func (opts *swarmOptions) mergeSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet)
|
||||
if flags.Changed(flagAutolock) {
|
||||
spec.EncryptionConfig.AutoLockManagers = opts.autolock
|
||||
}
|
||||
opts.mergeSwarmSpecCAFlags(spec, flags)
|
||||
opts.mergeSwarmSpecCAFlags(spec, flags, caCert)
|
||||
}
|
||||
|
||||
type swarmCAOptions struct {
|
||||
@ -254,17 +254,20 @@ type swarmCAOptions struct {
|
||||
externalCA ExternalCAOption
|
||||
}
|
||||
|
||||
func (opts *swarmCAOptions) mergeSwarmSpecCAFlags(spec *swarm.Spec, flags *pflag.FlagSet) {
|
||||
func (opts *swarmCAOptions) mergeSwarmSpecCAFlags(spec *swarm.Spec, flags *pflag.FlagSet, caCert string) {
|
||||
if flags.Changed(flagCertExpiry) {
|
||||
spec.CAConfig.NodeCertExpiry = opts.nodeCertExpiry
|
||||
}
|
||||
if flags.Changed(flagExternalCA) {
|
||||
spec.CAConfig.ExternalCAs = opts.externalCA.Value()
|
||||
for _, ca := range spec.CAConfig.ExternalCAs {
|
||||
ca.CACert = caCert
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (opts *swarmOptions) ToSpec(flags *pflag.FlagSet) swarm.Spec {
|
||||
var spec swarm.Spec
|
||||
opts.mergeSwarmSpec(&spec, flags)
|
||||
opts.mergeSwarmSpec(&spec, flags, "")
|
||||
return spec
|
||||
}
|
||||
|
||||
@ -48,7 +48,7 @@ func runUpdate(dockerCli command.Cli, flags *pflag.FlagSet, opts swarmOptions) e
|
||||
|
||||
prevAutoLock := swarmInspect.Spec.EncryptionConfig.AutoLockManagers
|
||||
|
||||
opts.mergeSwarmSpec(&swarmInspect.Spec, flags)
|
||||
opts.mergeSwarmSpec(&swarmInspect.Spec, flags, swarmInspect.ClusterInfo.TLSInfo.TrustRoot)
|
||||
|
||||
curAutoLock := swarmInspect.Spec.EncryptionConfig.AutoLockManagers
|
||||
|
||||
|
||||
@ -10,6 +10,7 @@ import (
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
// Import builders to get the builder function as package function
|
||||
. "github.com/docker/cli/internal/test/builders"
|
||||
"gotest.tools/assert"
|
||||
@ -82,6 +83,9 @@ func TestSwarmUpdateErrors(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSwarmUpdate(t *testing.T) {
|
||||
swarmInfo := Swarm()
|
||||
swarmInfo.ClusterInfo.TLSInfo.TrustRoot = "trustroot"
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
args []string
|
||||
@ -105,6 +109,9 @@ func TestSwarmUpdate(t *testing.T) {
|
||||
flagAutolock: "true",
|
||||
flagQuiet: "true",
|
||||
},
|
||||
swarmInspectFunc: func() (swarm.Swarm, error) {
|
||||
return *swarmInfo, nil
|
||||
},
|
||||
swarmUpdateFunc: func(swarm swarm.Spec, flags swarm.UpdateFlags) error {
|
||||
if *swarm.Orchestration.TaskHistoryRetentionLimit != 10 {
|
||||
return errors.Errorf("historyLimit not correctly set")
|
||||
@ -123,7 +130,7 @@ func TestSwarmUpdate(t *testing.T) {
|
||||
if swarm.CAConfig.NodeCertExpiry != certExpiryDuration {
|
||||
return errors.Errorf("certExpiry not correctly set")
|
||||
}
|
||||
if len(swarm.CAConfig.ExternalCAs) != 1 {
|
||||
if len(swarm.CAConfig.ExternalCAs) != 1 || swarm.CAConfig.ExternalCAs[0].CACert != "trustroot" {
|
||||
return errors.Errorf("externalCA not correctly set")
|
||||
}
|
||||
if *swarm.Raft.KeepOldSnapshots != 10 {
|
||||
|
||||
@ -1,8 +1,44 @@
|
||||
Client:
|
||||
Version: 18.99.5-ce
|
||||
API version: 1.38
|
||||
Go version: go1.10.2
|
||||
Git commit: deadbeef
|
||||
Built: Wed May 30 22:21:05 2018
|
||||
OS/Arch: linux/amd64
|
||||
Experimental: true
|
||||
Version: 18.99.5-ce
|
||||
API version: 1.38
|
||||
Go version: go1.10.2
|
||||
Git commit: deadbeef
|
||||
Built: Wed May 30 22:21:05 2018
|
||||
OS/Arch: linux/amd64
|
||||
Experimental: true
|
||||
|
||||
Server: Docker Enterprise Edition (EE) 2.0
|
||||
Engine:
|
||||
Version: 17.06.2-ee-15
|
||||
API version: 1.30 (minimum version 1.12)
|
||||
Go version: go1.8.7
|
||||
Git commit: 64ddfa6
|
||||
Built: Mon Jul 9 23:38:38 2018
|
||||
OS/Arch: linux/amd64
|
||||
Experimental: false
|
||||
Universal Control Plane:
|
||||
Version: 17.06.2-ee-15
|
||||
ApiVersion: 1.30
|
||||
Arch: amd64
|
||||
BuildTime: Mon Jul 2 21:24:07 UTC 2018
|
||||
GitCommit: 4513922
|
||||
GoVersion: go1.9.4
|
||||
MinApiVersion: 1.20
|
||||
Os: linux
|
||||
Version: 3.0.3-tp2
|
||||
Kubernetes:
|
||||
Version: 1.8+
|
||||
buildDate: 2018-04-26T16:51:21Z
|
||||
compiler: gc
|
||||
gitCommit: 8d637aedf46b9c21dde723e29c645b9f27106fa5
|
||||
gitTreeState: clean
|
||||
gitVersion: v1.8.11-docker-8d637ae
|
||||
goVersion: go1.8.3
|
||||
major: 1
|
||||
minor: 8+
|
||||
platform: linux/amd64
|
||||
Calico:
|
||||
Version: v3.0.8
|
||||
cni: v2.0.6
|
||||
kube-controllers: v2.0.5
|
||||
node: v3.0.8
|
||||
|
||||
@ -204,7 +204,7 @@ func runVersion(dockerCli command.Cli, opts *versionOptions) error {
|
||||
}
|
||||
|
||||
func prettyPrintVersion(dockerCli command.Cli, vd versionInfo, tmpl *template.Template) error {
|
||||
t := tabwriter.NewWriter(dockerCli.Out(), 15, 1, 1, ' ', 0)
|
||||
t := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 1, ' ', 0)
|
||||
err := tmpl.Execute(t, vd)
|
||||
t.Write([]byte("\n"))
|
||||
t.Flush()
|
||||
|
||||
@ -43,8 +43,67 @@ func TestVersionAlign(t *testing.T) {
|
||||
BuildTime: "Wed May 30 22:21:05 2018",
|
||||
Experimental: true,
|
||||
},
|
||||
Server: &types.Version{},
|
||||
}
|
||||
|
||||
vi.Server.Platform.Name = "Docker Enterprise Edition (EE) 2.0"
|
||||
|
||||
vi.Server.Components = append(vi.Server.Components, types.ComponentVersion{
|
||||
Name: "Engine",
|
||||
Version: "17.06.2-ee-15",
|
||||
Details: map[string]string{
|
||||
"ApiVersion": "1.30",
|
||||
"MinAPIVersion": "1.12",
|
||||
"GitCommit": "64ddfa6",
|
||||
"GoVersion": "go1.8.7",
|
||||
"Os": "linux",
|
||||
"Arch": "amd64",
|
||||
"BuildTime": "Mon Jul 9 23:38:38 2018",
|
||||
"Experimental": "false",
|
||||
},
|
||||
})
|
||||
|
||||
vi.Server.Components = append(vi.Server.Components, types.ComponentVersion{
|
||||
Name: "Universal Control Plane",
|
||||
Version: "17.06.2-ee-15",
|
||||
Details: map[string]string{
|
||||
"Version": "3.0.3-tp2",
|
||||
"ApiVersion": "1.30",
|
||||
"Arch": "amd64",
|
||||
"BuildTime": "Mon Jul 2 21:24:07 UTC 2018",
|
||||
"GitCommit": "4513922",
|
||||
"GoVersion": "go1.9.4",
|
||||
"MinApiVersion": "1.20",
|
||||
"Os": "linux",
|
||||
},
|
||||
})
|
||||
|
||||
vi.Server.Components = append(vi.Server.Components, types.ComponentVersion{
|
||||
Name: "Kubernetes",
|
||||
Version: "1.8+",
|
||||
Details: map[string]string{
|
||||
"buildDate": "2018-04-26T16:51:21Z",
|
||||
"compiler": "gc",
|
||||
"gitCommit": "8d637aedf46b9c21dde723e29c645b9f27106fa5",
|
||||
"gitTreeState": "clean",
|
||||
"gitVersion": "v1.8.11-docker-8d637ae",
|
||||
"goVersion": "go1.8.3",
|
||||
"major": "1",
|
||||
"minor": "8+",
|
||||
"platform": "linux/amd64",
|
||||
},
|
||||
})
|
||||
|
||||
vi.Server.Components = append(vi.Server.Components, types.ComponentVersion{
|
||||
Name: "Calico",
|
||||
Version: "v3.0.8",
|
||||
Details: map[string]string{
|
||||
"cni": "v2.0.6",
|
||||
"kube-controllers": "v2.0.5",
|
||||
"node": "v3.0.8",
|
||||
},
|
||||
})
|
||||
|
||||
cli := test.NewFakeCli(&fakeClient{})
|
||||
tmpl, err := newVersionTemplate("")
|
||||
assert.NilError(t, err)
|
||||
|
||||
@ -96,12 +96,12 @@ func Load(configDir string) (*configfile.ConfigFile, error) {
|
||||
}
|
||||
file, err := os.Open(confFile)
|
||||
if err != nil {
|
||||
return configFile, errors.Wrap(err, confFile)
|
||||
return configFile, errors.Wrap(err, filename)
|
||||
}
|
||||
defer file.Close()
|
||||
err = configFile.LegacyLoadFromReader(file)
|
||||
if err != nil {
|
||||
return configFile, errors.Wrap(err, confFile)
|
||||
return configFile, errors.Wrap(err, filename)
|
||||
}
|
||||
return configFile, nil
|
||||
}
|
||||
|
||||
@ -1,11 +1,11 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"github.com/docker/docker-credential-helpers/pass"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func defaultCredentialsStore() string {
|
||||
if pass.PassInitialized {
|
||||
if _, err := exec.LookPath("pass"); err == nil {
|
||||
return "pass"
|
||||
}
|
||||
|
||||
|
||||
@ -9,7 +9,11 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/docker/cli/cli/manifest/types"
|
||||
"github.com/docker/distribution/manifest/manifestlist"
|
||||
"github.com/docker/distribution/reference"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Store manages local storage of image distribution manifests
|
||||
@ -50,8 +54,37 @@ func (s *fsStore) getFromFilename(ref reference.Reference, filename string) (typ
|
||||
case err != nil:
|
||||
return types.ImageManifest{}, err
|
||||
}
|
||||
var manifestInfo types.ImageManifest
|
||||
return manifestInfo, json.Unmarshal(bytes, &manifestInfo)
|
||||
var manifestInfo struct {
|
||||
types.ImageManifest
|
||||
|
||||
// Deprecated Fields, replaced by Descriptor
|
||||
Digest digest.Digest
|
||||
Platform *manifestlist.PlatformSpec
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(bytes, &manifestInfo); err != nil {
|
||||
return types.ImageManifest{}, err
|
||||
}
|
||||
|
||||
// Compatibility with image manifests created before
|
||||
// descriptor, newer versions omit Digest and Platform
|
||||
if manifestInfo.Digest != "" {
|
||||
mediaType, raw, err := manifestInfo.Payload()
|
||||
if err != nil {
|
||||
return types.ImageManifest{}, err
|
||||
}
|
||||
if dgst := digest.FromBytes(raw); dgst != manifestInfo.Digest {
|
||||
return types.ImageManifest{}, errors.Errorf("invalid manifest file %v: image manifest digest mismatch (%v != %v)", filename, manifestInfo.Digest, dgst)
|
||||
}
|
||||
manifestInfo.ImageManifest.Descriptor = ocispec.Descriptor{
|
||||
Digest: manifestInfo.Digest,
|
||||
Size: int64(len(raw)),
|
||||
MediaType: mediaType,
|
||||
Platform: types.OCIPlatform(manifestInfo.Platform),
|
||||
}
|
||||
}
|
||||
|
||||
return manifestInfo.ImageManifest, nil
|
||||
}
|
||||
|
||||
// GetList returns all the local manifests for a transaction
|
||||
|
||||
@ -8,15 +8,46 @@ import (
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ImageManifest contains info to output for a manifest object.
|
||||
type ImageManifest struct {
|
||||
Ref *SerializableNamed
|
||||
Digest digest.Digest
|
||||
Ref *SerializableNamed
|
||||
Descriptor ocispec.Descriptor
|
||||
|
||||
// SchemaV2Manifest is used for inspection
|
||||
// TODO: Deprecate this and store manifest blobs
|
||||
SchemaV2Manifest *schema2.DeserializedManifest `json:",omitempty"`
|
||||
Platform manifestlist.PlatformSpec
|
||||
}
|
||||
|
||||
// OCIPlatform creates an OCI platform from a manifest list platform spec
|
||||
func OCIPlatform(ps *manifestlist.PlatformSpec) *ocispec.Platform {
|
||||
if ps == nil {
|
||||
return nil
|
||||
}
|
||||
return &ocispec.Platform{
|
||||
Architecture: ps.Architecture,
|
||||
OS: ps.OS,
|
||||
OSVersion: ps.OSVersion,
|
||||
OSFeatures: ps.OSFeatures,
|
||||
Variant: ps.Variant,
|
||||
}
|
||||
}
|
||||
|
||||
// PlatformSpecFromOCI creates a platform spec from OCI platform
|
||||
func PlatformSpecFromOCI(p *ocispec.Platform) *manifestlist.PlatformSpec {
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
return &manifestlist.PlatformSpec{
|
||||
Architecture: p.Architecture,
|
||||
OS: p.OS,
|
||||
OSVersion: p.OSVersion,
|
||||
OSFeatures: p.OSFeatures,
|
||||
Variant: p.Variant,
|
||||
}
|
||||
}
|
||||
|
||||
// Blobs returns the digests for all the blobs referenced by this manifest
|
||||
@ -30,6 +61,7 @@ func (i ImageManifest) Blobs() []digest.Digest {
|
||||
|
||||
// Payload returns the media type and bytes for the manifest
|
||||
func (i ImageManifest) Payload() (string, []byte, error) {
|
||||
// TODO: If available, read content from a content store by digest
|
||||
switch {
|
||||
case i.SchemaV2Manifest != nil:
|
||||
return i.SchemaV2Manifest.Payload()
|
||||
@ -51,18 +83,11 @@ func (i ImageManifest) References() []distribution.Descriptor {
|
||||
|
||||
// NewImageManifest returns a new ImageManifest object. The values for Platform
|
||||
// are initialized from those in the image
|
||||
func NewImageManifest(ref reference.Named, digest digest.Digest, img Image, manifest *schema2.DeserializedManifest) ImageManifest {
|
||||
platform := manifestlist.PlatformSpec{
|
||||
OS: img.OS,
|
||||
Architecture: img.Architecture,
|
||||
OSVersion: img.OSVersion,
|
||||
OSFeatures: img.OSFeatures,
|
||||
}
|
||||
func NewImageManifest(ref reference.Named, desc ocispec.Descriptor, manifest *schema2.DeserializedManifest) ImageManifest {
|
||||
return ImageManifest{
|
||||
Ref: &SerializableNamed{Named: ref},
|
||||
Digest: digest,
|
||||
Descriptor: desc,
|
||||
SchemaV2Manifest: manifest,
|
||||
Platform: platform,
|
||||
}
|
||||
}
|
||||
|
||||
@ -87,21 +112,3 @@ func (s *SerializableNamed) UnmarshalJSON(b []byte) error {
|
||||
func (s *SerializableNamed) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(s.String())
|
||||
}
|
||||
|
||||
// Image is the minimal set of fields required to set default platform settings
|
||||
// on a manifest.
|
||||
type Image struct {
|
||||
Architecture string `json:"architecture,omitempty"`
|
||||
OS string `json:"os,omitempty"`
|
||||
OSVersion string `json:"os.version,omitempty"`
|
||||
OSFeatures []string `json:"os.features,omitempty"`
|
||||
}
|
||||
|
||||
// NewImageFromJSON creates an Image configuration from json.
|
||||
func NewImageFromJSON(src []byte) (*Image, error) {
|
||||
img := &Image{}
|
||||
if err := json.Unmarshal(src, img); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return img, nil
|
||||
}
|
||||
|
||||
@ -2,6 +2,7 @@ package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/cli/cli/manifest/types"
|
||||
@ -14,6 +15,7 @@ import (
|
||||
distclient "github.com/docker/distribution/registry/client"
|
||||
"github.com/docker/docker/registry"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@ -72,7 +74,7 @@ func getManifest(ctx context.Context, repo distribution.Repository, ref referenc
|
||||
}
|
||||
|
||||
func pullManifestSchemaV2(ctx context.Context, ref reference.Named, repo distribution.Repository, mfst schema2.DeserializedManifest) (types.ImageManifest, error) {
|
||||
manifestDigest, err := validateManifestDigest(ref, mfst)
|
||||
manifestDesc, err := validateManifestDigest(ref, mfst)
|
||||
if err != nil {
|
||||
return types.ImageManifest{}, err
|
||||
}
|
||||
@ -81,11 +83,16 @@ func pullManifestSchemaV2(ctx context.Context, ref reference.Named, repo distrib
|
||||
return types.ImageManifest{}, err
|
||||
}
|
||||
|
||||
img, err := types.NewImageFromJSON(configJSON)
|
||||
if err != nil {
|
||||
if manifestDesc.Platform == nil {
|
||||
manifestDesc.Platform = &ocispec.Platform{}
|
||||
}
|
||||
|
||||
// Fill in os and architecture fields from config JSON
|
||||
if err := json.Unmarshal(configJSON, manifestDesc.Platform); err != nil {
|
||||
return types.ImageManifest{}, err
|
||||
}
|
||||
return types.NewImageManifest(ref, manifestDigest, *img, &mfst), nil
|
||||
|
||||
return types.NewImageManifest(ref, manifestDesc, &mfst), nil
|
||||
}
|
||||
|
||||
func pullManifestSchemaV2ImageConfig(ctx context.Context, dgst digest.Digest, repo distribution.Repository) ([]byte, error) {
|
||||
@ -110,29 +117,26 @@ func pullManifestSchemaV2ImageConfig(ctx context.Context, dgst digest.Digest, re
|
||||
|
||||
// validateManifestDigest computes the manifest digest, and, if pulling by
|
||||
// digest, ensures that it matches the requested digest.
|
||||
func validateManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
|
||||
_, canonical, err := mfst.Payload()
|
||||
func validateManifestDigest(ref reference.Named, mfst distribution.Manifest) (ocispec.Descriptor, error) {
|
||||
mediaType, canonical, err := mfst.Payload()
|
||||
if err != nil {
|
||||
return "", err
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
desc := ocispec.Descriptor{
|
||||
Digest: digest.FromBytes(canonical),
|
||||
Size: int64(len(canonical)),
|
||||
MediaType: mediaType,
|
||||
}
|
||||
|
||||
// If pull by digest, then verify the manifest digest.
|
||||
if digested, isDigested := ref.(reference.Canonical); isDigested {
|
||||
verifier := digested.Digest().Verifier()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if _, err := verifier.Write(canonical); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !verifier.Verified() {
|
||||
if digested.Digest() != desc.Digest {
|
||||
err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
|
||||
return "", err
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
return digested.Digest(), nil
|
||||
}
|
||||
|
||||
return digest.FromBytes(canonical), nil
|
||||
return desc, nil
|
||||
}
|
||||
|
||||
// pullManifestList handles "manifest lists" which point to various
|
||||
@ -166,7 +170,10 @@ func pullManifestList(ctx context.Context, ref reference.Named, repo distributio
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
imageManifest.Platform = manifestDescriptor.Platform
|
||||
|
||||
// Replace platform from config
|
||||
imageManifest.Descriptor.Platform = types.OCIPlatform(&manifestDescriptor.Platform)
|
||||
|
||||
infos = append(infos, imageManifest)
|
||||
}
|
||||
return infos, nil
|
||||
|
||||
@ -1,2 +0,0 @@
|
||||
Tianon Gravi <admwiggin@gmail.com> (@tianon)
|
||||
Jessie Frazelle <jess@docker.com> (@jfrazelle)
|
||||
@ -288,10 +288,9 @@ __docker_complete_networks() {
|
||||
COMPREPLY=( $(compgen -W "$(__docker_networks "$@")" -- "$current") )
|
||||
}
|
||||
|
||||
# shellcheck disable=SC2128,SC2178
|
||||
__docker_complete_containers_in_network() {
|
||||
local containers=$(__docker_q network inspect -f '{{range $i, $c := .Containers}}{{$i}} {{$c.Name}} {{end}}' "$1")
|
||||
COMPREPLY=( $(compgen -W "$containers" -- "$cur") )
|
||||
local containers=($(__docker_q network inspect -f '{{range $i, $c := .Containers}}{{$i}} {{$c.Name}} {{end}}' "$1"))
|
||||
COMPREPLY=( $(compgen -W "${containers[*]}" -- "$cur") )
|
||||
}
|
||||
|
||||
# __docker_volumes returns a list of all volumes. Additional options to
|
||||
@ -347,8 +346,7 @@ __docker_plugins_bundled() {
|
||||
for del in "${remove[@]}" ; do
|
||||
plugins=(${plugins[@]/$del/})
|
||||
done
|
||||
# shellcheck disable=SC2145
|
||||
echo "${plugins[@]} ${add[@]}"
|
||||
echo "${plugins[@]}" "${add[@]}"
|
||||
}
|
||||
|
||||
# __docker_complete_plugins_bundled applies completion of plugins based on the current
|
||||
@ -584,6 +582,31 @@ __docker_daemon_os_is() {
|
||||
[ "$actual_os" = "$expected_os" ]
|
||||
}
|
||||
|
||||
# __docker_stack_orchestrator_is tests whether the client is configured to use
|
||||
# the orchestrator that is passed in as the first argument.
|
||||
__docker_stack_orchestrator_is() {
|
||||
case "$1" in
|
||||
kubernetes)
|
||||
if [ -z "$stack_orchestrator_is_kubernetes" ] ; then
|
||||
__docker_q stack ls --help | grep -qe --namespace
|
||||
stack_orchestrator_is_kubernetes=$?
|
||||
fi
|
||||
return $stack_orchestrator_is_kubernetes
|
||||
;;
|
||||
swarm)
|
||||
if [ -z "$stack_orchestrator_is_swarm" ] ; then
|
||||
__docker_q stack deploy --help | grep -qe "with-registry-auth"
|
||||
stack_orchestrator_is_swarm=$?
|
||||
fi
|
||||
return $stack_orchestrator_is_swarm
|
||||
;;
|
||||
*)
|
||||
return 1
|
||||
;;
|
||||
|
||||
esac
|
||||
}
|
||||
|
||||
# __docker_pos_first_nonflag finds the position of the first word that is neither
|
||||
# option nor an option's argument. If there are options that require arguments,
|
||||
# you should pass a glob describing those options, e.g. "--option1|-o|--option2"
|
||||
@ -1050,6 +1073,23 @@ __docker_complete_signals() {
|
||||
COMPREPLY=( $( compgen -W "${signals[*]} ${signals[*]#SIG}" -- "$( echo "$cur" | tr '[:lower:]' '[:upper:]')" ) )
|
||||
}
|
||||
|
||||
__docker_complete_stack_orchestrator_options() {
|
||||
case "$prev" in
|
||||
--kubeconfig)
|
||||
_filedir
|
||||
return 0
|
||||
;;
|
||||
--namespace)
|
||||
return 0
|
||||
;;
|
||||
--orchestrator)
|
||||
COMPREPLY=( $( compgen -W "all kubernetes swarm" -- "$cur") )
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
return 1
|
||||
}
|
||||
|
||||
__docker_complete_user_group() {
|
||||
if [[ $cur == *:* ]] ; then
|
||||
COMPREPLY=( $(compgen -g -- "${cur#*:}") )
|
||||
@ -1395,7 +1435,7 @@ _docker_container_commit() {
|
||||
_docker_container_cp() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--follow-link -L --help" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--archive -a --follow-link -L --help" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
local counter=$(__docker_pos_first_nonflag)
|
||||
@ -1414,8 +1454,7 @@ _docker_container_cp() {
|
||||
local containers=( ${COMPREPLY[@]} )
|
||||
|
||||
COMPREPLY=( $( compgen -W "${files[*]} ${containers[*]}" -- "$cur" ) )
|
||||
# shellcheck disable=SC2128
|
||||
if [[ "$COMPREPLY" == *: ]]; then
|
||||
if [[ "${COMPREPLY[*]}" = *: ]]; then
|
||||
__docker_nospace
|
||||
fi
|
||||
return
|
||||
@ -1913,8 +1952,7 @@ _docker_container_run_and_create() {
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=( $( compgen -W 'none host private shareable container:' -- "$cur" ) )
|
||||
# shellcheck disable=SC2128
|
||||
if [ "$COMPREPLY" = "container:" ]; then
|
||||
if [ "${COMPREPLY[*]}" = "container:" ]; then
|
||||
__docker_nospace
|
||||
fi
|
||||
;;
|
||||
@ -1968,8 +2006,7 @@ _docker_container_run_and_create() {
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) )
|
||||
# shellcheck disable=SC2128
|
||||
if [ "$COMPREPLY" = "container:" ]; then
|
||||
if [ "${COMPREPLY[*]}" = "container:" ]; then
|
||||
__docker_nospace
|
||||
fi
|
||||
;;
|
||||
@ -2032,15 +2069,14 @@ _docker_container_run_and_create() {
|
||||
|
||||
_docker_container_start() {
|
||||
__docker_complete_detach_keys && return
|
||||
# shellcheck disable=SC2078
|
||||
case "$prev" in
|
||||
--checkpoint)
|
||||
if [ __docker_daemon_is_experimental ] ; then
|
||||
if __docker_daemon_is_experimental ; then
|
||||
return
|
||||
fi
|
||||
;;
|
||||
--checkpoint-dir)
|
||||
if [ __docker_daemon_is_experimental ] ; then
|
||||
if __docker_daemon_is_experimental ; then
|
||||
_filedir -d
|
||||
return
|
||||
fi
|
||||
@ -2222,6 +2258,7 @@ _docker_daemon() {
|
||||
--cpu-rt-period
|
||||
--cpu-rt-runtime
|
||||
--data-root
|
||||
--default-address-pool
|
||||
--default-gateway
|
||||
--default-gateway-v6
|
||||
--default-runtime
|
||||
@ -3219,7 +3256,7 @@ _docker_service_logs() {

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--follow -f --help --no-resolve --no-task-ids --no-trunc --since --tail --timestamps -t" -- "$cur" ) )
COMPREPLY=( $( compgen -W "--details --follow -f --help --no-resolve --no-task-ids --no-trunc --raw --since --tail --timestamps -t" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag '--since|--tail')
@ -3401,6 +3438,7 @@ _docker_service_update_and_create() {
local boolean_options="
--detach -d
--help
--init
--no-healthcheck
--read-only
--tty -t
@ -4376,11 +4414,15 @@ _docker_stack() {
remove
up
"

__docker_complete_stack_orchestrator_options && return
__docker_subcommands "$subcommands $aliases" && return

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
local options="--help --orchestrator"
__docker_stack_orchestrator_is kubernetes && options+=" --kubeconfig"
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
;;
*)
COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) )
@ -4389,12 +4431,12 @@ _docker_stack() {
}

_docker_stack_deploy() {
__docker_complete_stack_orchestrator_options && return

case "$prev" in
--bundle-file)
if __docker_daemon_is_experimental ; then
_filedir dab
return
fi
_filedir dab
return
;;
--compose-file|-c)
_filedir yml
@ -4408,12 +4450,14 @@ _docker_stack_deploy() {

case "$cur" in
-*)
local options="--compose-file -c --help --prune --resolve-image --with-registry-auth"
__docker_daemon_is_experimental && options+=" --bundle-file"
local options="--compose-file -c --help --orchestrator"
__docker_daemon_is_experimental && __docker_stack_orchestrator_is swarm && options+=" --bundle-file"
__docker_stack_orchestrator_is kubernetes && options+=" --kubeconfig --namespace"
__docker_stack_orchestrator_is swarm && options+=" --prune --resolve-image --with-registry-auth"
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag '--bundle-file|--compose-file|-c|--resolve-image')
local counter=$(__docker_pos_first_nonflag '--bundle-file|--compose-file|-c|--kubeconfig|--namespace|--orchestrator|--resolve-image')
if [ "$cword" -eq "$counter" ]; then
__docker_complete_stacks
fi
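The `docker stack` completions above now offer `--orchestrator`, plus `--kubeconfig` and `--namespace` when the Kubernetes orchestrator is selected. A hedged usage sketch; the kubeconfig path, namespace, and stack name are placeholders, not taken from this changeset:

```bash
# Deploy the same stack against either orchestrator; paths and names are placeholders.
docker stack deploy --orchestrator swarm --compose-file docker-compose.yml mystack
docker stack deploy --orchestrator kubernetes --kubeconfig ~/.kube/config \
  --namespace mystack-ns --compose-file docker-compose.yml mystack
```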
@ -4430,6 +4474,8 @@ _docker_stack_list() {
}

_docker_stack_ls() {
__docker_complete_stack_orchestrator_options && return

case "$prev" in
--format)
return
@ -4438,7 +4484,9 @@ _docker_stack_ls() {

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--format --help" -- "$cur" ) )
local options="--format --help --orchestrator"
__docker_stack_orchestrator_is kubernetes && options+=" --all-namespaces --kubeconfig --namespace"
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
;;
esac
}
@ -4460,6 +4508,8 @@ _docker_stack_ps() {
;;
esac

__docker_complete_stack_orchestrator_options && return

case "$prev" in
--filter|-f)
COMPREPLY=( $( compgen -S = -W "id name desired-state" -- "$cur" ) )
@ -4473,10 +4523,12 @@ _docker_stack_ps() {

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--filter -f --format --help --no-resolve --no-trunc --quiet -q" -- "$cur" ) )
local options="--filter -f --format --help --no-resolve --no-trunc --orchestrator --quiet -q"
__docker_stack_orchestrator_is kubernetes && options+=" --all-namespaces --kubeconfig --namespace"
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag '--filter|-f')
local counter=$(__docker_pos_first_nonflag '--all-namespaces|--filter|-f|--format|--kubeconfig|--namespace')
if [ "$cword" -eq "$counter" ]; then
__docker_complete_stacks
fi
@ -4489,9 +4541,13 @@ _docker_stack_remove() {
}

_docker_stack_rm() {
__docker_complete_stack_orchestrator_options && return

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
local options="--help --orchestrator"
__docker_stack_orchestrator_is kubernetes && options+=" --kubeconfig --namespace"
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
;;
*)
__docker_complete_stacks
@ -4515,6 +4571,8 @@ _docker_stack_services() {
;;
esac

__docker_complete_stack_orchestrator_options && return

case "$prev" in
--filter|-f)
COMPREPLY=( $( compgen -S = -W "id label name" -- "$cur" ) )
@ -4528,10 +4586,12 @@ _docker_stack_services() {

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--filter -f --format --help --quiet -q" -- "$cur" ) )
local options="--filter -f --format --help --orchestrator --quiet -q"
__docker_stack_orchestrator_is kubernetes && options+=" --kubeconfig --namespace"
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag '--filter|-f|--format')
local counter=$(__docker_pos_first_nonflag '--filter|-f|--format|--kubeconfig|--namespace|--orchestrator')
if [ "$cword" -eq "$counter" ]; then
__docker_complete_stacks
fi
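All of the stack subcommands above gate their Kubernetes-only flags on a `__docker_stack_orchestrator_is` helper, whose result is cached in the variables declared later in `_docker()`. The helper itself is not shown in these hunks; the sketch below only illustrates the shape of such a check and assumes an environment-variable fallback, so treat it as hypothetical rather than the script's real implementation:

```bash
# Hypothetical sketch of an orchestrator check of the kind the completions above call.
# The real helper caches its answer; DOCKER_STACK_ORCHESTRATOR is an assumed source.
__docker_stack_orchestrator_is() {
	local wanted="$1"
	local current="${DOCKER_STACK_ORCHESTRATOR:-swarm}"
	[ "$current" = "$wanted" ]
}
```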
@ -4618,6 +4678,7 @@ _docker_system_events() {
enable
exec_create
exec_detach
exec_die
exec_start
export
health_status
@ -4655,6 +4716,10 @@ _docker_system_events() {
__docker_complete_networks --cur "${cur##*=}"
return
;;
scope)
COMPREPLY=( $( compgen -W "local swarm" -- "${cur##*=}" ) )
return
;;
type)
COMPREPLY=( $( compgen -W "config container daemon image network plugin secret service volume" -- "${cur##*=}" ) )
return
@ -4667,7 +4732,7 @@ _docker_system_events() {

case "$prev" in
--filter|-f)
COMPREPLY=( $( compgen -S = -W "container daemon event image label network type volume" -- "$cur" ) )
COMPREPLY=( $( compgen -S = -W "container daemon event image label network scope type volume" -- "$cur" ) )
__docker_nospace
return
;;
@ -4793,6 +4858,8 @@ _docker_top() {
}

_docker_version() {
__docker_complete_stack_orchestrator_options && return

case "$prev" in
--format|-f)
return
@ -4801,7 +4868,9 @@ _docker_version() {

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) )
local options="--format -f --help"
__docker_stack_orchestrator_is kubernetes && options+=" --kubeconfig"
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
;;
esac
}
@ -5031,6 +5100,9 @@ _docker() {

local host config daemon_os

# variables to cache client info, populated on demand for performance reasons
local stack_orchestrator_is_kubernetes stack_orchestrator_is_swarm

COMPREPLY=()
local cur prev words cword
_get_comp_words_by_ref -n : cur prev words cword
@ -431,7 +431,7 @@ __docker_complete_events_filter() {
integer ret=1
declare -a opts

opts=('container' 'daemon' 'event' 'image' 'label' 'network' 'type' 'volume')
opts=('container' 'daemon' 'event' 'image' 'label' 'network' 'scope' 'type' 'volume')

if compset -P '*='; then
case "${${words[-1]%=*}#*=}" in
@ -461,6 +461,11 @@ __docker_complete_events_filter() {
(network)
__docker_complete_networks && ret=0
;;
(scope)
local -a scope_opts
scope_opts=('local' 'swarm')
_describe -t scope-filter-opts "scope filter options" scope_opts && ret=0
;;
(type)
local -a type_opts
type_opts=('container' 'daemon' 'image' 'network' 'volume')
@ -923,7 +928,7 @@ __docker_container_subcommand() {
local state
_arguments $(__docker_arguments) \
$opts_help \
opts_create_run_update \
$opts_create_run_update \
"($help -)*: :->values" && ret=0
case $state in
(values)
@ -2209,7 +2214,7 @@ __docker_stack_subcommand() {
_arguments $(__docker_arguments) \
$opts_help \
"($help)--bundle-file=[Path to a Distributed Application Bundle file]:dab:_files -g \"*.dab\"" \
"($help -c --compose-file)"{-c=,--compose-file=}"[Path to a Compose file]:compose file:_files -g \"*.(yml|yaml)\"" \
"($help -c --compose-file)"{-c=,--compose-file=}"[Path to a Compose file, or '-' to read from stdin]:compose file:_files -g \"*.(yml|yaml)\"" \
"($help)--with-registry-auth[Send registry authentication details to Swarm agents]" \
"($help -):stack:__docker_complete_stacks" && ret=0
;;
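Both the bash and zsh hunks above add `scope` to the `docker events` filter completions. The command they are completing looks like this, with the filter values the completions now offer:

```bash
# Only show swarm-scoped network events.
docker events --filter scope=swarm --filter type=network
```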
@ -28,7 +28,7 @@ Aliases:

Options:
--bundle-file string     Path to a Distributed Application Bundle file
--compose-file string    Path to a Compose file
--compose-file string    Path to a Compose file, or "-" to read from stdin
--help                   Print usage
--prune                  Prune services that are no longer referenced
--with-registry-auth     Send registry authentication details to Swarm agents
@ -1432,11 +1432,12 @@ The list of currently supported options that can be reconfigured is this:
specified at container creation. It defaults to "default" which is
the runtime shipped with the official docker packages.
- `runtimes`: it updates the list of available OCI runtimes that can
be used to run containers
be used to run containers.
- `authorization-plugin`: specifies the authorization plugins to use.
- `allow-nondistributable-artifacts`: Replaces the set of registries to which the daemon will push nondistributable artifacts with a new set of registries.
- `insecure-registries`: it replaces the daemon insecure registries with a new set of insecure registries. If some existing insecure registries in daemon's configuration are not in newly reloaded insecure registries, these existing ones will be removed from daemon's config.
- `registry-mirrors`: it replaces the daemon registry mirrors with a new set of registry mirrors. If some existing registry mirrors in daemon's configuration are not in newly reloaded registry mirrors, these existing ones will be removed from daemon's config.
- `shutdown-timeout`: it replaces the daemon's existing configuration timeout with a new timeout for shutting down all containers.

Updating and reloading the cluster configurations such as `--cluster-store`,
`--cluster-advertise` and `--cluster-store-opts` will take effect only if
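As a concrete illustration of the reload behaviour described above, one of the reloadable keys can be changed in `daemon.json` and picked up with `SIGHUP`; the file path and value below are just an example, not part of this change:

```bash
# Example only: raise the shutdown timeout, then ask the running daemon
# to reload its configuration without restarting containers.
echo '{ "shutdown-timeout": 30 }' | sudo tee /etc/docker/daemon.json
sudo kill -SIGHUP "$(pidof dockerd)"
```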
@ -80,7 +80,7 @@ The following example uses a template without headers and outputs the
`ID` and `CreatedSince` entries separated by a colon for the `busybox` image:

```bash
$ docker history --format "{{.ID}}: {{.CreatedAt}}" busybox
$ docker history --format "{{.ID}}: {{.CreatedSince}}" busybox

f6e427c148a7: 4 weeks ago
<missing>: 4 weeks ago
@ -25,7 +25,7 @@ Aliases:

Options:
--bundle-file string         Path to a Distributed Application Bundle file
-c, --compose-file strings   Path to a Compose file
-c, --compose-file strings   Path to a Compose file, or "-" to read from stdin
--help                       Print usage
--kubeconfig string          Kubernetes config file
--namespace string           Kubernetes namespace to use
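The new `"-"` value documented above lets `docker stack deploy` read the Compose file from standard input, for example:

```bash
# Read the Compose file from stdin instead of a path; "mystack" is a placeholder name.
docker stack deploy --compose-file - mystack < docker-compose.yml
```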
24
e2e/stack/help_test.go
Normal file
@ -0,0 +1,24 @@
package stack

import (
"fmt"
"testing"

"gotest.tools/golden"
"gotest.tools/icmd"
)

func TestStackDeployHelp(t *testing.T) {
t.Run("Swarm", func(t *testing.T) {
testStackDeployHelp(t, "swarm")
})
t.Run("Kubernetes", func(t *testing.T) {
testStackDeployHelp(t, "kubernetes")
})
}

func testStackDeployHelp(t *testing.T, orchestrator string) {
result := icmd.RunCommand("docker", "stack", "deploy", "--orchestrator", orchestrator, "--help")
result.Assert(t, icmd.Success)
golden.Assert(t, result.Stdout(), fmt.Sprintf("stack-deploy-help-%s.golden", orchestrator))
}
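The new help test compares `docker stack deploy --help` output against the golden files that follow. A hedged way to run just this test locally, assuming the e2e environment is set up and a `docker` binary under test is on `PATH`:

```bash
# Run only the stack deploy help test; -v prints the Swarm/Kubernetes subtests.
go test -v -run TestStackDeployHelp ./e2e/stack/
```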
14
e2e/stack/testdata/stack-deploy-help-kubernetes.golden
vendored
Normal file
@ -0,0 +1,14 @@

Usage:  docker stack deploy [OPTIONS] STACK

Deploy a new stack or update an existing stack

Aliases:
deploy, up

Options:
-c, --compose-file strings   Path to a Compose file, or "-" to read
from stdin
--kubeconfig string          Kubernetes config file
--namespace string           Kubernetes namespace to use
--orchestrator string        Orchestrator to use (swarm|kubernetes|all)
19
e2e/stack/testdata/stack-deploy-help-swarm.golden
vendored
Normal file
@ -0,0 +1,19 @@

Usage:  docker stack deploy [OPTIONS] STACK

Deploy a new stack or update an existing stack

Aliases:
deploy, up

Options:
--bundle-file string         Path to a Distributed Application Bundle file
-c, --compose-file strings   Path to a Compose file, or "-" to read
from stdin
--orchestrator string        Orchestrator to use (swarm|kubernetes|all)
--prune                      Prune services that are no longer referenced
--resolve-image string       Query the registry to resolve image digest
and supported platforms
("always"|"changed"|"never") (default "always")
--with-registry-auth         Send registry authentication details to
Swarm agents
@ -41,7 +41,7 @@ The following example uses a template without headers and outputs the
`ID` and `CreatedSince` entries separated by a colon for all images:

```bash
$ docker images --format "{{.ID}}: {{.Created}} ago"
$ docker images --format "{{.ID}}: {{.CreatedSince}} ago"

cc1b61406712: 2 weeks ago
<missing>: 2 weeks ago
27
vendor.conf
@ -1,13 +1,14 @@
github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371
github.com/containerd/containerd 08f7ee9828af1783dc98cc5cc1739e915697c667
github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
github.com/coreos/etcd v3.2.1
github.com/cpuguy83/go-md2man v1.0.8
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/distribution 83389a148052d74ac602f5f1d62f86ff2f3c4aa5
github.com/docker/docker c752b0991e31ba9869ab6a0661af57e9423874fb
github.com/docker/docker-credential-helpers 3c90bd29a46b943b2a9842987b58fb91a7c1819b
github.com/docker/docker 371b590ace0d4a329cd6a3328d31d33c4f77a780 https://github.com/docker/engine
github.com/docker/docker-credential-helpers 5241b46610f2491efdf9d1c85f1ddf5b02f6d962
# the docker/go package contains a customized version of canonical/json
# and is used by Notary. The package is periodically rebased on current Go versions.
github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06
@ -15,7 +16,7 @@ github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
github.com/docker/swarmkit edd5641391926a50bc5f7040e20b7efc05003c26
github.com/docker/swarmkit 199cf49cd99690135d99e52a1907ec82e8113c4f
github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46
github.com/emicklei/go-restful-swagger12 dcef7f55730566d41eae5db10e7d6981829720f6
github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff
@ -46,15 +47,15 @@ github.com/json-iterator/go 6240e1e7983a85228f7fd9c3e1b6932d46ec58e2
github.com/mailru/easyjson d5b7844b561a7bc640052f1b935f7b800330d7e0
github.com/mattn/go-shellwords v1.0.3
github.com/matttproud/golang_protobuf_extensions v1.0.0
github.com/Microsoft/go-winio v0.4.6
github.com/Microsoft/go-winio v0.4.8
github.com/miekg/pkcs11 5f6e0d0dad6f472df908c8e968a98ef00c9224bb
github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715
github.com/moby/buildkit b062a2d8ddbaa477c25c63d68a9cffbb43f6e474
github.com/moby/buildkit 9acf51e49185b348608e0096b2903dd72907adcb
github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b
github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340
github.com/opencontainers/runc ad0f5255060d36872be04de22f8731f38ef2d7b1
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
github.com/peterbourgon/diskv 5f041e8faa004a95c88a202771f4cc3e991971e6
github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
@ -84,12 +85,12 @@ google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9
google.golang.org/grpc v1.12.0
gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
k8s.io/api kubernetes-1.8.2
k8s.io/apimachinery kubernetes-1.8.2
k8s.io/client-go kubernetes-1.8.2
k8s.io/kubernetes v1.8.2
k8s.io/kube-openapi 61b46af70dfed79c6d24530cd23b41440a7f22a5
k8s.io/api kubernetes-1.8.14
k8s.io/apimachinery kubernetes-1.8.14
k8s.io/client-go kubernetes-1.8.14
k8s.io/kubernetes v1.8.14
k8s.io/kube-openapi 0c329704159e3b051aafac400b15baacf2a94a04
vbom.ml/util 928aaa586d7718c70f4090ddf83f2b34c16fdc8d
github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925
github.com/containerd/console 5d1b48d6114b8c9666f0c8b916f871af97b0a761
github.com/tonistiigi/units 29de085e9400559bd68aea2e7bc21566e7b8281d
github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
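vendor.conf pins each vendored dependency to the revision (and optional alternate URL) shown above. If, as in other moby-family repositories, the `vndr` tool is used to materialise `vendor/`, re-vendoring one of the bumped packages would look roughly like this; the commands are an assumption for illustration, not taken from this changeset:

```bash
# Assumed workflow: re-vendor a single dependency after editing vendor.conf.
go get github.com/LK4D4/vndr
vndr github.com/containerd/console
```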
3
vendor/github.com/Microsoft/go-winio/file.go
generated
vendored
@ -16,7 +16,6 @@ import (
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
//sys timeBeginPeriod(period uint32) (n int32) = winmm.timeBeginPeriod

type atomicBool int32

@ -153,8 +152,6 @@ func (f *win32File) prepareIo() (*ioOperation, error) {

// ioCompletionProcessor processes completed async IOs forever
func ioCompletionProcessor(h syscall.Handle) {
// Set the timer resolution to 1. This fixes a performance regression in golang 1.6.
timeBeginPeriod(1)
for {
var bytes uint32
var key uintptr

15
vendor/github.com/Microsoft/go-winio/pipe.go
generated
vendored
@ -121,6 +121,11 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
// zero-byte message, ensure that all future Read() calls
// also return EOF.
f.readEOF = true
} else if err == syscall.ERROR_MORE_DATA {
// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
// and the message still has more bytes. Treat this as a success, since
// this package presents all named pipes as byte streams.
err = nil
}
return n, err
}
@ -175,16 +180,6 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
return nil, err
}

var state uint32
err = getNamedPipeHandleState(h, &state, nil, nil, nil, nil, 0)
if err != nil {
return nil, err
}

if state&cPIPE_READMODE_MESSAGE != 0 {
return nil, &os.PathError{Op: "open", Path: path, Err: errors.New("message readmode pipes not supported")}
}

f, err := makeWin32File(h)
if err != nil {
syscall.Close(h)

8
vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
generated
vendored
@ -38,14 +38,12 @@ func errnoErr(e syscall.Errno) error {

var (
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modwinmm = windows.NewLazySystemDLL("winmm.dll")
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")

procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod")
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
procCreateFileW = modkernel32.NewProc("CreateFileW")
@ -122,12 +120,6 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro
return
}

func timeBeginPeriod(period uint32) (n int32) {
r0, _, _ := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0)
n = int32(r0)
return
}

func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
20
vendor/github.com/containerd/console/console_linux.go
generated
vendored
@ -72,7 +72,7 @@ func NewEpoller() (*Epoller, error) {
}, nil
}

// Add creates a epoll console based on the provided console. The console will
// Add creates an epoll console based on the provided console. The console will
// be registered with EPOLLET (i.e. using edge-triggered notification) and its
// file descriptor will be set to non-blocking mode. After this, user should use
// the return console to perform I/O.
@ -134,7 +134,7 @@ func (e *Epoller) Wait() error {
}
}

// Close unregister the console's file descriptor from epoll interface
// CloseConsole unregisters the console's file descriptor from epoll interface
func (e *Epoller) CloseConsole(fd int) error {
e.mu.Lock()
defer e.mu.Unlock()
@ -149,12 +149,12 @@ func (e *Epoller) getConsole(sysfd int) *EpollConsole {
return f
}

// Close the epoll fd
// Close closes the epoll fd
func (e *Epoller) Close() error {
return unix.Close(e.efd)
}

// EpollConsole acts like a console but register its file descriptor with a
// EpollConsole acts like a console but registers its file descriptor with an
// epoll fd and uses epoll API to perform I/O.
type EpollConsole struct {
Console
@ -167,7 +167,7 @@ type EpollConsole struct {
// Read reads up to len(p) bytes into p. It returns the number of bytes read
// (0 <= n <= len(p)) and any error encountered.
//
// If the console's read returns EAGAIN or EIO, we assumes that its a
// If the console's read returns EAGAIN or EIO, we assume that it's a
// temporary error because the other side went away and wait for the signal
// generated by epoll event to continue.
func (ec *EpollConsole) Read(p []byte) (n int, err error) {
@ -207,7 +207,7 @@ func (ec *EpollConsole) Read(p []byte) (n int, err error) {
// written from p (0 <= n <= len(p)) and any error encountered that caused
// the write to stop early.
//
// If writes to the console returns EAGAIN or EIO, we assumes that its a
// If writes to the console returns EAGAIN or EIO, we assume that it's a
// temporary error because the other side went away and wait for the signal
// generated by epoll event to continue.
func (ec *EpollConsole) Write(p []byte) (n int, err error) {
@ -224,7 +224,7 @@ func (ec *EpollConsole) Write(p []byte) (n int, err error) {
} else {
hangup = (err == unix.EAGAIN || err == unix.EIO)
}
// if the other end disappear, assume this is temporary and wait for the
// if the other end disappears, assume this is temporary and wait for the
// signal to continue again.
if hangup {
ec.writec.Wait()
@ -242,7 +242,7 @@ func (ec *EpollConsole) Write(p []byte) (n int, err error) {
return n, err
}

// Close closed the file descriptor and signal call waiters for this fd.
// Shutdown closes the file descriptor and signals call waiters for this fd.
// It accepts a callback which will be called with the console's fd. The
// callback typically will be used to do further cleanup such as unregister the
// console's fd from the epoll interface.
@ -262,10 +262,14 @@ func (ec *EpollConsole) Shutdown(close func(int) error) error {

// signalRead signals that the console is readable.
func (ec *EpollConsole) signalRead() {
ec.readc.L.Lock()
ec.readc.Signal()
ec.readc.L.Unlock()
}

// signalWrite signals that the console is writable.
func (ec *EpollConsole) signalWrite() {
ec.writec.L.Lock()
ec.writec.Signal()
ec.writec.L.Unlock()
}
108
vendor/github.com/containerd/console/console_windows.go
generated
vendored
@ -17,7 +17,6 @@
package console

import (
"fmt"
"os"

"github.com/pkg/errors"
@ -29,90 +28,55 @@ var (
ErrNotImplemented = errors.New("not implemented")
)

func (m *master) initStdios() {
m.in = windows.Handle(os.Stdin.Fd())
if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil {
// Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil {
vtInputSupported = true
}
// Unconditionally set the console mode back even on failure because SetConsoleMode
// remembers invalid bits on input handles.
windows.SetConsoleMode(m.in, m.inMode)
} else {
fmt.Printf("failed to get console mode for stdin: %v\n", err)
}

m.out = windows.Handle(os.Stdout.Fd())
if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil {
if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil {
m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
func (m *master) init() {
m.h = windows.Handle(m.f.Fd())
if err := windows.GetConsoleMode(m.h, &m.mode); err == nil {
if m.f == os.Stdin {
// Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
if err = windows.SetConsoleMode(m.h, m.mode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil {
vtInputSupported = true
}
// Unconditionally set the console mode back even on failure because SetConsoleMode
// remembers invalid bits on input handles.
windows.SetConsoleMode(m.h, m.mode)
} else if err := windows.SetConsoleMode(m.h, m.mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil {
m.mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
} else {
windows.SetConsoleMode(m.out, m.outMode)
windows.SetConsoleMode(m.h, m.mode)
}
} else {
fmt.Printf("failed to get console mode for stdout: %v\n", err)
}

m.err = windows.Handle(os.Stderr.Fd())
if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil {
if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil {
m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
} else {
windows.SetConsoleMode(m.err, m.errMode)
}
} else {
fmt.Printf("failed to get console mode for stderr: %v\n", err)
}
}

type master struct {
in windows.Handle
inMode uint32

out windows.Handle
outMode uint32

err windows.Handle
errMode uint32
h windows.Handle
mode uint32
f *os.File
}

func (m *master) SetRaw() error {
if err := makeInputRaw(m.in, m.inMode); err != nil {
return err
if m.f == os.Stdin {
if err := makeInputRaw(m.h, m.mode); err != nil {
return err
}
} else {
// Set StdOut and StdErr to raw mode, we ignore failures since
// windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of
// Windows.
windows.SetConsoleMode(m.h, m.mode|windows.DISABLE_NEWLINE_AUTO_RETURN)
}

// Set StdOut and StdErr to raw mode, we ignore failures since
// windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of
// Windows.

windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN)

windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN)

return nil
}

func (m *master) Reset() error {
for _, s := range []struct {
fd windows.Handle
mode uint32
}{
{m.in, m.inMode},
{m.out, m.outMode},
{m.err, m.errMode},
} {
if err := windows.SetConsoleMode(s.fd, s.mode); err != nil {
return errors.Wrap(err, "unable to restore console mode")
}
if err := windows.SetConsoleMode(m.h, m.mode); err != nil {
return errors.Wrap(err, "unable to restore console mode")
}

return nil
}

func (m *master) Size() (WinSize, error) {
var info windows.ConsoleScreenBufferInfo
err := windows.GetConsoleScreenBufferInfo(m.out, &info)
err := windows.GetConsoleScreenBufferInfo(m.h, &info)
if err != nil {
return WinSize{}, errors.Wrap(err, "unable to get console info")
}
@ -134,11 +98,11 @@ func (m *master) ResizeFrom(c Console) error {
}

func (m *master) DisableEcho() error {
mode := m.inMode &^ windows.ENABLE_ECHO_INPUT
mode := m.mode &^ windows.ENABLE_ECHO_INPUT
mode |= windows.ENABLE_PROCESSED_INPUT
mode |= windows.ENABLE_LINE_INPUT

if err := windows.SetConsoleMode(m.in, mode); err != nil {
if err := windows.SetConsoleMode(m.h, mode); err != nil {
return errors.Wrap(err, "unable to set console to disable echo")
}

@ -150,15 +114,15 @@ func (m *master) Close() error {
}

func (m *master) Read(b []byte) (int, error) {
panic("not implemented on windows")
return m.f.Read(b)
}

func (m *master) Write(b []byte) (int, error) {
panic("not implemented on windows")
return m.f.Write(b)
}

func (m *master) Fd() uintptr {
return uintptr(m.in)
return uintptr(m.h)
}

// on windows, console can only be made from os.Std{in,out,err}, hence there
@ -210,7 +174,7 @@ func newMaster(f *os.File) (Console, error) {
if f != os.Stdin && f != os.Stdout && f != os.Stderr {
return nil, errors.New("creating a console from a file is not supported on windows")
}
m := &master{}
m.initStdios()
m := &master{f: f}
m.init()
return m, nil
}
191
vendor/github.com/containerd/containerd/LICENSE
generated
vendored
Normal file
191
vendor/github.com/containerd/containerd/LICENSE
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright The containerd Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
16
vendor/github.com/containerd/containerd/NOTICE
generated
vendored
Normal file
16
vendor/github.com/containerd/containerd/NOTICE
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
Docker
|
||||
Copyright 2012-2015 Docker, Inc.
|
||||
|
||||
This product includes software developed at Docker, Inc. (https://www.docker.com).
|
||||
|
||||
The following is courtesy of our legal counsel:
|
||||
|
||||
|
||||
Use and transfer of Docker may be subject to certain restrictions by the
|
||||
United States and other governments.
|
||||
It is your responsibility to ensure that your use and/or transfer does not
|
||||
violate applicable laws.
|
||||
|
||||
For more information, please see https://www.bis.doc.gov
|
||||
|
||||
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
|
||||
215
vendor/github.com/containerd/containerd/README.md
generated
vendored
Normal file
215
vendor/github.com/containerd/containerd/README.md
generated
vendored
Normal file
@ -0,0 +1,215 @@
|
||||

|
||||
|
||||
[](https://godoc.org/github.com/containerd/containerd)
|
||||
[](https://travis-ci.org/containerd/containerd)
|
||||
[](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
|
||||
[](https://goreportcard.com/report/github.com/containerd/containerd)
|
||||
[](https://bestpractices.coreinfrastructure.org/projects/1271)
|
||||
|
||||
containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc.
|
||||
|
||||
containerd is designed to be embedded into a larger system, rather than being used directly by developers or end-users.
|
||||
|
||||

|
||||
|
||||
## Getting Started
|
||||
|
||||
See our documentation on [containerd.io](https://containerd.io):
|
||||
* [for ops and admins](docs/ops.md)
|
||||
* [namespaces](docs/namespaces.md)
|
||||
* [client options](docs/client-opts.md)
|
||||
|
||||
See how to build containerd from source at [BUILDING](BUILDING.md).
|
||||
|
||||
If you are interested in trying out containerd see our example at [Getting Started](docs/getting-started.md).
|
||||
|
||||
|
||||
## Runtime Requirements
|
||||
|
||||
Runtime requirements for containerd are very minimal. Most interactions with
|
||||
the Linux and Windows container feature sets are handled via [runc](https://github.com/opencontainers/runc) and/or
|
||||
OS-specific libraries (e.g. [hcsshim](https://github.com/Microsoft/hcsshim) for Microsoft). The current required version of `runc` is always listed in [RUNC.md](/RUNC.md).
|
||||
|
||||
There are specific features
|
||||
used by containerd core code and snapshotters that will require a minimum kernel
|
||||
version on Linux. With the understood caveat of distro kernel versioning, a
|
||||
reasonable starting point for Linux is a minimum 4.x kernel version.
|
||||
|
||||
The overlay filesystem snapshotter, used by default, uses features that were
|
||||
finalized in the 4.x kernel series. If you choose to use btrfs, there may
|
||||
be more flexibility in kernel version (minimum recommended is 3.18), but will
|
||||
require the btrfs kernel module and btrfs tools to be installed on your Linux
|
||||
distribution.
|
||||
|
||||
To use Linux checkpoint and restore features, you will need `criu` installed on
|
||||
your system. See more details in [Checkpoint and Restore](#checkpoint-and-restore).
|
||||
|
||||
Build requirements for developers are listed in [BUILDING](BUILDING.md).
|
||||
|
||||
## Features
|
||||
|
||||
### Client
|
||||
|
||||
containerd offers a full client package to help you integrate containerd into your platform.
|
||||
|
||||
```go
|
||||
|
||||
import (
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/cio"
|
||||
)
|
||||
|
||||
|
||||
func main() {
|
||||
client, err := containerd.New("/run/containerd/containerd.sock")
|
||||
defer client.Close()
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### Namespaces
|
||||
|
||||
Namespaces allow multiple consumers to use the same containerd without conflicting with each other. It has the benefit of sharing content but still having separation with containers and images.
|
||||
|
||||
To set a namespace for requests to the API:
|
||||
|
||||
```go
|
||||
context = context.Background()
|
||||
// create a context for docker
|
||||
docker = namespaces.WithNamespace(context, "docker")
|
||||
|
||||
containerd, err := client.NewContainer(docker, "id")
|
||||
```
|
||||
|
||||
To set a default namespace on the client:
|
||||
|
||||
```go
|
||||
client, err := containerd.New(address, containerd.WithDefaultNamespace("docker"))
|
||||
```
|
||||
|
||||
### Distribution
|
||||
|
||||
```go
|
||||
// pull an image
|
||||
image, err := client.Pull(context, "docker.io/library/redis:latest")
|
||||
|
||||
// push an image
|
||||
err := client.Push(context, "docker.io/library/redis:latest", image.Target())
|
||||
```
|
||||
|
||||
### Containers
|
||||
|
||||
In containerd, a container is a metadata object. Resources such as an OCI runtime specification, image, root filesystem, and other metadata can be attached to a container.
|
||||
|
||||
```go
|
||||
redis, err := client.NewContainer(context, "redis-master")
|
||||
defer redis.Delete(context)
|
||||
```
|
||||
|
||||
### OCI Runtime Specification
|
||||
|
||||
containerd fully supports the OCI runtime specification for running containers. We have built in functions to help you generate runtime specifications based on images as well as custom parameters.
|
||||
|
||||
You can specify options when creating a container about how to modify the specification.
|
||||
|
||||
```go
|
||||
redis, err := client.NewContainer(context, "redis-master", containerd.WithNewSpec(oci.WithImageConfig(image)))
|
||||
```
|
||||
|
||||
### Root Filesystems
|
||||
|
||||
containerd allows you to use overlay or snapshot filesystems with your containers. It comes with builtin support for overlayfs and btrfs.
|
||||
|
||||
```go
|
||||
// pull an image and unpack it into the configured snapshotter
|
||||
image, err := client.Pull(context, "docker.io/library/redis:latest", containerd.WithPullUnpack)
|
||||
|
||||
// allocate a new RW root filesystem for a container based on the image
|
||||
redis, err := client.NewContainer(context, "redis-master",
|
||||
containerd.WithNewSnapshot("redis-rootfs", image),
|
||||
containerd.WithNewSpec(oci.WithImageConfig(image)),
|
||||
)
|
||||
|
||||
// use a readonly filesystem with multiple containers
|
||||
for i := 0; i < 10; i++ {
|
||||
id := fmt.Sprintf("id-%s", i)
|
||||
container, err := client.NewContainer(ctx, id,
|
||||
containerd.WithNewSnapshotView(id, image),
|
||||
containerd.WithNewSpec(oci.WithImageConfig(image)),
|
||||
)
|
||||
}
|
||||
```
|
||||
|
||||
### Tasks
|
||||
|
||||
Taking a container object and turning it into a runnable process on a system is done by creating a new `Task` from the container. A task represents the runnable object within containerd.
|
||||
|
||||
```go
|
||||
// create a new task
|
||||
task, err := redis.NewTask(context, cio.Stdio)
|
||||
defer task.Delete(context)
|
||||
|
||||
// the task is now running and has a pid that can be use to setup networking
|
||||
// or other runtime settings outside of containerd
|
||||
pid := task.Pid()
|
||||
|
||||
// start the redis-server process inside the container
|
||||
err := task.Start(context)
|
||||
|
||||
// wait for the task to exit and get the exit status
|
||||
status, err := task.Wait(context)
|
||||
```
|
||||
|
||||
### Checkpoint and Restore
|
||||
|
||||
If you have [criu](https://criu.org/Main_Page) installed on your machine you can checkpoint and restore containers and their tasks. This allow you to clone and/or live migrate containers to other machines.
|
||||
|
||||
```go
|
||||
// checkpoint the task then push it to a registry
|
||||
checkpoint, err := task.Checkpoint(context, containerd.WithExit)
|
||||
|
||||
err := client.Push(context, "myregistry/checkpoints/redis:master", checkpoint)
|
||||
|
||||
// on a new machine pull the checkpoint and restore the redis container
|
||||
image, err := client.Pull(context, "myregistry/checkpoints/redis:master")
|
||||
|
||||
checkpoint := image.Target()
|
||||
|
||||
redis, err = client.NewContainer(context, "redis-master", containerd.WithCheckpoint(checkpoint, "redis-rootfs"))
|
||||
defer container.Delete(context)
|
||||
|
||||
task, err = redis.NewTask(context, cio.Stdio, containerd.WithTaskCheckpoint(checkpoint))
|
||||
defer task.Delete(context)
|
||||
|
||||
err := task.Start(context)
|
||||
```
|
||||
|
||||
### Releases and API Stability
|
||||
|
||||
Please see [RELEASES.md](RELEASES.md) for details on versioning and stability
|
||||
of containerd components.
|
||||
|
||||
### Development reports.
|
||||
|
||||
Weekly summary on the progress and what is being worked on.
|
||||
https://github.com/containerd/containerd/tree/master/reports
|
||||
|
||||
### Communication
|
||||
|
||||
For async communication and long running discussions please use issues and pull requests on the github repo.
|
||||
This will be the best place to discuss design and implementation.
|
||||
|
||||
For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development.
|
||||
|
||||
**Slack:** https://dockr.ly/community
|
||||
|
||||
### Reporting security issues
|
||||
|
||||
__If you are reporting a security issue, please reach out discreetly at security@containerd.io__.
|
||||
|
||||
## Licenses
|
||||
|
||||
The containerd codebase is released under the [Apache 2.0 license](LICENSE.code).
|
||||
The README.md file, and files in the "docs" folder are licensed under the
|
||||
Creative Commons Attribution 4.0 International License. You may obtain a
|
||||
copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.
|
||||
78
vendor/github.com/containerd/containerd/errdefs/errors.go
generated
vendored
Normal file
78
vendor/github.com/containerd/containerd/errdefs/errors.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package errdefs defines the common errors used throughout containerd
|
||||
// packages.
|
||||
//
|
||||
// Use with errors.Wrap and error.Wrapf to add context to an error.
|
||||
//
|
||||
// To detect an error class, use the IsXXX functions to tell whether an error
|
||||
// is of a certain type.
|
||||
//
|
||||
// The functions ToGRPC and FromGRPC can be used to map server-side and
|
||||
// client-side errors to the correct types.
|
||||
package errdefs
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
// Definitions of common error types used throughout containerd. All containerd
|
||||
// errors returned by most packages will map into one of these errors classes.
|
||||
// Packages should return errors of these types when they want to instruct a
|
||||
// client to take a particular action.
|
||||
//
|
||||
// For the most part, we just try to provide local grpc errors. Most conditions
|
||||
// map very well to those defined by grpc.
|
||||
var (
|
||||
ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping.
|
||||
ErrInvalidArgument = errors.New("invalid argument")
|
||||
ErrNotFound = errors.New("not found")
|
||||
ErrAlreadyExists = errors.New("already exists")
|
||||
ErrFailedPrecondition = errors.New("failed precondition")
|
||||
ErrUnavailable = errors.New("unavailable")
|
||||
ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented
|
||||
)
|
||||
|
||||
// IsInvalidArgument returns true if the error is due to an invalid argument
|
||||
func IsInvalidArgument(err error) bool {
|
||||
return errors.Cause(err) == ErrInvalidArgument
|
||||
}
|
||||
|
||||
// IsNotFound returns true if the error is due to a missing object
|
||||
func IsNotFound(err error) bool {
|
||||
return errors.Cause(err) == ErrNotFound
|
||||
}
|
||||
|
||||
// IsAlreadyExists returns true if the error is due to an already existing
|
||||
// metadata item
|
||||
func IsAlreadyExists(err error) bool {
|
||||
return errors.Cause(err) == ErrAlreadyExists
|
||||
}
|
||||
|
||||
// IsFailedPrecondition returns true if an operation could not proceed to the
|
||||
// lack of a particular condition
|
||||
func IsFailedPrecondition(err error) bool {
|
||||
return errors.Cause(err) == ErrFailedPrecondition
|
||||
}
|
||||
|
||||
// IsUnavailable returns true if the error is due to a resource being unavailable
|
||||
func IsUnavailable(err error) bool {
|
||||
return errors.Cause(err) == ErrUnavailable
|
||||
}
|
||||
|
||||
// IsNotImplemented returns true if the error is due to not being implemented
|
||||
func IsNotImplemented(err error) bool {
|
||||
return errors.Cause(err) == ErrNotImplemented
|
||||
}
|
||||
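The package comment above spells out the intended pattern: wrap one of these error classes with github.com/pkg/errors to add context, then detect the class on the receiving side with the IsXXX helpers. A minimal sketch of that pattern; `findImage` is a hypothetical helper used only for illustration:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

// findImage is a hypothetical lookup, shown only to illustrate returning a
// wrapped errdefs class so callers can branch on it.
func findImage(name string) error {
	return errors.Wrapf(errdefs.ErrNotFound, "image %q", name)
}

func main() {
	err := findImage("docker.io/library/busybox:latest")
	// IsNotFound unwraps via errors.Cause, so the class survives the Wrapf above.
	if errdefs.IsNotFound(err) {
		fmt.Println("not found:", err)
	}
}
```

Because Wrapf keeps ErrNotFound as the cause, callers can branch on the error class without parsing the message text.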
138
vendor/github.com/containerd/containerd/errdefs/grpc.go
generated
vendored
Normal file
@ -0,0 +1,138 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package errdefs
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// ToGRPC will attempt to map the backend containerd error into a grpc error,
|
||||
// using the original error message as a description.
|
||||
//
|
||||
// Further information may be extracted from certain errors depending on their
|
||||
// type.
|
||||
//
|
||||
// If the error is unmapped, the original error will be returned to be handled
|
||||
// by the regular grpc error handling stack.
|
||||
func ToGRPC(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if isGRPCError(err) {
|
||||
// error has already been mapped to grpc
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
case IsInvalidArgument(err):
|
||||
return status.Errorf(codes.InvalidArgument, err.Error())
|
||||
case IsNotFound(err):
|
||||
return status.Errorf(codes.NotFound, err.Error())
|
||||
case IsAlreadyExists(err):
|
||||
return status.Errorf(codes.AlreadyExists, err.Error())
|
||||
case IsFailedPrecondition(err):
|
||||
return status.Errorf(codes.FailedPrecondition, err.Error())
|
||||
case IsUnavailable(err):
|
||||
return status.Errorf(codes.Unavailable, err.Error())
|
||||
case IsNotImplemented(err):
|
||||
return status.Errorf(codes.Unimplemented, err.Error())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// ToGRPCf maps the error to grpc error codes, assembling the formatting string
|
||||
// and combining it with the target error string.
|
||||
//
|
||||
// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
|
||||
func ToGRPCf(err error, format string, args ...interface{}) error {
|
||||
return ToGRPC(errors.Wrapf(err, format, args...))
|
||||
}
|
||||
|
||||
// FromGRPC returns the underlying error from a grpc service based on the grpc error code
|
||||
func FromGRPC(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var cls error // divide these into error classes, becomes the cause
|
||||
|
||||
switch code(err) {
|
||||
case codes.InvalidArgument:
|
||||
cls = ErrInvalidArgument
|
||||
case codes.AlreadyExists:
|
||||
cls = ErrAlreadyExists
|
||||
case codes.NotFound:
|
||||
cls = ErrNotFound
|
||||
case codes.Unavailable:
|
||||
cls = ErrUnavailable
|
||||
case codes.FailedPrecondition:
|
||||
cls = ErrFailedPrecondition
|
||||
case codes.Unimplemented:
|
||||
cls = ErrNotImplemented
|
||||
default:
|
||||
cls = ErrUnknown
|
||||
}
|
||||
|
||||
msg := rebaseMessage(cls, err)
|
||||
if msg != "" {
|
||||
err = errors.Wrapf(cls, msg)
|
||||
} else {
|
||||
err = errors.WithStack(cls)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// rebaseMessage removes the repeats for an error at the end of an error
|
||||
// string. This will happen when taking an error over grpc then remapping it.
|
||||
//
|
||||
// Effectively, we just remove the string of cls from the end of err if it
|
||||
// appears there.
|
||||
func rebaseMessage(cls error, err error) string {
|
||||
desc := errDesc(err)
|
||||
clss := cls.Error()
|
||||
if desc == clss {
|
||||
return ""
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(desc, ": "+clss)
|
||||
}
|
||||
|
||||
func isGRPCError(err error) bool {
|
||||
_, ok := status.FromError(err)
|
||||
return ok
|
||||
}
|
||||
|
||||
func code(err error) codes.Code {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Code()
|
||||
}
|
||||
return codes.Unknown
|
||||
}
|
||||
|
||||
func errDesc(err error) string {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Message()
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
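ToGRPC and FromGRPC above are meant to be used as a symmetric pair: the server maps an errdefs class to a gRPC status code, and the client maps the status back to the class while rebaseMessage strips the duplicated class suffix from the description. A small sketch of that round trip using only the functions in this file (no real gRPC connection involved):

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

func main() {
	// "Server" side: attach context and convert to a gRPC status error.
	wire := errdefs.ToGRPCf(errdefs.ErrNotFound, "container %q", "redis")

	// "Client" side: map the status code back to the errdefs class.
	err := errdefs.FromGRPC(wire)

	fmt.Println(errdefs.IsNotFound(err)) // true
	fmt.Println(err)                     // container "redis": not found
}
```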
86
vendor/github.com/containerd/containerd/log/context.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
// G is an alias for GetLogger.
|
||||
//
|
||||
// We may want to define this locally to a package to get package tagged log
|
||||
// messages.
|
||||
G = GetLogger
|
||||
|
||||
// L is an alias for the standard logger.
|
||||
L = logrus.NewEntry(logrus.StandardLogger())
|
||||
)
|
||||
|
||||
type (
|
||||
loggerKey struct{}
|
||||
)
|
||||
|
||||
// TraceLevel is the log level for tracing. Trace level is lower than debug level,
|
||||
// and is usually used to trace detailed behavior of the program.
|
||||
const TraceLevel = logrus.Level(uint32(logrus.DebugLevel + 1))
|
||||
|
||||
// ParseLevel takes a string level and returns the Logrus log level constant.
|
||||
// It supports trace level.
|
||||
func ParseLevel(lvl string) (logrus.Level, error) {
|
||||
if lvl == "trace" {
|
||||
return TraceLevel, nil
|
||||
}
|
||||
return logrus.ParseLevel(lvl)
|
||||
}
|
||||
|
||||
// WithLogger returns a new context with the provided logger. Use in
|
||||
// combination with logger.WithField(s) for great effect.
|
||||
func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
|
||||
return context.WithValue(ctx, loggerKey{}, logger)
|
||||
}
|
||||
|
||||
// GetLogger retrieves the current logger from the context. If no logger is
|
||||
// available, the default logger is returned.
|
||||
func GetLogger(ctx context.Context) *logrus.Entry {
|
||||
logger := ctx.Value(loggerKey{})
|
||||
|
||||
if logger == nil {
|
||||
return L
|
||||
}
|
||||
|
||||
return logger.(*logrus.Entry)
|
||||
}
|
||||
|
||||
// Trace logs a message at level Trace with the log entry passed-in.
|
||||
func Trace(e *logrus.Entry, args ...interface{}) {
|
||||
level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level)))
|
||||
if level >= TraceLevel {
|
||||
e.Debug(args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Tracef logs a message at level Trace with the log entry passed-in.
|
||||
func Tracef(e *logrus.Entry, format string, args ...interface{}) {
|
||||
level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level)))
|
||||
if level >= TraceLevel {
|
||||
e.Debugf(format, args...)
|
||||
}
|
||||
}
|
||||
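WithLogger, GetLogger and the G alias above implement the usual context-scoped logger pattern. A short sketch of how a caller would typically use them, assuming only the functions defined in this file plus logrus:

```go
package main

import (
	"context"

	"github.com/containerd/containerd/log"
	"github.com/sirupsen/logrus"
)

func main() {
	// Attach a field-tagged logger to the context once...
	ctx := log.WithLogger(context.Background(), logrus.WithField("module", "example"))

	// ...and retrieve it anywhere downstream via the G alias.
	log.G(ctx).Info("hello from a context-scoped logger")

	// With no logger attached, G falls back to the standard entry L.
	log.G(context.Background()).Info("falls back to log.L")
}
```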
101
vendor/github.com/containerd/containerd/platforms/cpuinfo.go
generated
vendored
Normal file
@ -0,0 +1,101 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package platforms
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Present the ARM instruction set architecture, eg: v7, v8
|
||||
var cpuVariant string
|
||||
|
||||
func init() {
|
||||
if isArmArch(runtime.GOARCH) {
|
||||
cpuVariant = getCPUVariant()
|
||||
} else {
|
||||
cpuVariant = ""
|
||||
}
|
||||
}
|
||||
|
||||
// For Linux, the kernel has already detected the ABI, ISA and Features.
|
||||
// So we don't need to access the ARM registers to detect platform information
|
||||
// by ourselves. We can just parse these information from /proc/cpuinfo
|
||||
func getCPUInfo(pattern string) (info string, err error) {
|
||||
if !isLinuxOS(runtime.GOOS) {
|
||||
return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS)
|
||||
}
|
||||
|
||||
cpuinfo, err := os.Open("/proc/cpuinfo")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer cpuinfo.Close()
|
||||
|
||||
// Start to Parse the Cpuinfo line by line. For SMP SoC, we parse
|
||||
// the first core is enough.
|
||||
scanner := bufio.NewScanner(cpuinfo)
|
||||
for scanner.Scan() {
|
||||
newline := scanner.Text()
|
||||
list := strings.Split(newline, ":")
|
||||
|
||||
if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
|
||||
return strings.TrimSpace(list[1]), nil
|
||||
}
|
||||
}
|
||||
|
||||
// Check whether the scanner encountered errors
|
||||
err = scanner.Err()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern)
|
||||
}
|
||||
|
||||
func getCPUVariant() string {
|
||||
variant, err := getCPUInfo("Cpu architecture")
|
||||
if err != nil {
|
||||
log.L.WithError(err).Error("failure getting variant")
|
||||
return ""
|
||||
}
|
||||
|
||||
switch variant {
|
||||
case "8":
|
||||
variant = "v8"
|
||||
case "7", "7M", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
|
||||
variant = "v7"
|
||||
case "6", "6TEJ":
|
||||
variant = "v6"
|
||||
case "5", "5T", "5TE", "5TEJ":
|
||||
variant = "v5"
|
||||
case "4", "4T":
|
||||
variant = "v4"
|
||||
case "3":
|
||||
variant = "v3"
|
||||
default:
|
||||
variant = "unknown"
|
||||
}
|
||||
|
||||
return variant
|
||||
}
|
||||
114
vendor/github.com/containerd/containerd/platforms/database.go
generated
vendored
Normal file
@ -0,0 +1,114 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package platforms
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// isLinuxOS returns true if the operating system is Linux.
|
||||
//
|
||||
// The OS value should be normalized before calling this function.
|
||||
func isLinuxOS(os string) bool {
|
||||
return os == "linux"
|
||||
}
|
||||
|
||||
// These function are generated from from https://golang.org/src/go/build/syslist.go.
|
||||
//
|
||||
// We use switch statements because they are slightly faster than map lookups
|
||||
// and use a little less memory.
|
||||
|
||||
// isKnownOS returns true if we know about the operating system.
|
||||
//
|
||||
// The OS value should be normalized before calling this function.
|
||||
func isKnownOS(os string) bool {
|
||||
switch os {
|
||||
case "android", "darwin", "dragonfly", "freebsd", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isArmArch returns true if the architecture is ARM.
|
||||
//
|
||||
// The arch value should be normalized before being passed to this function.
|
||||
func isArmArch(arch string) bool {
|
||||
switch arch {
|
||||
case "arm", "arm64":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isKnownArch returns true if we know about the architecture.
|
||||
//
|
||||
// The arch value should be normalized before being passed to this function.
|
||||
func isKnownArch(arch string) bool {
|
||||
switch arch {
|
||||
case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "s390", "s390x", "sparc", "sparc64":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func normalizeOS(os string) string {
|
||||
if os == "" {
|
||||
return runtime.GOOS
|
||||
}
|
||||
os = strings.ToLower(os)
|
||||
|
||||
switch os {
|
||||
case "macos":
|
||||
os = "darwin"
|
||||
}
|
||||
return os
|
||||
}
|
||||
|
||||
// normalizeArch normalizes the architecture.
|
||||
func normalizeArch(arch, variant string) (string, string) {
|
||||
arch, variant = strings.ToLower(arch), strings.ToLower(variant)
|
||||
switch arch {
|
||||
case "i386":
|
||||
arch = "386"
|
||||
variant = ""
|
||||
case "x86_64", "x86-64":
|
||||
arch = "amd64"
|
||||
variant = ""
|
||||
case "aarch64", "arm64":
|
||||
arch = "arm64"
|
||||
switch variant {
|
||||
case "8", "v8":
|
||||
variant = ""
|
||||
}
|
||||
case "armhf":
|
||||
arch = "arm"
|
||||
variant = "v7"
|
||||
case "armel":
|
||||
arch = "arm"
|
||||
variant = "v6"
|
||||
case "arm":
|
||||
switch variant {
|
||||
case "", "7":
|
||||
variant = "v7"
|
||||
case "5", "6", "8":
|
||||
variant = "v" + variant
|
||||
}
|
||||
}
|
||||
|
||||
return arch, variant
|
||||
}
|
||||
38
vendor/github.com/containerd/containerd/platforms/defaults.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package platforms
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// Default returns the default specifier for the platform.
|
||||
func Default() string {
|
||||
return Format(DefaultSpec())
|
||||
}
|
||||
|
||||
// DefaultSpec returns the current platform's default platform specification.
|
||||
func DefaultSpec() specs.Platform {
|
||||
return specs.Platform{
|
||||
OS: runtime.GOOS,
|
||||
Architecture: runtime.GOARCH,
|
||||
// The Variant field will be empty if arch != ARM.
|
||||
Variant: cpuVariant,
|
||||
}
|
||||
}
|
||||
268
vendor/github.com/containerd/containerd/platforms/platforms.go
generated
vendored
Normal file
@ -0,0 +1,268 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package platforms provides a toolkit for normalizing, matching and
|
||||
// specifying container platforms.
|
||||
//
|
||||
// Centered around OCI platform specifications, we define a string-based
|
||||
// specifier syntax that can be used for user input. With a specifier, users
|
||||
// only need to specify the parts of the platform that are relevant to their
|
||||
// context, providing an operating system or architecture or both.
|
||||
//
|
||||
// How do I use this package?
|
||||
//
|
||||
// The vast majority of use cases should simply use the match function with
|
||||
// user input. The first step is to parse a specifier into a matcher:
|
||||
//
|
||||
// m, err := Parse("linux")
|
||||
// if err != nil { ... }
|
||||
//
|
||||
// Once you have a matcher, use it to match against the platform declared by a
|
||||
// component, typically from an image or runtime. Since extracting an images
|
||||
// platform is a little more involved, we'll use an example against the
|
||||
// platform default:
|
||||
//
|
||||
// if ok := m.Match(Default()); !ok { /* doesn't match */ }
|
||||
//
|
||||
// This can be composed in loops for resolving runtimes or used as a filter for
|
||||
// fetch and select images.
|
||||
//
|
||||
// More details of the specifier syntax and platform spec follow.
|
||||
//
|
||||
// Declaring Platform Support
|
||||
//
|
||||
// Components that have strict platform requirements should use the OCI
|
||||
// platform specification to declare their support. Typically, this will be
|
||||
// images and runtimes that should make these declaring which platform they
|
||||
// support specifically. This looks roughly as follows:
|
||||
//
|
||||
// type Platform struct {
|
||||
// Architecture string
|
||||
// OS string
|
||||
// Variant string
|
||||
// }
|
||||
//
|
||||
// Most images and runtimes should at least set Architecture and OS, according
|
||||
// to their GOARCH and GOOS values, respectively (follow the OCI image
|
||||
// specification when in doubt). ARM should set variant under certain
|
||||
// discussions, which are outlined below.
|
||||
//
|
||||
// Platform Specifiers
|
||||
//
|
||||
// While the OCI platform specifications provide a tool for components to
|
||||
// specify structured information, user input typically doesn't need the full
|
||||
// context and much can be inferred. To solve this problem, we introduced
|
||||
// "specifiers". A specifier has the format
|
||||
// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
|
||||
// operating system or the architecture or both.
|
||||
//
|
||||
// An example of a common specifier is `linux/amd64`. If the host has a default
|
||||
// of runtime that matches this, the user can simply provide the component that
|
||||
// matters. For example, if a image provides amd64 and arm64 support, the
|
||||
// operating system, `linux` can be inferred, so they only have to provide
|
||||
// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
|
||||
// where the architecture may be known but a runtime may support images from
|
||||
// different operating systems.
|
||||
//
|
||||
// Normalization
|
||||
//
|
||||
// Because not all users are familiar with the way the Go runtime represents
|
||||
// platforms, several normalizations have been provided to make this package
|
||||
// easier to user.
|
||||
//
|
||||
// The following are performed for architectures:
|
||||
//
|
||||
// Value Normalized
|
||||
// aarch64 arm64
|
||||
// armhf arm
|
||||
// armel arm/v6
|
||||
// i386 386
|
||||
// x86_64 amd64
|
||||
// x86-64 amd64
|
||||
//
|
||||
// We also normalize the operating system `macos` to `darwin`.
|
||||
//
|
||||
// ARM Support
|
||||
//
|
||||
// To qualify ARM architecture, the Variant field is used to qualify the arm
|
||||
// version. The most common arm version, v7, is represented without the variant
|
||||
// unless it is explicitly provided. This is treated as equivalent to armhf. A
|
||||
// previous architecture, armel, will be normalized to arm/v6.
|
||||
//
|
||||
// While these normalizations are provided, their support on arm platforms has
|
||||
// not yet been fully implemented and tested.
|
||||
package platforms
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
|
||||
)
|
||||
|
||||
// Matcher matches platforms specifications, provided by an image or runtime.
|
||||
type Matcher interface {
|
||||
Match(platform specs.Platform) bool
|
||||
}
|
||||
|
||||
// NewMatcher returns a simple matcher based on the provided platform
|
||||
// specification. The returned matcher only looks for equality based on os,
|
||||
// architecture and variant.
|
||||
//
|
||||
// One may implement their own matcher if this doesn't provide the required
|
||||
// functionality.
|
||||
//
|
||||
// Applications should opt to use `Match` over directly parsing specifiers.
|
||||
func NewMatcher(platform specs.Platform) Matcher {
|
||||
return &matcher{
|
||||
Platform: Normalize(platform),
|
||||
}
|
||||
}
|
||||
|
||||
type matcher struct {
|
||||
specs.Platform
|
||||
}
|
||||
|
||||
func (m *matcher) Match(platform specs.Platform) bool {
|
||||
normalized := Normalize(platform)
|
||||
return m.OS == normalized.OS &&
|
||||
m.Architecture == normalized.Architecture &&
|
||||
m.Variant == normalized.Variant
|
||||
}
|
||||
|
||||
func (m *matcher) String() string {
|
||||
return Format(m.Platform)
|
||||
}
|
||||
|
||||
// Parse parses the platform specifier syntax into a platform declaration.
|
||||
//
|
||||
// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
|
||||
// The minimum required information for a platform specifier is the operating
|
||||
// system or architecture. If there is only a single string (no slashes), the
|
||||
// value will be matched against the known set of operating systems, then fall
|
||||
// back to the known set of architectures. The missing component will be
|
||||
// inferred based on the local environment.
|
||||
func Parse(specifier string) (specs.Platform, error) {
|
||||
if strings.Contains(specifier, "*") {
|
||||
// TODO(stevvooe): need to work out exact wildcard handling
|
||||
return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier)
|
||||
}
|
||||
|
||||
parts := strings.Split(specifier, "/")
|
||||
|
||||
for _, part := range parts {
|
||||
if !specifierRe.MatchString(part) {
|
||||
return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String())
|
||||
}
|
||||
}
|
||||
|
||||
var p specs.Platform
|
||||
switch len(parts) {
|
||||
case 1:
|
||||
// in this case, we will test that the value might be an OS, then look
|
||||
// it up. If it is not known, we'll treat it as an architecture. Since
|
||||
// we have very little information about the platform here, we are
|
||||
// going to be a little more strict if we don't know about the argument
|
||||
// value.
|
||||
p.OS = normalizeOS(parts[0])
|
||||
if isKnownOS(p.OS) {
|
||||
// picks a default architecture
|
||||
p.Architecture = runtime.GOARCH
|
||||
if p.Architecture == "arm" {
|
||||
// TODO(stevvooe): Resolve arm variant, if not v6 (default)
|
||||
return specs.Platform{}, errors.Wrapf(errdefs.ErrNotImplemented, "arm support not fully implemented")
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
p.Architecture, p.Variant = normalizeArch(parts[0], "")
|
||||
if p.Architecture == "arm" && p.Variant == "v7" {
|
||||
p.Variant = ""
|
||||
}
|
||||
if isKnownArch(p.Architecture) {
|
||||
p.OS = runtime.GOOS
|
||||
return p, nil
|
||||
}
|
||||
|
||||
return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier)
|
||||
case 2:
|
||||
// In this case, we treat as a regular os/arch pair. We don't care
|
||||
// about whether or not we know of the platform.
|
||||
p.OS = normalizeOS(parts[0])
|
||||
p.Architecture, p.Variant = normalizeArch(parts[1], "")
|
||||
if p.Architecture == "arm" && p.Variant == "v7" {
|
||||
p.Variant = ""
|
||||
}
|
||||
|
||||
return p, nil
|
||||
case 3:
|
||||
// we have a fully specified variant, this is rare
|
||||
p.OS = normalizeOS(parts[0])
|
||||
p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
|
||||
if p.Architecture == "arm64" && p.Variant == "" {
|
||||
p.Variant = "v8"
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier)
|
||||
}
|
||||
|
||||
// Format returns a string specifier from the provided platform specification.
|
||||
func Format(platform specs.Platform) string {
|
||||
if platform.OS == "" {
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant)
|
||||
}
|
||||
|
||||
func joinNotEmpty(s ...string) string {
|
||||
var ss []string
|
||||
for _, s := range s {
|
||||
if s == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
ss = append(ss, s)
|
||||
}
|
||||
|
||||
return strings.Join(ss, "/")
|
||||
}
|
||||
|
||||
// Normalize validates and translate the platform to the canonical value.
|
||||
//
|
||||
// For example, if "Aarch64" is encountered, we change it to "arm64" or if
|
||||
// "x86_64" is encountered, it becomes "amd64".
|
||||
func Normalize(platform specs.Platform) specs.Platform {
|
||||
platform.OS = normalizeOS(platform.OS)
|
||||
platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
|
||||
|
||||
// these fields are deprecated, remove them
|
||||
platform.OSFeatures = nil
|
||||
platform.OSVersion = ""
|
||||
|
||||
return platform
|
||||
}
|
||||
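Note that in this vendored revision Parse returns a specs.Platform directly and matching goes through NewMatcher, so the `m, err := Parse("linux")` snippet in the package comment does not line up with the signatures below. A hedged sketch against the functions actually defined in this file:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// Parse a user-supplied specifier; "aarch64" is normalized to arm64.
	p, err := platforms.Parse("linux/aarch64")
	if err != nil {
		panic(err)
	}
	fmt.Println(platforms.Format(p)) // linux/arm64

	// Build an equality matcher and test it against the host platform.
	m := platforms.NewMatcher(p)
	if m.Match(platforms.DefaultSpec()) {
		fmt.Println("host platform matches", platforms.Default())
	}
}
```

Normalize, which NewMatcher applies to both sides, performs the same aarch64 to arm64 and x86_64 to amd64 translations documented in the table above.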
83
vendor/github.com/containerd/containerd/vendor.conf
generated
vendored
Normal file
@ -0,0 +1,83 @@
|
||||
github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd5
|
||||
github.com/containerd/console 9290d21dc56074581f619579c43d970b4514bc08
|
||||
github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130
|
||||
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
|
||||
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
|
||||
github.com/containerd/btrfs 2e1aa0ddf94f91fa282b6ed87c23bf0d64911244
|
||||
github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
|
||||
github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6
|
||||
github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098
|
||||
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
||||
github.com/docker/go-units v0.3.1
|
||||
github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f
|
||||
github.com/prometheus/client_golang f4fb1b73fb099f396a7f0036bf86aa8def4ed823
|
||||
github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
|
||||
github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
|
||||
github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
|
||||
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.0
|
||||
github.com/gogo/protobuf v1.0.0
|
||||
github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
|
||||
github.com/golang/protobuf v1.1.0
|
||||
github.com/opencontainers/runtime-spec v1.0.1
|
||||
github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340
|
||||
github.com/sirupsen/logrus v1.0.0
|
||||
github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
|
||||
golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac
|
||||
google.golang.org/grpc v1.12.0
|
||||
github.com/pkg/errors v0.8.0
|
||||
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
|
||||
golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 https://github.com/golang/sys
|
||||
github.com/opencontainers/image-spec v1.0.1
|
||||
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
|
||||
github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
|
||||
github.com/Microsoft/go-winio v0.4.7
|
||||
github.com/Microsoft/hcsshim v0.6.11
|
||||
github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
|
||||
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
|
||||
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
|
||||
github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577
|
||||
github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
|
||||
gotest.tools v2.1.0
|
||||
github.com/google/go-cmp v0.1.0
|
||||
|
||||
github.com/containerd/cri 8bcb9a95394e8d7845da1d6a994d3ac2a86d22f0
|
||||
github.com/containerd/go-cni f2d7272f12d045b16ed924f50e91f9f9cecc55a7
|
||||
github.com/blang/semver v3.1.0
|
||||
github.com/containernetworking/cni v0.6.0
|
||||
github.com/containernetworking/plugins v0.7.0
|
||||
github.com/davecgh/go-spew v1.1.0
|
||||
github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621
|
||||
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
|
||||
github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528
|
||||
github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46
|
||||
github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
|
||||
github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
|
||||
github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
|
||||
github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
|
||||
github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
|
||||
github.com/json-iterator/go 1.0.4
|
||||
github.com/opencontainers/runtime-tools 6073aff4ac61897f75895123f7e24135204a404d
|
||||
github.com/opencontainers/selinux 4a2974bf1ee960774ffd517717f1f45325af0206
|
||||
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
|
||||
github.com/spf13/pflag v1.0.0
|
||||
github.com/tchap/go-patricia 5ad6cdb7538b0097d5598c7e57f0a24072adf7dc
|
||||
golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067
|
||||
golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
|
||||
gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
|
||||
gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77
|
||||
k8s.io/api 7e796de92438aede7cb5d6bcf6c10f4fa65db560
|
||||
k8s.io/apimachinery fcb9a12f7875d01f8390b28faedc37dcf2e713b9
|
||||
k8s.io/apiserver 4a8377c547bbff4576a35b5b5bf4026d9b5aa763
|
||||
k8s.io/client-go b9a0cf870f239c4a4ecfd3feb075a50e7cbe1473
|
||||
k8s.io/kubernetes v1.10.0
|
||||
k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e
|
||||
|
||||
# zfs dependencies
|
||||
github.com/containerd/zfs 9a0b8b8b5982014b729cd34eb7cd7a11062aa6ec
|
||||
github.com/mistifyio/go-zfs 166add352731e515512690329794ee593f1aaff2
|
||||
github.com/pborman/uuid c65b2f87fee37d1c7854c9164a450713c28d50cd
|
||||
|
||||
# aufs dependencies
|
||||
github.com/containerd/aufs a7fbd554da7a9eafbe5a460a421313a9fd18d988
|
||||
10
vendor/github.com/containerd/continuity/syscallx/syscall_unix.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
// +build !windows
|
||||
|
||||
package syscallx
|
||||
|
||||
import "syscall"
|
||||
|
||||
// Readlink returns the destination of the named symbolic link.
|
||||
func Readlink(path string, buf []byte) (n int, err error) {
|
||||
return syscall.Readlink(path, buf)
|
||||
}
|
||||
96
vendor/github.com/containerd/continuity/syscallx/syscall_windows.go
generated
vendored
Normal file
@ -0,0 +1,96 @@
|
||||
package syscallx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type reparseDataBuffer struct {
|
||||
ReparseTag uint32
|
||||
ReparseDataLength uint16
|
||||
Reserved uint16
|
||||
|
||||
// GenericReparseBuffer
|
||||
reparseBuffer byte
|
||||
}
|
||||
|
||||
type mountPointReparseBuffer struct {
|
||||
SubstituteNameOffset uint16
|
||||
SubstituteNameLength uint16
|
||||
PrintNameOffset uint16
|
||||
PrintNameLength uint16
|
||||
PathBuffer [1]uint16
|
||||
}
|
||||
|
||||
type symbolicLinkReparseBuffer struct {
|
||||
SubstituteNameOffset uint16
|
||||
SubstituteNameLength uint16
|
||||
PrintNameOffset uint16
|
||||
PrintNameLength uint16
|
||||
Flags uint32
|
||||
PathBuffer [1]uint16
|
||||
}
|
||||
|
||||
const (
|
||||
_IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003
|
||||
_SYMLINK_FLAG_RELATIVE = 1
|
||||
)
|
||||
|
||||
// Readlink returns the destination of the named symbolic link.
|
||||
func Readlink(path string, buf []byte) (n int, err error) {
|
||||
fd, err := syscall.CreateFile(syscall.StringToUTF16Ptr(path), syscall.GENERIC_READ, 0, nil, syscall.OPEN_EXISTING,
|
||||
syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
defer syscall.CloseHandle(fd)
|
||||
|
||||
rdbbuf := make([]byte, syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE)
|
||||
var bytesReturned uint32
|
||||
err = syscall.DeviceIoControl(fd, syscall.FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0]))
|
||||
var s string
|
||||
switch rdb.ReparseTag {
|
||||
case syscall.IO_REPARSE_TAG_SYMLINK:
|
||||
data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
|
||||
p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
|
||||
s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2])
|
||||
if data.Flags&_SYMLINK_FLAG_RELATIVE == 0 {
|
||||
if len(s) >= 4 && s[:4] == `\??\` {
|
||||
s = s[4:]
|
||||
switch {
|
||||
case len(s) >= 2 && s[1] == ':': // \??\C:\foo\bar
|
||||
// do nothing
|
||||
case len(s) >= 4 && s[:4] == `UNC\`: // \??\UNC\foo\bar
|
||||
s = `\\` + s[4:]
|
||||
default:
|
||||
// unexpected; do nothing
|
||||
}
|
||||
} else {
|
||||
// unexpected; do nothing
|
||||
}
|
||||
}
|
||||
case _IO_REPARSE_TAG_MOUNT_POINT:
|
||||
data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
|
||||
p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
|
||||
s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2])
|
||||
if len(s) >= 4 && s[:4] == `\??\` { // \??\C:\foo\bar
|
||||
if len(s) < 48 || s[:11] != `\??\Volume{` {
|
||||
s = s[4:]
|
||||
}
|
||||
} else {
|
||||
// unexpected; do nothing
|
||||
}
|
||||
default:
|
||||
// the path is not a symlink or junction but another type of reparse
|
||||
// point
|
||||
return -1, syscall.ENOENT
|
||||
}
|
||||
n = copy(buf, []byte(s))
|
||||
|
||||
return n, nil
|
||||
}
|
||||
112
vendor/github.com/containerd/continuity/sysx/file_posix.go
generated
vendored
Normal file
@ -0,0 +1,112 @@
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containerd/continuity/syscallx"
|
||||
)
|
||||
|
||||
// Readlink returns the destination of the named symbolic link.
|
||||
// If there is an error, it will be of type *PathError.
|
||||
func Readlink(name string) (string, error) {
|
||||
for len := 128; ; len *= 2 {
|
||||
b := make([]byte, len)
|
||||
n, e := fixCount(syscallx.Readlink(fixLongPath(name), b))
|
||||
if e != nil {
|
||||
return "", &os.PathError{Op: "readlink", Path: name, Err: e}
|
||||
}
|
||||
if n < len {
|
||||
return string(b[0:n]), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Many functions in package syscall return a count of -1 instead of 0.
|
||||
// Using fixCount(call()) instead of call() corrects the count.
|
||||
func fixCount(n int, err error) (int, error) {
|
||||
if n < 0 {
|
||||
n = 0
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// fixLongPath returns the extended-length (\\?\-prefixed) form of
|
||||
// path when needed, in order to avoid the default 260 character file
|
||||
// path limit imposed by Windows. If path is not easily converted to
|
||||
// the extended-length form (for example, if path is a relative path
|
||||
// or contains .. elements), or is short enough, fixLongPath returns
|
||||
// path unmodified.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
|
||||
func fixLongPath(path string) string {
|
||||
// Do nothing (and don't allocate) if the path is "short".
|
||||
// Empirically (at least on the Windows Server 2013 builder),
|
||||
// the kernel is arbitrarily okay with < 248 bytes. That
|
||||
// matches what the docs above say:
|
||||
// "When using an API to create a directory, the specified
|
||||
// path cannot be so long that you cannot append an 8.3 file
|
||||
// name (that is, the directory name cannot exceed MAX_PATH
|
||||
// minus 12)." Since MAX_PATH is 260, 260 - 12 = 248.
|
||||
//
|
||||
// The MSDN docs appear to say that a normal path that is 248 bytes long
|
||||
// will work; empirically the path must be less then 248 bytes long.
|
||||
if len(path) < 248 {
|
||||
// Don't fix. (This is how Go 1.7 and earlier worked,
|
||||
// not automatically generating the \\?\ form)
|
||||
return path
|
||||
}
|
||||
|
||||
// The extended form begins with \\?\, as in
|
||||
// \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt.
|
||||
// The extended form disables evaluation of . and .. path
|
||||
// elements and disables the interpretation of / as equivalent
|
||||
// to \. The conversion here rewrites / to \ and elides
|
||||
// . elements as well as trailing or duplicate separators. For
|
||||
// simplicity it avoids the conversion entirely for relative
|
||||
// paths or paths containing .. elements. For now,
|
||||
// \\server\share paths are not converted to
|
||||
// \\?\UNC\server\share paths because the rules for doing so
|
||||
// are less well-specified.
|
||||
if len(path) >= 2 && path[:2] == `\\` {
|
||||
// Don't canonicalize UNC paths.
|
||||
return path
|
||||
}
|
||||
if !filepath.IsAbs(path) {
|
||||
// Relative path
|
||||
return path
|
||||
}
|
||||
|
||||
const prefix = `\\?`
|
||||
|
||||
pathbuf := make([]byte, len(prefix)+len(path)+len(`\`))
|
||||
copy(pathbuf, prefix)
|
||||
n := len(path)
|
||||
r, w := 0, len(prefix)
|
||||
for r < n {
|
||||
switch {
|
||||
case os.IsPathSeparator(path[r]):
|
||||
// empty block
|
||||
r++
|
||||
case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
|
||||
// /./
|
||||
r++
|
||||
case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
|
||||
// /../ is currently unhandled
|
||||
return path
|
||||
default:
|
||||
pathbuf[w] = '\\'
|
||||
w++
|
||||
for ; r < n && !os.IsPathSeparator(path[r]); r++ {
|
||||
pathbuf[w] = path[r]
|
||||
w++
|
||||
}
|
||||
}
|
||||
}
|
||||
// A drive's root directory needs a trailing \
|
||||
if w == len(`\\?\c:`) {
|
||||
pathbuf[w] = '\\'
|
||||
w++
|
||||
}
|
||||
return string(pathbuf[:w])
|
||||
}
|
||||
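Readlink above is the exported entry point: it retries syscallx.Readlink with a doubling buffer and routes long Windows paths through fixLongPath. Usage mirrors os.Readlink; a tiny sketch with an assumed link path:

```go
package main

import (
	"fmt"

	"github.com/containerd/continuity/sysx"
)

func main() {
	// /tmp/example-link is an assumed symlink, used only for illustration.
	target, err := sysx.Readlink("/tmp/example-link")
	if err != nil {
		fmt.Println("readlink failed:", err)
		return
	}
	fmt.Println("link points to", target)
}
```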
35
vendor/github.com/containerd/continuity/sysx/xattr_linux.go
generated
vendored
@ -1,61 +1,44 @@
|
||||
package sysx
|
||||
|
||||
import "syscall"
|
||||
|
||||
// These functions will be generated by generate.sh
|
||||
// $ GOOS=linux GOARCH=386 ./generate.sh xattr
|
||||
// $ GOOS=linux GOARCH=amd64 ./generate.sh xattr
|
||||
// $ GOOS=linux GOARCH=arm ./generate.sh xattr
|
||||
// $ GOOS=linux GOARCH=arm64 ./generate.sh xattr
|
||||
// $ GOOS=linux GOARCH=ppc64 ./generate.sh xattr
|
||||
// $ GOOS=linux GOARCH=ppc64le ./generate.sh xattr
|
||||
// $ GOOS=linux GOARCH=s390x ./generate.sh xattr
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
// Listxattr calls syscall listxattr and reads all content
|
||||
// and returns a string array
|
||||
func Listxattr(path string) ([]string, error) {
|
||||
return listxattrAll(path, syscall.Listxattr)
|
||||
return listxattrAll(path, unix.Listxattr)
|
||||
}
|
||||
|
||||
// Removexattr calls syscall removexattr
|
||||
func Removexattr(path string, attr string) (err error) {
|
||||
return syscall.Removexattr(path, attr)
|
||||
return unix.Removexattr(path, attr)
|
||||
}
|
||||
|
||||
// Setxattr calls syscall setxattr
|
||||
func Setxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
return syscall.Setxattr(path, attr, data, flags)
|
||||
return unix.Setxattr(path, attr, data, flags)
|
||||
}
|
||||
|
||||
// Getxattr calls syscall getxattr
|
||||
func Getxattr(path, attr string) ([]byte, error) {
|
||||
return getxattrAll(path, attr, syscall.Getxattr)
|
||||
return getxattrAll(path, attr, unix.Getxattr)
|
||||
}
|
||||
|
||||
//sys llistxattr(path string, dest []byte) (sz int, err error)
|
||||
|
||||
// LListxattr lists xattrs, not following symlinks
|
||||
func LListxattr(path string) ([]string, error) {
|
||||
return listxattrAll(path, llistxattr)
|
||||
return listxattrAll(path, unix.Llistxattr)
|
||||
}
|
||||
|
||||
//sys lremovexattr(path string, attr string) (err error)
|
||||
|
||||
// LRemovexattr removes an xattr, not following symlinks
|
||||
func LRemovexattr(path string, attr string) (err error) {
|
||||
return lremovexattr(path, attr)
|
||||
return unix.Lremovexattr(path, attr)
|
||||
}
|
||||
|
||||
//sys lsetxattr(path string, attr string, data []byte, flags int) (err error)
|
||||
|
||||
// LSetxattr sets an xattr, not following symlinks
|
||||
func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
return lsetxattr(path, attr, data, flags)
|
||||
return unix.Lsetxattr(path, attr, data, flags)
|
||||
}
|
||||
|
||||
//sys lgetxattr(path string, attr string, dest []byte) (sz int, err error)
|
||||
|
||||
// LGetxattr gets an xattr, not following symlinks
|
||||
func LGetxattr(path, attr string) ([]byte, error) {
|
||||
return getxattrAll(path, attr, lgetxattr)
|
||||
return getxattrAll(path, attr, unix.Lgetxattr)
|
||||
}
|
||||
|
||||
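With the migration to golang.org/x/sys/unix above, the exported wrappers keep their signatures, which is why the per-architecture mksyscall files that follow are deleted. A brief sketch of the wrappers against a hypothetical file path:

```go
package main

import (
	"fmt"

	"github.com/containerd/continuity/sysx"
)

func main() {
	const path = "/tmp/example.txt" // assumed to exist, for illustration only

	// Set and read back a user xattr through the unix-backed wrappers.
	if err := sysx.Setxattr(path, "user.note", []byte("hello"), 0); err != nil {
		fmt.Println("setxattr failed:", err)
		return
	}
	value, err := sysx.Getxattr(path, "user.note")
	if err != nil {
		fmt.Println("getxattr failed:", err)
		return
	}
	fmt.Printf("user.note = %s\n", value)

	// List all attribute names; the L-variants (e.g. sysx.LGetxattr) skip
	// symlink resolution.
	names, _ := sysx.Listxattr(path)
	fmt.Println("attributes:", names)
}
```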
111
vendor/github.com/containerd/continuity/sysx/xattr_linux_386.go
generated
vendored
@ -1,111 +0,0 @@
|
||||
// mksyscall.pl -l32 xattr_linux.go
|
||||
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func llistxattr(path string, dest []byte) (sz int, err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 unsafe.Pointer
|
||||
if len(dest) > 0 {
|
||||
_p1 = unsafe.Pointer(&dest[0])
|
||||
} else {
|
||||
_p1 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
|
||||
use(unsafe.Pointer(_p0))
|
||||
sz = int(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lremovexattr(path string, attr string) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p2 unsafe.Pointer
|
||||
if len(data) > 0 {
|
||||
_p2 = unsafe.Pointer(&data[0])
|
||||
} else {
|
||||
_p2 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p2 unsafe.Pointer
|
||||
if len(dest) > 0 {
|
||||
_p2 = unsafe.Pointer(&dest[0])
|
||||
} else {
|
||||
_p2 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
sz = int(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
111
vendor/github.com/containerd/continuity/sysx/xattr_linux_amd64.go
generated
vendored
@ -1,111 +0,0 @@
|
||||
// mksyscall.pl xattr_linux.go
|
||||
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func llistxattr(path string, dest []byte) (sz int, err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 unsafe.Pointer
|
||||
if len(dest) > 0 {
|
||||
_p1 = unsafe.Pointer(&dest[0])
|
||||
} else {
|
||||
_p1 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
|
||||
use(unsafe.Pointer(_p0))
|
||||
sz = int(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lremovexattr(path string, attr string) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p2 unsafe.Pointer
|
||||
if len(data) > 0 {
|
||||
_p2 = unsafe.Pointer(&data[0])
|
||||
} else {
|
||||
_p2 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p2 unsafe.Pointer
|
||||
if len(dest) > 0 {
|
||||
_p2 = unsafe.Pointer(&dest[0])
|
||||
} else {
|
||||
_p2 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
sz = int(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
111
vendor/github.com/containerd/continuity/sysx/xattr_linux_arm.go
generated
vendored
@ -1,111 +0,0 @@
|
||||
// mksyscall.pl -l32 xattr_linux.go
|
||||
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func llistxattr(path string, dest []byte) (sz int, err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 unsafe.Pointer
|
||||
if len(dest) > 0 {
|
||||
_p1 = unsafe.Pointer(&dest[0])
|
||||
} else {
|
||||
_p1 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
|
||||
use(unsafe.Pointer(_p0))
|
||||
sz = int(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lremovexattr(path string, attr string) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p2 unsafe.Pointer
|
||||
if len(data) > 0 {
|
||||
_p2 = unsafe.Pointer(&data[0])
|
||||
} else {
|
||||
_p2 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p2 unsafe.Pointer
|
||||
if len(dest) > 0 {
|
||||
_p2 = unsafe.Pointer(&dest[0])
|
||||
} else {
|
||||
_p2 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
sz = int(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
111
vendor/github.com/containerd/continuity/sysx/xattr_linux_arm64.go
generated
vendored
@ -1,111 +0,0 @@
|
||||
// mksyscall.pl xattr_linux.go
|
||||
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func llistxattr(path string, dest []byte) (sz int, err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 unsafe.Pointer
|
||||
if len(dest) > 0 {
|
||||
_p1 = unsafe.Pointer(&dest[0])
|
||||
} else {
|
||||
_p1 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
|
||||
use(unsafe.Pointer(_p0))
|
||||
sz = int(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lremovexattr(path string, attr string) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p2 unsafe.Pointer
|
||||
if len(data) > 0 {
|
||||
_p2 = unsafe.Pointer(&data[0])
|
||||
} else {
|
||||
_p2 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p2 unsafe.Pointer
|
||||
if len(dest) > 0 {
|
||||
_p2 = unsafe.Pointer(&dest[0])
|
||||
} else {
|
||||
_p2 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
sz = int(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
111
vendor/github.com/containerd/continuity/sysx/xattr_linux_ppc64.go
generated
vendored
111
vendor/github.com/containerd/continuity/sysx/xattr_linux_ppc64.go
generated
vendored
@ -1,111 +0,0 @@
|
||||
// mksyscall.pl xattr_linux.go
|
||||
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func llistxattr(path string, dest []byte) (sz int, err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 unsafe.Pointer
|
||||
if len(dest) > 0 {
|
||||
_p1 = unsafe.Pointer(&dest[0])
|
||||
} else {
|
||||
_p1 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
|
||||
use(unsafe.Pointer(_p0))
|
||||
sz = int(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lremovexattr(path string, attr string) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p2 unsafe.Pointer
|
||||
if len(data) > 0 {
|
||||
_p2 = unsafe.Pointer(&data[0])
|
||||
} else {
|
||||
_p2 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p2 unsafe.Pointer
|
||||
if len(dest) > 0 {
|
||||
_p2 = unsafe.Pointer(&dest[0])
|
||||
} else {
|
||||
_p2 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
sz = int(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
111
vendor/github.com/containerd/continuity/sysx/xattr_linux_ppc64le.go
generated
vendored
111
vendor/github.com/containerd/continuity/sysx/xattr_linux_ppc64le.go
generated
vendored
@ -1,111 +0,0 @@
|
||||
// mksyscall.pl xattr_linux.go
|
||||
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func llistxattr(path string, dest []byte) (sz int, err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 unsafe.Pointer
|
||||
if len(dest) > 0 {
|
||||
_p1 = unsafe.Pointer(&dest[0])
|
||||
} else {
|
||||
_p1 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
|
||||
use(unsafe.Pointer(_p0))
|
||||
sz = int(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lremovexattr(path string, attr string) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p2 unsafe.Pointer
|
||||
if len(data) > 0 {
|
||||
_p2 = unsafe.Pointer(&data[0])
|
||||
} else {
|
||||
_p2 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p2 unsafe.Pointer
|
||||
if len(dest) > 0 {
|
||||
_p2 = unsafe.Pointer(&dest[0])
|
||||
} else {
|
||||
_p2 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
sz = int(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
111
vendor/github.com/containerd/continuity/sysx/xattr_linux_s390x.go
generated
vendored
111
vendor/github.com/containerd/continuity/sysx/xattr_linux_s390x.go
generated
vendored
@ -1,111 +0,0 @@
|
||||
// mksyscall.pl xattr_linux.go
|
||||
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func llistxattr(path string, dest []byte) (sz int, err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 unsafe.Pointer
|
||||
if len(dest) > 0 {
|
||||
_p1 = unsafe.Pointer(&dest[0])
|
||||
} else {
|
||||
_p1 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
|
||||
use(unsafe.Pointer(_p0))
|
||||
sz = int(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lremovexattr(path string, attr string) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p2 unsafe.Pointer
|
||||
if len(data) > 0 {
|
||||
_p2 = unsafe.Pointer(&data[0])
|
||||
} else {
|
||||
_p2 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *byte
|
||||
_p1, err = syscall.BytePtrFromString(attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p2 unsafe.Pointer
|
||||
if len(dest) > 0 {
|
||||
_p2 = unsafe.Pointer(&dest[0])
|
||||
} else {
|
||||
_p2 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
|
||||
use(unsafe.Pointer(_p0))
|
||||
use(unsafe.Pointer(_p1))
|
||||
sz = int(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
6
vendor/github.com/docker/docker-credential-helpers/README.md
generated
vendored
6
vendor/github.com/docker/docker-credential-helpers/README.md
generated
vendored
@ -55,6 +55,12 @@ You can see examples of each function in the [client](https://godoc.org/github.c
1. osxkeychain: Provides a helper to use the OS X keychain as credentials store.
2. secretservice: Provides a helper to use the D-Bus secret service as credentials store.
3. wincred: Provides a helper to use Windows credentials manager as store.
4. pass: Provides a helper to use `pass` as credentials store.

#### Note

`pass` needs to be configured for `docker-credential-pass` to work properly.
It must be initialized with a `gpg2` key ID. Make sure your GPG key exists in the `gpg2` keyring, as `pass` uses `gpg2` instead of the regular `gpg`.

## Development

2
vendor/github.com/docker/docker-credential-helpers/credentials/version.go
generated
vendored
2
vendor/github.com/docker/docker-credential-helpers/credentials/version.go
generated
vendored
@ -1,4 +1,4 @@
package credentials

// Version holds a string describing the current version
const Version = "0.5.2"
const Version = "0.6.0"

208
vendor/github.com/docker/docker-credential-helpers/pass/pass_linux.go
generated
vendored
208
vendor/github.com/docker/docker-credential-helpers/pass/pass_linux.go
generated
vendored
@ -1,208 +0,0 @@
|
||||
// A `pass` based credential helper. Passwords are stored as arguments to pass
|
||||
// of the form: "$PASS_FOLDER/base64-url(serverURL)/username". We base64-url
|
||||
// encode the serverURL, because under the hood pass uses files and folders, so
|
||||
// /s will get translated into additional folders.
|
||||
package pass
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker-credential-helpers/credentials"
|
||||
)
|
||||
|
||||
const PASS_FOLDER = "docker-credential-helpers"
|
||||
|
||||
var (
|
||||
PassInitialized bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
PassInitialized = exec.Command("pass").Run() == nil
|
||||
}
|
||||
|
||||
func runPass(stdinContent string, args ...string) (string, error) {
|
||||
cmd := exec.Command("pass", args...)
|
||||
|
||||
stdin, err := cmd.StdinPipe()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer stdin.Close()
|
||||
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer stderr.Close()
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer stdout.Close()
|
||||
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
_, err = stdin.Write([]byte(stdinContent))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
stdin.Close()
|
||||
|
||||
errContent, err := ioutil.ReadAll(stderr)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error reading stderr: %s", err)
|
||||
}
|
||||
|
||||
result, err := ioutil.ReadAll(stdout)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error reading stdout: %s", err)
|
||||
}
|
||||
|
||||
cmdErr := cmd.Wait()
|
||||
if cmdErr != nil {
|
||||
return "", fmt.Errorf("%s: %s", cmdErr, errContent)
|
||||
}
|
||||
|
||||
return string(result), nil
|
||||
}
|
||||
|
||||
// Pass handles secrets using Linux secret-service as a store.
|
||||
type Pass struct{}
|
||||
|
||||
// Add adds new credentials to the keychain.
|
||||
func (h Pass) Add(creds *credentials.Credentials) error {
|
||||
if !PassInitialized {
|
||||
return errors.New("pass store is uninitialized")
|
||||
}
|
||||
|
||||
if creds == nil {
|
||||
return errors.New("missing credentials")
|
||||
}
|
||||
|
||||
encoded := base64.URLEncoding.EncodeToString([]byte(creds.ServerURL))
|
||||
|
||||
_, err := runPass(creds.Secret, "insert", "-f", "-m", path.Join(PASS_FOLDER, encoded, creds.Username))
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete removes credentials from the store.
|
||||
func (h Pass) Delete(serverURL string) error {
|
||||
if !PassInitialized {
|
||||
return errors.New("pass store is uninitialized")
|
||||
}
|
||||
|
||||
if serverURL == "" {
|
||||
return errors.New("missing server url")
|
||||
}
|
||||
|
||||
encoded := base64.URLEncoding.EncodeToString([]byte(serverURL))
|
||||
_, err := runPass("", "rm", "-rf", path.Join(PASS_FOLDER, encoded))
|
||||
return err
|
||||
}
|
||||
|
||||
// listPassDir lists all the contents of a directory in the password store.
|
||||
// Pass uses fancy unicode to emit stuff to stdout, so rather than try
|
||||
// and parse this, let's just look at the directory structure instead.
|
||||
func listPassDir(args ...string) ([]os.FileInfo, error) {
|
||||
passDir := os.ExpandEnv("$HOME/.password-store")
|
||||
for _, e := range os.Environ() {
|
||||
parts := strings.SplitN(e, "=", 2)
|
||||
if len(parts) < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
if parts[0] != "PASSWORD_STORE_DIR" {
|
||||
continue
|
||||
}
|
||||
|
||||
passDir = parts[1]
|
||||
break
|
||||
}
|
||||
|
||||
p := path.Join(append([]string{passDir, PASS_FOLDER}, args...)...)
|
||||
contents, err := ioutil.ReadDir(p)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return []os.FileInfo{}, nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return contents, nil
|
||||
}
|
||||
|
||||
// Get returns the username and secret to use for a given registry server URL.
|
||||
func (h Pass) Get(serverURL string) (string, string, error) {
|
||||
if !PassInitialized {
|
||||
return "", "", errors.New("pass store is uninitialized")
|
||||
}
|
||||
|
||||
if serverURL == "" {
|
||||
return "", "", errors.New("missing server url")
|
||||
}
|
||||
|
||||
encoded := base64.URLEncoding.EncodeToString([]byte(serverURL))
|
||||
|
||||
usernames, err := listPassDir(encoded)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
if len(usernames) < 1 {
|
||||
return "", "", fmt.Errorf("no usernames for %s", serverURL)
|
||||
}
|
||||
|
||||
actual := strings.TrimSuffix(usernames[0].Name(), ".gpg")
|
||||
secret, err := runPass("", "show", path.Join(PASS_FOLDER, encoded, actual))
|
||||
return actual, secret, err
|
||||
}
|
||||
|
||||
// List returns the stored URLs and corresponding usernames for a given credentials label
|
||||
func (h Pass) List() (map[string]string, error) {
|
||||
if !PassInitialized {
|
||||
return nil, errors.New("pass store is uninitialized")
|
||||
}
|
||||
|
||||
servers, err := listPassDir()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp := map[string]string{}
|
||||
|
||||
for _, server := range servers {
|
||||
if !server.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
serverURL, err := base64.URLEncoding.DecodeString(server.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
usernames, err := listPassDir(server.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(usernames) < 1 {
|
||||
return nil, fmt.Errorf("no usernames for %s", serverURL)
|
||||
}
|
||||
|
||||
resp[string(serverURL)] = strings.TrimSuffix(usernames[0].Name(), ".gpg")
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
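The package comment at the top of this file explains the on-disk layout: each secret is stored at `$PASS_FOLDER/base64-url(serverURL)/username`, so that slashes in the server URL do not create extra directories. A small standalone sketch of that path construction follows; the constant and function names here are illustrative, not taken from the helper itself.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"path"
)

// passFolder mirrors the PASS_FOLDER constant used by the helper above.
const passFolder = "docker-credential-helpers"

// passEntryPath is an illustrative helper showing where a credential for a
// given registry and username would live inside the pass password store.
func passEntryPath(serverURL, username string) string {
	// The server URL is base64-url encoded so its slashes do not become
	// additional directories on disk.
	encoded := base64.URLEncoding.EncodeToString([]byte(serverURL))
	return path.Join(passFolder, encoded, username)
}

func main() {
	fmt.Println(passEntryPath("https://index.docker.io/v1/", "alice"))
}
```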
2
vendor/github.com/docker/docker/api/types/client.go
generated
vendored
2
vendor/github.com/docker/docker/api/types/client.go
generated
vendored
@ -7,7 +7,7 @@ import (

"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/go-units"
units "github.com/docker/go-units"
)

// CheckpointCreateOptions holds parameters to create a checkpoint from a container

9
vendor/github.com/docker/docker/client/image_build.go
generated
vendored
9
vendor/github.com/docker/docker/client/image_build.go
generated
vendored
@ -30,12 +30,6 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio
}
headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))

if options.Platform != "" {
if err := cli.NewVersionError("1.32", "platform"); err != nil {
return types.ImageBuildResponse{}, err
}
query.Set("platform", options.Platform)
}
headers.Set("Content-Type", "application/x-tar")

serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
@ -131,6 +125,9 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur
query.Set("session", options.SessionID)
}
if options.Platform != "" {
if err := cli.NewVersionError("1.32", "platform"); err != nil {
return query, err
}
query.Set("platform", strings.ToLower(options.Platform))
}
if options.BuildID != "" {

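The two hunks above keep the API 1.32 version gate for the `platform` build option while moving the query handling into `imageBuildOptionsToQuery` (and lower-casing the value). Below is a hedged sketch of how a caller might pass the option through `ImageBuild`; the tar path and platform value are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// build-context.tar is a placeholder tar stream of the build context.
	buildCtx, err := os.Open("build-context.tar")
	if err != nil {
		panic(err)
	}
	defer buildCtx.Close()

	// Platform is only accepted by daemons speaking API 1.32 or newer; older
	// daemons cause ImageBuild to return the NewVersionError seen above.
	resp, err := cli.ImageBuild(context.Background(), buildCtx, types.ImageBuildOptions{
		Platform: "linux",
	})
	if err != nil {
		fmt.Println("build request failed:", err)
		return
	}
	defer resp.Body.Close()
}
```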
27
vendor/github.com/docker/docker/pkg/archive/changes_linux.go
generated
vendored
27
vendor/github.com/docker/docker/pkg/archive/changes_linux.go
generated
vendored
@ -284,30 +284,3 @@ func clen(n []byte) int {
}
return len(n)
}

// OverlayChanges walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func OverlayChanges(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, overlayDeletedFile, nil)
}

func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
if fi.Mode()&os.ModeCharDevice != 0 {
s := fi.Sys().(*syscall.Stat_t)
if unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0 { // nolint: unconvert
return path, nil
}
}
if fi.Mode()&os.ModeDir != 0 {
opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
if err != nil {
return "", err
}
if len(opaque) == 1 && opaque[0] == 'y' {
return path, nil
}
}

return "", nil

}

4
vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go
generated
vendored
4
vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go
generated
vendored
@ -139,14 +139,14 @@ type AuxFormatter struct {
}

// Emit emits the given interface as an aux progress message
func (sf *AuxFormatter) Emit(aux interface{}) error {
func (sf *AuxFormatter) Emit(id string, aux interface{}) error {
auxJSONBytes, err := json.Marshal(aux)
if err != nil {
return err
}
auxJSON := new(json.RawMessage)
*auxJSON = auxJSONBytes
msgJSON, err := json.Marshal(&jsonmessage.JSONMessage{Aux: auxJSON})
msgJSON, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Aux: auxJSON})
if err != nil {
return err
}

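The signature change above threads an `id` through `AuxFormatter.Emit`, so aux progress records now carry a stream ID in the emitted `jsonmessage.JSONMessage`. The sketch below shows the resulting JSON shape using only the `jsonmessage` types; the ID string and payload are made up for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/pkg/jsonmessage"
)

func main() {
	// A made-up aux payload; in practice this is typically a digest or
	// similar build metadata.
	payload, _ := json.Marshal(map[string]string{"ID": "sha256:deadbeef"})
	raw := json.RawMessage(payload)

	// Mirrors what the updated Emit(id, aux) marshals: the ID field now
	// rides alongside the Aux payload.
	msg := jsonmessage.JSONMessage{ID: "example-id", Aux: &raw}
	out, _ := json.Marshal(&msg)
	fmt.Println(string(out))
}
```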
67
vendor/github.com/docker/docker/pkg/system/lcow.go
generated
vendored
67
vendor/github.com/docker/docker/pkg/system/lcow.go
generated
vendored
@ -1,69 +1,32 @@
|
||||
package system // import "github.com/docker/docker/pkg/system"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ValidatePlatform determines if a platform structure is valid.
|
||||
// TODO This is a temporary function - can be replaced by parsing from
|
||||
// https://github.com/containerd/containerd/pull/1403/files at a later date.
|
||||
// @jhowardmsft
|
||||
func ValidatePlatform(platform *specs.Platform) error {
|
||||
platform.Architecture = strings.ToLower(platform.Architecture)
|
||||
platform.OS = strings.ToLower(platform.OS)
|
||||
// Based on https://github.com/moby/moby/pull/34642#issuecomment-330375350, do
|
||||
// not support anything except operating system.
|
||||
if platform.Architecture != "" {
|
||||
return fmt.Errorf("invalid platform architecture %q", platform.Architecture)
|
||||
}
|
||||
if platform.OS != "" {
|
||||
if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) {
|
||||
return fmt.Errorf("invalid platform os %q", platform.OS)
|
||||
}
|
||||
}
|
||||
if len(platform.OSFeatures) != 0 {
|
||||
return fmt.Errorf("invalid platform osfeatures %q", platform.OSFeatures)
|
||||
}
|
||||
if platform.OSVersion != "" {
|
||||
return fmt.Errorf("invalid platform osversion %q", platform.OSVersion)
|
||||
}
|
||||
if platform.Variant != "" {
|
||||
return fmt.Errorf("invalid platform variant %q", platform.Variant)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParsePlatform parses a platform string in the format os[/arch[/variant]
|
||||
// into an OCI image-spec platform structure.
|
||||
// TODO This is a temporary function - can be replaced by parsing from
|
||||
// https://github.com/containerd/containerd/pull/1403/files at a later date.
|
||||
// @jhowardmsft
|
||||
func ParsePlatform(in string) *specs.Platform {
|
||||
p := &specs.Platform{}
|
||||
elements := strings.SplitN(strings.ToLower(in), "/", 3)
|
||||
if len(elements) == 3 {
|
||||
p.Variant = elements[2]
|
||||
}
|
||||
if len(elements) >= 2 {
|
||||
p.Architecture = elements[1]
|
||||
}
|
||||
if len(elements) >= 1 {
|
||||
p.OS = elements[0]
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// IsOSSupported determines if an operating system is supported by the host
|
||||
func IsOSSupported(os string) bool {
|
||||
if runtime.GOOS == os {
|
||||
if strings.EqualFold(runtime.GOOS, os) {
|
||||
return true
|
||||
}
|
||||
if LCOWSupported() && os == "linux" {
|
||||
if LCOWSupported() && strings.EqualFold(os, "linux") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ValidatePlatform determines if a platform structure is valid.
|
||||
// TODO This is a temporary windows-only function, should be replaced by
|
||||
// comparison of worker capabilities
|
||||
func ValidatePlatform(platform specs.Platform) error {
|
||||
if runtime.GOOS == "windows" {
|
||||
if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) {
|
||||
return errors.Errorf("unsupported os %s", platform.OS)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
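The updated `IsOSSupported` above replaces exact string comparison with `strings.EqualFold`, so OS names coming from image metadata now match case-insensitively. A tiny standalone sketch of the behavioural difference (not the vendored function itself):

```go
package main

import (
	"fmt"
	"runtime"
	"strings"
)

func main() {
	imgOS := "Linux" // mixed case, as it might arrive from external metadata

	// Old comparison: exact match only.
	fmt.Println("exact match:", runtime.GOOS == imgOS)

	// New comparison: case-insensitive, as in the updated IsOSSupported.
	fmt.Println("case-insensitive:", strings.EqualFold(runtime.GOOS, imgOS))
}
```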
23
vendor/github.com/docker/docker/vendor.conf
generated
vendored
23
vendor/github.com/docker/docker/vendor.conf
generated
vendored
@ -1,7 +1,7 @@
|
||||
# the following lines are in sorted order, FYI
|
||||
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
|
||||
github.com/Microsoft/hcsshim v0.6.11
|
||||
github.com/Microsoft/go-winio v0.4.7
|
||||
github.com/Microsoft/go-winio v0.4.8
|
||||
github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
|
||||
github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git
|
||||
github.com/golang/gddo 9b12a26f3fbd7397dee4e20939ddca719d840d2a
|
||||
@ -18,8 +18,7 @@ golang.org/x/sys 37707fdb30a5b38865cfb95e5aab41707daec7fd
|
||||
github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
|
||||
github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
|
||||
golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
|
||||
github.com/pmezard/go-difflib v1.0.0
|
||||
github.com/gotestyourself/gotestyourself cf3a5ab914a2efa8bc838d09f5918c1d44d029
|
||||
gotest.tools v2.1.0
|
||||
github.com/google/go-cmp v0.2.0
|
||||
|
||||
github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
|
||||
@ -27,7 +26,7 @@ github.com/imdario/mergo v0.3.5
|
||||
golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
|
||||
|
||||
# buildkit
|
||||
github.com/moby/buildkit b062a2d8ddbaa477c25c63d68a9cffbb43f6e474
|
||||
github.com/moby/buildkit 9acf51e49185b348608e0096b2903dd72907adcb
|
||||
github.com/tonistiigi/fsutil 8abad97ee3969cdf5e9c367f46adba2c212b3ddb
|
||||
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
|
||||
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
|
||||
@ -38,14 +37,14 @@ github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
|
||||
#get libnetwork packages
|
||||
|
||||
# When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy accordingly
|
||||
github.com/docker/libnetwork 19279f0492417475b6bfbd0aa529f73e8f178fb5
|
||||
github.com/docker/libnetwork d00ceed44cc447c77f25cdf5d59e83163bdcb4c9
|
||||
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
||||
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
|
||||
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
|
||||
github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
|
||||
github.com/hashicorp/memberlist 3d8438da9589e7b608a83ffac1ef8211486bcb7c
|
||||
github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372
|
||||
github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d
|
||||
github.com/hashicorp/go-sockaddr 6d291a969b86c4b633730bfc6b8b9d64c3aafed9
|
||||
github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e
|
||||
github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
|
||||
github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
|
||||
@ -76,7 +75,7 @@ github.com/pborman/uuid v1.0
|
||||
google.golang.org/grpc v1.12.0
|
||||
|
||||
# This does not need to match RUNC_COMMIT as it is used for helper packages but should be newer or equal
|
||||
github.com/opencontainers/runc 0e561642f81e84ebd0b3afd6ec510c75a2ccb71b
|
||||
github.com/opencontainers/runc ad0f5255060d36872be04de22f8731f38ef2d7b1
|
||||
github.com/opencontainers/runtime-spec v1.0.1
|
||||
github.com/opencontainers/image-spec v1.0.1
|
||||
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
|
||||
@ -115,18 +114,18 @@ github.com/googleapis/gax-go v2.0.0
|
||||
google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9
|
||||
|
||||
# containerd
|
||||
github.com/containerd/containerd 63522d9eaa5a0443d225642c4b6f4f5fdedf932b
|
||||
github.com/containerd/containerd b41633746ed4833f52c3c071e8edcfa2713e5677
|
||||
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
|
||||
github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
|
||||
github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130
|
||||
github.com/containerd/console 9290d21dc56074581f619579c43d970b4514bc08
|
||||
github.com/containerd/console 5d1b48d6114b8c9666f0c8b916f871af97b0a761
|
||||
github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd
|
||||
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
|
||||
github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577
|
||||
github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
|
||||
github.com/containerd/ttrpc 94dde388801693c54f88a6596f713b51a8b30b2d
|
||||
github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
|
||||
|
||||
# cluster
|
||||
github.com/docker/swarmkit edd5641391926a50bc5f7040e20b7efc05003c26
|
||||
github.com/docker/swarmkit 199cf49cd99690135d99e52a1907ec82e8113c4f
|
||||
github.com/gogo/protobuf v1.0.0
|
||||
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
|
||||
github.com/fernet/fernet-go 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2
|
||||
|
||||
261
vendor/github.com/docker/swarmkit/api/objects.pb.go
generated
vendored
261
vendor/github.com/docker/swarmkit/api/objects.pb.go
generated
vendored
@ -2014,6 +2014,10 @@ func sozObjects(x uint64) (n int) {
|
||||
|
||||
type NodeCheckFunc func(t1, t2 *Node) bool
|
||||
|
||||
type EventNode interface {
|
||||
IsEventNode() bool
|
||||
}
|
||||
|
||||
type EventCreateNode struct {
|
||||
Node *Node
|
||||
Checks []NodeCheckFunc
|
||||
@ -2033,6 +2037,14 @@ func (e EventCreateNode) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateNode) IsEventCreate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateNode) IsEventNode() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventUpdateNode struct {
|
||||
Node *Node
|
||||
OldNode *Node
|
||||
@ -2053,6 +2065,14 @@ func (e EventUpdateNode) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateNode) IsEventUpdate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateNode) IsEventNode() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventDeleteNode struct {
|
||||
Node *Node
|
||||
Checks []NodeCheckFunc
|
||||
@ -2071,6 +2091,15 @@ func (e EventDeleteNode) Matches(apiEvent go_events.Event) bool {
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteNode) IsEventDelete() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteNode) IsEventNode() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Node) CopyStoreObject() StoreObject {
|
||||
return m.Copy()
|
||||
}
|
||||
@ -2261,6 +2290,10 @@ func (indexer NodeCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, er
|
||||
|
||||
type ServiceCheckFunc func(t1, t2 *Service) bool
|
||||
|
||||
type EventService interface {
|
||||
IsEventService() bool
|
||||
}
|
||||
|
||||
type EventCreateService struct {
|
||||
Service *Service
|
||||
Checks []ServiceCheckFunc
|
||||
@ -2280,6 +2313,14 @@ func (e EventCreateService) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateService) IsEventCreate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateService) IsEventService() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventUpdateService struct {
|
||||
Service *Service
|
||||
OldService *Service
|
||||
@ -2300,6 +2341,14 @@ func (e EventUpdateService) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateService) IsEventUpdate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateService) IsEventService() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventDeleteService struct {
|
||||
Service *Service
|
||||
Checks []ServiceCheckFunc
|
||||
@ -2318,6 +2367,15 @@ func (e EventDeleteService) Matches(apiEvent go_events.Event) bool {
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteService) IsEventDelete() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteService) IsEventService() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Service) CopyStoreObject() StoreObject {
|
||||
return m.Copy()
|
||||
}
|
||||
@ -2478,6 +2536,10 @@ func (indexer ServiceCustomIndexer) FromObject(obj interface{}) (bool, [][]byte,
|
||||
|
||||
type TaskCheckFunc func(t1, t2 *Task) bool
|
||||
|
||||
type EventTask interface {
|
||||
IsEventTask() bool
|
||||
}
|
||||
|
||||
type EventCreateTask struct {
|
||||
Task *Task
|
||||
Checks []TaskCheckFunc
|
||||
@ -2497,6 +2559,14 @@ func (e EventCreateTask) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateTask) IsEventCreate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateTask) IsEventTask() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventUpdateTask struct {
|
||||
Task *Task
|
||||
OldTask *Task
|
||||
@ -2517,6 +2587,14 @@ func (e EventUpdateTask) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateTask) IsEventUpdate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateTask) IsEventTask() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventDeleteTask struct {
|
||||
Task *Task
|
||||
Checks []TaskCheckFunc
|
||||
@ -2535,6 +2613,15 @@ func (e EventDeleteTask) Matches(apiEvent go_events.Event) bool {
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteTask) IsEventDelete() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteTask) IsEventTask() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Task) CopyStoreObject() StoreObject {
|
||||
return m.Copy()
|
||||
}
|
||||
@ -2738,6 +2825,10 @@ func (indexer TaskCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, er
|
||||
|
||||
type NetworkCheckFunc func(t1, t2 *Network) bool
|
||||
|
||||
type EventNetwork interface {
|
||||
IsEventNetwork() bool
|
||||
}
|
||||
|
||||
type EventCreateNetwork struct {
|
||||
Network *Network
|
||||
Checks []NetworkCheckFunc
|
||||
@ -2757,6 +2848,14 @@ func (e EventCreateNetwork) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateNetwork) IsEventCreate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateNetwork) IsEventNetwork() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventUpdateNetwork struct {
|
||||
Network *Network
|
||||
OldNetwork *Network
|
||||
@ -2777,6 +2876,14 @@ func (e EventUpdateNetwork) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateNetwork) IsEventUpdate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateNetwork) IsEventNetwork() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventDeleteNetwork struct {
|
||||
Network *Network
|
||||
Checks []NetworkCheckFunc
|
||||
@ -2795,6 +2902,15 @@ func (e EventDeleteNetwork) Matches(apiEvent go_events.Event) bool {
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteNetwork) IsEventDelete() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteNetwork) IsEventNetwork() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Network) CopyStoreObject() StoreObject {
|
||||
return m.Copy()
|
||||
}
|
||||
@ -2955,6 +3071,10 @@ func (indexer NetworkCustomIndexer) FromObject(obj interface{}) (bool, [][]byte,
|
||||
|
||||
type ClusterCheckFunc func(t1, t2 *Cluster) bool
|
||||
|
||||
type EventCluster interface {
|
||||
IsEventCluster() bool
|
||||
}
|
||||
|
||||
type EventCreateCluster struct {
|
||||
Cluster *Cluster
|
||||
Checks []ClusterCheckFunc
|
||||
@ -2974,6 +3094,14 @@ func (e EventCreateCluster) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateCluster) IsEventCreate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateCluster) IsEventCluster() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventUpdateCluster struct {
|
||||
Cluster *Cluster
|
||||
OldCluster *Cluster
|
||||
@ -2994,6 +3122,14 @@ func (e EventUpdateCluster) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateCluster) IsEventUpdate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateCluster) IsEventCluster() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventDeleteCluster struct {
|
||||
Cluster *Cluster
|
||||
Checks []ClusterCheckFunc
|
||||
@ -3012,6 +3148,15 @@ func (e EventDeleteCluster) Matches(apiEvent go_events.Event) bool {
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteCluster) IsEventDelete() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteCluster) IsEventCluster() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Cluster) CopyStoreObject() StoreObject {
|
||||
return m.Copy()
|
||||
}
|
||||
@ -3172,6 +3317,10 @@ func (indexer ClusterCustomIndexer) FromObject(obj interface{}) (bool, [][]byte,
|
||||
|
||||
type SecretCheckFunc func(t1, t2 *Secret) bool
|
||||
|
||||
type EventSecret interface {
|
||||
IsEventSecret() bool
|
||||
}
|
||||
|
||||
type EventCreateSecret struct {
|
||||
Secret *Secret
|
||||
Checks []SecretCheckFunc
|
||||
@ -3191,6 +3340,14 @@ func (e EventCreateSecret) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateSecret) IsEventCreate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateSecret) IsEventSecret() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventUpdateSecret struct {
|
||||
Secret *Secret
|
||||
OldSecret *Secret
|
||||
@ -3211,6 +3368,14 @@ func (e EventUpdateSecret) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateSecret) IsEventUpdate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateSecret) IsEventSecret() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventDeleteSecret struct {
|
||||
Secret *Secret
|
||||
Checks []SecretCheckFunc
|
||||
@ -3229,6 +3394,15 @@ func (e EventDeleteSecret) Matches(apiEvent go_events.Event) bool {
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteSecret) IsEventDelete() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteSecret) IsEventSecret() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Secret) CopyStoreObject() StoreObject {
|
||||
return m.Copy()
|
||||
}
|
||||
@ -3389,6 +3563,10 @@ func (indexer SecretCustomIndexer) FromObject(obj interface{}) (bool, [][]byte,
|
||||
|
||||
type ConfigCheckFunc func(t1, t2 *Config) bool
|
||||
|
||||
type EventConfig interface {
|
||||
IsEventConfig() bool
|
||||
}
|
||||
|
||||
type EventCreateConfig struct {
|
||||
Config *Config
|
||||
Checks []ConfigCheckFunc
|
||||
@ -3408,6 +3586,14 @@ func (e EventCreateConfig) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateConfig) IsEventCreate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateConfig) IsEventConfig() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventUpdateConfig struct {
|
||||
Config *Config
|
||||
OldConfig *Config
|
||||
@ -3428,6 +3614,14 @@ func (e EventUpdateConfig) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateConfig) IsEventUpdate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateConfig) IsEventConfig() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventDeleteConfig struct {
|
||||
Config *Config
|
||||
Checks []ConfigCheckFunc
|
||||
@ -3446,6 +3640,15 @@ func (e EventDeleteConfig) Matches(apiEvent go_events.Event) bool {
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteConfig) IsEventDelete() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteConfig) IsEventConfig() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Config) CopyStoreObject() StoreObject {
|
||||
return m.Copy()
|
||||
}
|
||||
@ -3606,6 +3809,10 @@ func (indexer ConfigCustomIndexer) FromObject(obj interface{}) (bool, [][]byte,
|
||||
|
||||
type ResourceCheckFunc func(t1, t2 *Resource) bool
|
||||
|
||||
type EventResource interface {
|
||||
IsEventResource() bool
|
||||
}
|
||||
|
||||
type EventCreateResource struct {
|
||||
Resource *Resource
|
||||
Checks []ResourceCheckFunc
|
||||
@ -3625,6 +3832,14 @@ func (e EventCreateResource) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateResource) IsEventCreate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateResource) IsEventResource() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventUpdateResource struct {
|
||||
Resource *Resource
|
||||
OldResource *Resource
|
||||
@ -3645,6 +3860,14 @@ func (e EventUpdateResource) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateResource) IsEventUpdate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateResource) IsEventResource() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventDeleteResource struct {
|
||||
Resource *Resource
|
||||
Checks []ResourceCheckFunc
|
||||
@ -3663,6 +3886,15 @@ func (e EventDeleteResource) Matches(apiEvent go_events.Event) bool {
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteResource) IsEventDelete() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteResource) IsEventResource() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Resource) CopyStoreObject() StoreObject {
|
||||
return m.Copy()
|
||||
}
|
||||
@ -3829,6 +4061,10 @@ func (indexer ResourceCustomIndexer) FromObject(obj interface{}) (bool, [][]byte
|
||||
|
||||
type ExtensionCheckFunc func(t1, t2 *Extension) bool
|
||||
|
||||
type EventExtension interface {
|
||||
IsEventExtension() bool
|
||||
}
|
||||
|
||||
type EventCreateExtension struct {
|
||||
Extension *Extension
|
||||
Checks []ExtensionCheckFunc
|
||||
@ -3848,6 +4084,14 @@ func (e EventCreateExtension) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateExtension) IsEventCreate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventCreateExtension) IsEventExtension() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventUpdateExtension struct {
|
||||
Extension *Extension
|
||||
OldExtension *Extension
|
||||
@ -3868,6 +4112,14 @@ func (e EventUpdateExtension) Matches(apiEvent go_events.Event) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateExtension) IsEventUpdate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventUpdateExtension) IsEventExtension() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type EventDeleteExtension struct {
|
||||
Extension *Extension
|
||||
Checks []ExtensionCheckFunc
|
||||
@ -3886,6 +4138,15 @@ func (e EventDeleteExtension) Matches(apiEvent go_events.Event) bool {
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteExtension) IsEventDelete() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e EventDeleteExtension) IsEventExtension() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Extension) CopyStoreObject() StoreObject {
|
||||
return m.Copy()
|
||||
}
|
||||
|
||||
15
vendor/github.com/docker/swarmkit/api/storeobject.go
generated
vendored
15
vendor/github.com/docker/swarmkit/api/storeobject.go
generated
vendored
@ -38,6 +38,21 @@ type Event interface {
Matches(events.Event) bool
}

// EventCreate is an interface implemented by every creation event type
type EventCreate interface {
IsEventCreate() bool
}

// EventUpdate is an interface implemented by every update event type
type EventUpdate interface {
IsEventUpdate() bool
}

// EventDelete is an interface implemented by every delete event type
type EventDelete interface {
IsEventDelete() bool
}

func customIndexer(kind string, annotations *Annotations) (bool, [][]byte, error) {
var converted [][]byte

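The new `EventCreate`, `EventUpdate`, and `EventDelete` marker interfaces let consumers classify any store event without switching on every concrete type (the generated `EventCreateNode`, `EventUpdateService`, and similar types all implement them). A hedged sketch of such a classifier; the function is illustrative and not part of swarmkit.

```go
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

// classify reports whether a store event is a creation, update, or deletion
// by checking the marker interfaces added in the hunk above.
func classify(ev api.Event) string {
	switch ev.(type) {
	case api.EventCreate:
		return "create"
	case api.EventUpdate:
		return "update"
	case api.EventDelete:
		return "delete"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(classify(api.EventCreateNode{}))
}
```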
18
vendor/github.com/moby/buildkit/README.md
generated
vendored
18
vendor/github.com/moby/buildkit/README.md
generated
vendored
@ -1,5 +1,3 @@
### Important: This repository is in an early development phase

[](https://asciinema.org/a/gPEIEo1NzmDTUu2bEPsUboqmU)


@ -29,6 +27,16 @@ Read the proposal from https://github.com/moby/moby/issues/32925

Introductory blog post https://blog.mobyproject.org/introducing-buildkit-17e056cc5317

### Used by

[Moby](https://github.com/moby/moby/pull/37151)

[img](https://github.com/genuinetools/img)

[OpenFaaS Cloud](https://github.com/openfaas/openfaas-cloud)

[container build interface](https://github.com/containerbuilding/cbi)

### Quick start

Dependencies:
@ -130,11 +138,11 @@ docker inspect myimage

##### Building a Dockerfile using [external frontend](https://hub.docker.com/r/tonistiigi/dockerfile/tags/):

During development, an external version of the Dockerfile frontend is pushed to https://hub.docker.com/r/tonistiigi/dockerfile that can be used with the gateway frontend. The source for the external frontend is currently located in `./frontend/dockerfile/cmd/dockerfile-frontend` but will move out of this repository in the future ([#163](https://github.com/moby/buildkit/issues/163)).
During development, an external version of the Dockerfile frontend is pushed to https://hub.docker.com/r/tonistiigi/dockerfile that can be used with the gateway frontend. The source for the external frontend is currently located in `./frontend/dockerfile/cmd/dockerfile-frontend` but will move out of this repository in the future ([#163](https://github.com/moby/buildkit/issues/163)). For automatic builds from the master branch of this repository, the `tonistiigi/dockerfile:master` image can be used.

```
buildctl build --frontend=gateway.v0 --frontend-opt=source=tonistiigi/dockerfile:v0 --local context=. --local dockerfile=.
buildctl build --frontend gateway.v0 --frontend-opt=source=tonistiigi/dockerfile:v0 --frontend-opt=context=git://github.com/moby/moby --frontend-opt build-arg:APT_MIRROR=cdn-fastly.deb.debian.org
buildctl build --frontend=gateway.v0 --frontend-opt=source=tonistiigi/dockerfile --local context=. --local dockerfile=.
buildctl build --frontend gateway.v0 --frontend-opt=source=tonistiigi/dockerfile --frontend-opt=context=git://github.com/moby/moby --frontend-opt build-arg:APT_MIRROR=cdn-fastly.deb.debian.org
```

### Exporters

214
vendor/github.com/moby/buildkit/api/services/control/control.pb.go
generated
vendored
214
vendor/github.com/moby/buildkit/api/services/control/control.pb.go
generated
vendored
@ -542,8 +542,9 @@ func (m *ListWorkersResponse) GetRecord() []*WorkerRecord {
|
||||
}
|
||||
|
||||
type WorkerRecord struct {
|
||||
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
|
||||
Labels map[string]string `protobuf:"bytes,2,rep,name=Labels" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
|
||||
Labels map[string]string `protobuf:"bytes,2,rep,name=Labels" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
Platforms []pb.Platform `protobuf:"bytes,3,rep,name=platforms" json:"platforms"`
|
||||
}
|
||||
|
||||
func (m *WorkerRecord) Reset() { *m = WorkerRecord{} }
|
||||
@ -565,6 +566,13 @@ func (m *WorkerRecord) GetLabels() map[string]string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *WorkerRecord) GetPlatforms() []pb.Platform {
|
||||
if m != nil {
|
||||
return m.Platforms
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*PruneRequest)(nil), "moby.buildkit.v1.PruneRequest")
|
||||
proto.RegisterType((*DiskUsageRequest)(nil), "moby.buildkit.v1.DiskUsageRequest")
|
||||
@ -1650,6 +1658,18 @@ func (m *WorkerRecord) MarshalTo(dAtA []byte) (int, error) {
|
||||
i += copy(dAtA[i:], v)
|
||||
}
|
||||
}
|
||||
if len(m.Platforms) > 0 {
|
||||
for _, msg := range m.Platforms {
|
||||
dAtA[i] = 0x1a
|
||||
i++
|
||||
i = encodeVarintControl(dAtA, i, uint64(msg.Size()))
|
||||
n, err := msg.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@ -1979,6 +1999,12 @@ func (m *WorkerRecord) Size() (n int) {
|
||||
n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
|
||||
}
|
||||
}
|
||||
if len(m.Platforms) > 0 {
|
||||
for _, e := range m.Platforms {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovControl(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@ -4663,6 +4689,37 @@ func (m *WorkerRecord) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
m.Labels[mapkey] = mapvalue
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Platforms", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowControl
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthControl
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Platforms = append(m.Platforms, pb.Platform{})
|
||||
if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipControl(dAtA[iNdEx:])
|
||||
@ -4792,80 +4849,81 @@ var (
|
||||
func init() { proto.RegisterFile("control.proto", fileDescriptorControl) }
|
||||
|
||||
var fileDescriptorControl = []byte{
|
||||
// 1192 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xcd, 0x6e, 0x23, 0x45,
|
||||
0x10, 0x66, 0x6c, 0xc7, 0x3f, 0x65, 0x27, 0x0a, 0x0d, 0xac, 0x46, 0x03, 0x24, 0x66, 0x00, 0xc9,
|
||||
	// (remaining fileDescriptor bytes omitted: binary data from the gzipped FileDescriptorProto)
	// 1214 bytes of a gzipped FileDescriptorProto (byte values omitted)
}
1 vendor/github.com/moby/buildkit/api/services/control/control.proto generated vendored
@ -118,4 +118,5 @@ message ListWorkersResponse {

message WorkerRecord {
	string ID = 1;
	map<string, string> Labels = 2;
	repeated pb.Platform platforms = 3 [(gogoproto.nullable) = false];
}
6 vendor/github.com/moby/buildkit/client/client.go generated vendored
@ -5,7 +5,6 @@ import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"time"

	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
	controlapi "github.com/moby/buildkit/api/services/control"
@ -23,7 +22,7 @@ type Client struct {
type ClientOpt interface{}

// New returns a new buildkit client. Address can be empty for the system-default address.
func New(address string, opts ...ClientOpt) (*Client, error) {
func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) {
	gopts := []grpc.DialOption{
		grpc.WithDialer(dialer),
		grpc.FailOnNonTempDialError(true),
@ -54,9 +53,6 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
		address = appdefaults.Address
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, address, gopts...)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to dial %q . make sure buildkitd is running", address)
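For context, a minimal sketch (not part of the vendored diff) of how a caller uses the updated client.New signature: the dial context, and therefore any timeout, is now supplied by the caller instead of the hard-coded 30-second timeout removed above. The socket address and the 30-second value below are illustrative assumptions.

package main

import (
	"context"
	"log"
	"time"

	"github.com/moby/buildkit/client"
)

func main() {
	// The caller now owns dial cancellation; pick any timeout that suits the workload.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Address is an assumption for illustration; an empty string falls back to the default.
	c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
}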
88 vendor/github.com/moby/buildkit/client/llb/exec.go generated vendored
@ -17,8 +17,8 @@ type Meta struct {
	ProxyEnv *ProxyEnv
}

func NewExecOp(root Output, meta Meta, readOnly bool, md OpMetadata) *ExecOp {
	e := &ExecOp{meta: meta, cachedOpMetadata: md}
func NewExecOp(root Output, meta Meta, readOnly bool, c Constraints) *ExecOp {
	e := &ExecOp{meta: meta, constraints: c}
	rootMount := &mount{
		target: pb.RootMount,
		source: root,
@ -28,32 +28,35 @@ func NewExecOp(root Output, meta Meta, readOnly bool, md OpMetadata) *ExecOp {
	if readOnly {
		e.root = root
	} else {
		e.root = &output{vertex: e, getIndex: e.getMountIndexFn(rootMount)}
		o := &output{vertex: e, getIndex: e.getMountIndexFn(rootMount)}
		if p := c.Platform; p != nil {
			o.platform = p
		}
		e.root = o
	}
	rootMount.output = e.root

	return e
}

type mount struct {
	target   string
	readonly bool
	source   Output
	output   Output
	selector string
	cacheID  string
	tmpfs    bool
	target       string
	readonly     bool
	source       Output
	output       Output
	selector     string
	cacheID      string
	tmpfs        bool
	cacheSharing CacheMountSharingMode
	// hasOutput bool
}

type ExecOp struct {
	root             Output
	mounts           []*mount
	meta             Meta
	cachedPBDigest   digest.Digest
	cachedPB         []byte
	cachedOpMetadata OpMetadata
	isValidated      bool
	MarshalCache
	root        Output
	mounts      []*mount
	meta        Meta
	constraints Constraints
	isValidated bool
}

func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Output {
@ -70,9 +73,13 @@ func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Outp
	} else if m.tmpfs {
		m.output = &output{vertex: e, err: errors.Errorf("tmpfs mount for %s can't be used as a parent", target)}
	} else {
		m.output = &output{vertex: e, getIndex: e.getMountIndexFn(m)}
		o := &output{vertex: e, getIndex: e.getMountIndexFn(m)}
		if p := e.constraints.Platform; p != nil {
			o.platform = p
		}
		m.output = o
	}
	e.cachedPB = nil
	e.Store(nil, nil, nil)
	e.isValidated = false
	return m.output
}
@ -107,9 +114,9 @@ func (e *ExecOp) Validate() error {
	return nil
}

func (e *ExecOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) {
	if e.cachedPB != nil {
		return e.cachedPBDigest, e.cachedPB, &e.cachedOpMetadata, nil
func (e *ExecOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) {
	if e.Cached(c) {
		return e.Load()
	}
	if err := e.Validate(); err != nil {
		return "", nil, nil, err
@ -137,10 +144,9 @@ func (e *ExecOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) {
		}
	}

	pop := &pb.Op{
		Op: &pb.Op_Exec{
			Exec: peo,
		},
	pop, md := MarshalConstraints(c, &e.constraints)
	pop.Op = &pb.Op_Exec{
		Exec: peo,
	}

	outIndex := 0
@ -150,7 +156,7 @@ func (e *ExecOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) {
		if m.tmpfs {
			return "", nil, nil, errors.Errorf("tmpfs mounts must use scratch")
		}
		inp, err := m.source.ToInput()
		inp, err := m.source.ToInput(c)
		if err != nil {
			return "", nil, nil, err
		}
@ -190,6 +196,14 @@ func (e *ExecOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) {
			pm.CacheOpt = &pb.CacheOpt{
				ID: m.cacheID,
			}
			switch m.cacheSharing {
			case CacheMountShared:
				pm.CacheOpt.Sharing = pb.CacheSharingOpt_SHARED
			case CacheMountPrivate:
				pm.CacheOpt.Sharing = pb.CacheSharingOpt_PRIVATE
			case CacheMountLocked:
				pm.CacheOpt.Sharing = pb.CacheSharingOpt_LOCKED
			}
		}
		if m.tmpfs {
			pm.MountType = pb.MountType_TMPFS
@ -201,9 +215,8 @@ func (e *ExecOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) {
	if err != nil {
		return "", nil, nil, err
	}
	e.cachedPBDigest = digest.FromBytes(dt)
	e.cachedPB = dt
	return e.cachedPBDigest, dt, &e.cachedOpMetadata, nil
	e.Store(dt, md, c)
	return e.Load()
}

func (e *ExecOp) Output() Output {
@ -273,9 +286,10 @@ func SourcePath(src string) MountOption {
	}
}

func AsPersistentCacheDir(id string) MountOption {
func AsPersistentCacheDir(id string, sharing CacheMountSharingMode) MountOption {
	return func(m *mount) {
		m.cacheID = id
		m.cacheSharing = sharing
	}
}

@ -366,7 +380,7 @@ func WithProxy(ps ProxyEnv) RunOption {
}

type ExecInfo struct {
	opMetaWrapper
	constraintsWrapper
	State          State
	Mounts         []MountInfo
	ReadonlyRootFS bool
@ -385,3 +399,11 @@ type ProxyEnv struct {
	FtpProxy  string
	NoProxy   string
}

type CacheMountSharingMode int

const (
	CacheMountShared CacheMountSharingMode = iota
	CacheMountPrivate
	CacheMountLocked
)
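A hedged sketch of the new cache-mount option in use, not taken from the diff: AsPersistentCacheDir now takes a sharing mode, and the llb.AddMount RunOption (defined elsewhere in the same package, not shown in this hunk) attaches the cache mount to a Run step. The image, command, mount path, and cache ID are illustrative assumptions.

package main

import (
	"os"

	"github.com/moby/buildkit/client/llb"
)

func main() {
	// Run apt-get with /var/cache/apt backed by a locked (serialized) cache mount.
	st := llb.Image("docker.io/library/debian:stretch").
		Run(llb.Shlex("apt-get update"),
			llb.AddMount("/var/cache/apt", llb.Scratch(),
				llb.AsPersistentCacheDir("apt-cache", llb.CacheMountLocked))).
		Root()

	def, err := st.Marshal()
	if err != nil {
		panic(err)
	}
	// Emit the marshaled LLB definition, e.g. to pipe into a frontend or buildctl.
	_ = llb.WriteTo(def, os.Stdout)
}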
62 vendor/github.com/moby/buildkit/client/llb/marshal.go generated vendored
@ -4,6 +4,7 @@ import (
	"io"
	"io/ioutil"

	"github.com/containerd/containerd/platforms"
	"github.com/moby/buildkit/solver/pb"
	digest "github.com/opencontainers/go-digest"
)
@ -12,11 +13,11 @@ import (
// Corresponds to the Definition structure defined in solver/pb.Definition.
type Definition struct {
	Def      [][]byte
	Metadata map[digest.Digest]OpMetadata
	Metadata map[digest.Digest]pb.OpMetadata
}

func (def *Definition) ToPB() *pb.Definition {
	md := make(map[digest.Digest]OpMetadata)
	md := make(map[digest.Digest]pb.OpMetadata)
	for k, v := range def.Metadata {
		md[k] = v
	}
@ -28,14 +29,12 @@ func (def *Definition) ToPB() *pb.Definition {

func (def *Definition) FromPB(x *pb.Definition) {
	def.Def = x.Def
	def.Metadata = make(map[digest.Digest]OpMetadata)
	def.Metadata = make(map[digest.Digest]pb.OpMetadata)
	for k, v := range x.Metadata {
		def.Metadata[k] = v
	}
}

type OpMetadata = pb.OpMetadata

func WriteTo(def *Definition, w io.Writer) error {
	b, err := def.ToPB().Marshal()
	if err != nil {
@ -58,3 +57,56 @@ func ReadFrom(r io.Reader) (*Definition, error) {
	def.FromPB(&pbDef)
	return &def, nil
}

func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) {
	c := *base
	c.WorkerConstraints = append([]string{}, c.WorkerConstraints...)

	if p := override.Platform; p != nil {
		c.Platform = p
	}

	for _, wc := range override.WorkerConstraints {
		c.WorkerConstraints = append(c.WorkerConstraints, wc)
	}

	c.Metadata = mergeMetadata(c.Metadata, override.Metadata)

	if c.Platform == nil {
		defaultPlatform := platforms.Normalize(platforms.DefaultSpec())
		c.Platform = &defaultPlatform
	}

	return &pb.Op{
		Platform: &pb.Platform{
			OS:           c.Platform.OS,
			Architecture: c.Platform.Architecture,
			Variant:      c.Platform.Variant,
			OSVersion:    c.Platform.OSVersion,
			OSFeatures:   c.Platform.OSFeatures,
		},
		Constraints: &pb.WorkerConstraints{
			Filter: c.WorkerConstraints,
		},
	}, &c.Metadata
}

type MarshalCache struct {
	digest      digest.Digest
	dt          []byte
	md          *pb.OpMetadata
	constraints *Constraints
}

func (mc *MarshalCache) Cached(c *Constraints) bool {
	return mc.dt != nil && mc.constraints == c
}
func (mc *MarshalCache) Load() (digest.Digest, []byte, *pb.OpMetadata, error) {
	return mc.digest, mc.dt, mc.md, nil
}
func (mc *MarshalCache) Store(dt []byte, md *pb.OpMetadata, c *Constraints) {
	mc.digest = digest.FromBytes(dt)
	mc.dt = dt
	mc.md = md
	mc.constraints = c
}
26 vendor/github.com/moby/buildkit/client/llb/meta.go generated vendored
@ -4,16 +4,19 @@ import (
	"fmt"
	"path"

	"github.com/containerd/containerd/platforms"
	"github.com/google/shlex"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

type contextKeyT string

var (
	keyArgs = contextKeyT("llb.exec.args")
	keyDir  = contextKeyT("llb.exec.dir")
	keyEnv  = contextKeyT("llb.exec.env")
	keyUser = contextKeyT("llb.exec.user")
	keyArgs     = contextKeyT("llb.exec.args")
	keyDir      = contextKeyT("llb.exec.dir")
	keyEnv      = contextKeyT("llb.exec.env")
	keyUser     = contextKeyT("llb.exec.user")
	keyPlatform = contextKeyT("llb.platform")
)

func addEnv(key, value string) StateOption {
@ -106,6 +109,21 @@ func shlexf(str string, v ...interface{}) StateOption {
	}
}

func platform(p specs.Platform) StateOption {
	return func(s State) State {
		return s.WithValue(keyPlatform, platforms.Normalize(p))
	}
}

func getPlatform(s State) *specs.Platform {
	v := s.Value(keyPlatform)
	if v != nil {
		p := v.(specs.Platform)
		return &p
	}
	return nil
}

type EnvList []KeyValue

type KeyValue struct {
3 vendor/github.com/moby/buildkit/client/llb/resolver.go generated vendored
@ -4,6 +4,7 @@ import (
	"context"

	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func WithMetaResolver(mr ImageMetaResolver) ImageOption {
@ -13,5 +14,5 @@ func WithMetaResolver(mr ImageMetaResolver) ImageOption {
}

type ImageMetaResolver interface {
	ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error)
	ResolveImageConfig(ctx context.Context, ref string, platform *specs.Platform) (digest.Digest, []byte, error)
}
65 vendor/github.com/moby/buildkit/client/llb/source.go generated vendored
@ -15,22 +15,21 @@ import (
)

type SourceOp struct {
	id               string
	attrs            map[string]string
	output           Output
	cachedPBDigest   digest.Digest
	cachedPB         []byte
	cachedOpMetadata OpMetadata
	err              error
	MarshalCache
	id          string
	attrs       map[string]string
	output      Output
	constraints Constraints
	err         error
}

func NewSource(id string, attrs map[string]string, md OpMetadata) *SourceOp {
func NewSource(id string, attrs map[string]string, c Constraints) *SourceOp {
	s := &SourceOp{
		id:               id,
		attrs:            attrs,
		cachedOpMetadata: md,
		id:          id,
		attrs:       attrs,
		constraints: c,
	}
	s.output = &output{vertex: s}
	s.output = &output{vertex: s, platform: c.Platform}
	return s
}

@ -44,26 +43,26 @@ func (s *SourceOp) Validate() error {
	return nil
}

func (s *SourceOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) {
	if s.cachedPB != nil {
		return s.cachedPBDigest, s.cachedPB, &s.cachedOpMetadata, nil
func (s *SourceOp) Marshal(constraints *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) {
	if s.Cached(constraints) {
		return s.Load()
	}
	if err := s.Validate(); err != nil {
		return "", nil, nil, err
	}

	proto := &pb.Op{
		Op: &pb.Op_Source{
			Source: &pb.SourceOp{Identifier: s.id, Attrs: s.attrs},
		},
	proto, md := MarshalConstraints(constraints, &s.constraints)

	proto.Op = &pb.Op_Source{
		Source: &pb.SourceOp{Identifier: s.id, Attrs: s.attrs},
	}
	dt, err := proto.Marshal()
	if err != nil {
		return "", nil, nil, err
	}
	s.cachedPB = dt
	s.cachedPBDigest = digest.FromBytes(dt)
	return s.cachedPBDigest, dt, &s.cachedOpMetadata, nil

	s.Store(dt, md, constraints)
	return s.Load()
}

func (s *SourceOp) Output() Output {
@ -74,10 +73,6 @@ func (s *SourceOp) Inputs() []Output {
	return nil
}

func Source(id string) State {
	return NewState(NewSource(id, nil, OpMetadata{}).Output())
}

func Image(ref string, opts ...ImageOption) State {
	r, err := reference.ParseNormalizedNamed(ref)
	if err == nil {
@ -87,12 +82,12 @@ func Image(ref string, opts ...ImageOption) State {
	for _, opt := range opts {
		opt.SetImageOption(&info)
	}
	src := NewSource("docker-image://"+ref, nil, info.Metadata()) // controversial
	src := NewSource("docker-image://"+ref, nil, info.Constraints) // controversial
	if err != nil {
		src.err = err
	}
	if info.metaResolver != nil {
		_, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref)
		_, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, info.Constraints.Platform)
		if err != nil {
			src.err = err
		} else {
@ -136,7 +131,7 @@ func (fn ImageOptionFunc) SetImageOption(ii *ImageInfo) {
}

type ImageInfo struct {
	opMetaWrapper
	constraintsWrapper
	metaResolver ImageMetaResolver
}

@ -169,7 +164,7 @@ func Git(remote, ref string, opts ...GitOption) State {
	if url != "" {
		attrs[pb.AttrFullRemoteURL] = url
	}
	source := NewSource("git://"+id, attrs, gi.Metadata())
	source := NewSource("git://"+id, attrs, gi.Constraints)
	return NewState(source.Output())
}

@ -183,7 +178,7 @@ func (fn gitOptionFunc) SetGitOption(gi *GitInfo) {
}

type GitInfo struct {
	opMetaWrapper
	constraintsWrapper
	KeepGitDir bool
}

@ -220,7 +215,7 @@ func Local(name string, opts ...LocalOption) State {
		attrs[pb.AttrSharedKeyHint] = gi.SharedKeyHint
	}

	source := NewSource("local://"+name, attrs, gi.Metadata())
	source := NewSource("local://"+name, attrs, gi.Constraints)
	return NewState(source.Output())
}

@ -280,7 +275,7 @@ func SharedKeyHint(h string) LocalOption {
}

type LocalInfo struct {
	opMetaWrapper
	constraintsWrapper
	SessionID       string
	IncludePatterns string
	ExcludePatterns string
@ -310,12 +305,12 @@ func HTTP(url string, opts ...HTTPOption) State {
		attrs[pb.AttrHTTPGID] = strconv.Itoa(hi.GID)
	}

	source := NewSource(url, attrs, hi.Metadata())
	source := NewSource(url, attrs, hi.Constraints)
	return NewState(source.Output())
}

type HTTPInfo struct {
	opMetaWrapper
	constraintsWrapper
	Checksum digest.Digest
	Filename string
	Perm     int
181 vendor/github.com/moby/buildkit/client/llb/state.go generated vendored
@ -3,21 +3,23 @@ package llb
import (
	"context"

	"github.com/containerd/containerd/platforms"
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/util/system"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

type StateOption func(State) State

type Output interface {
	ToInput() (*pb.Input, error)
	ToInput(*Constraints) (*pb.Input, error)
	Vertex() Vertex
}

type Vertex interface {
	Validate() error
	Marshal() (digest.Digest, []byte, *OpMetadata, error)
	Marshal(*Constraints) (digest.Digest, []byte, *pb.OpMetadata, error)
	Output() Output
	Inputs() []Output
}
@ -29,12 +31,25 @@ func NewState(o Output) State {
	}
	s = dir("/")(s)
	s = addEnv("PATH", system.DefaultPathEnv)(s)
	s = s.ensurePlatform()
	return s
}

type State struct {
	out Output
	ctx context.Context
	out  Output
	ctx  context.Context
	opts []ConstraintsOpt
}

func (s State) ensurePlatform() State {
	if o, ok := s.out.(interface {
		Platform() *specs.Platform
	}); ok {
		if p := o.Platform(); p != nil {
			s = platform(*p)(s)
		}
	}
	return s
}

func (s State) WithValue(k, v interface{}) State {
@ -48,18 +63,32 @@ func (s State) Value(k interface{}) interface{} {
	return s.ctx.Value(k)
}

func (s State) Marshal(md ...MetadataOpt) (*Definition, error) {
func (s State) SetMarhalDefaults(co ...ConstraintsOpt) State {
	s.opts = co
	return s
}

func (s State) Marshal(co ...ConstraintsOpt) (*Definition, error) {
	def := &Definition{
		Metadata: make(map[digest.Digest]OpMetadata, 0),
		Metadata: make(map[digest.Digest]pb.OpMetadata, 0),
	}
	if s.Output() == nil {
		return def, nil
	}
	def, err := marshal(s.Output().Vertex(), def, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, md)

	defaultPlatform := platforms.Normalize(platforms.DefaultSpec())
	c := &Constraints{
		Platform: &defaultPlatform,
	}
	for _, o := range append(s.opts, co...) {
		o.SetConstraintsOption(c)
	}

	def, err := marshal(s.Output().Vertex(), def, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, c)
	if err != nil {
		return def, err
	}
	inp, err := s.Output().ToInput()
	inp, err := s.Output().ToInput(c)
	if err != nil {
		return def, err
	}
@ -72,29 +101,25 @@ func (s State) Marshal(md ...MetadataOpt) (*Definition, error) {
	return def, nil
}

func marshal(v Vertex, def *Definition, cache map[digest.Digest]struct{}, vertexCache map[Vertex]struct{}, md []MetadataOpt) (*Definition, error) {
func marshal(v Vertex, def *Definition, cache map[digest.Digest]struct{}, vertexCache map[Vertex]struct{}, c *Constraints) (*Definition, error) {
	if _, ok := vertexCache[v]; ok {
		return def, nil
	}
	for _, inp := range v.Inputs() {
		var err error
		def, err = marshal(inp.Vertex(), def, cache, vertexCache, md)
		def, err = marshal(inp.Vertex(), def, cache, vertexCache, c)
		if err != nil {
			return def, err
		}
	}

	dgst, dt, opMeta, err := v.Marshal()
	dgst, dt, opMeta, err := v.Marshal(c)
	if err != nil {
		return def, err
	}
	vertexCache[v] = struct{}{}
	if opMeta != nil {
		m := mergeMetadata(def.Metadata[dgst], *opMeta)
		for _, f := range md {
			f.SetMetadataOption(&m)
		}
		def.Metadata[dgst] = m
		def.Metadata[dgst] = mergeMetadata(def.Metadata[dgst], *opMeta)
	}
	if _, ok := cache[dgst]; ok {
		return def, nil
@ -113,14 +138,19 @@ func (s State) Output() Output {
}

func (s State) WithOutput(o Output) State {
	return State{
	s = State{
		out: o,
		ctx: s.ctx,
	}
	s = s.ensurePlatform()
	return s
}

func (s State) Run(ro ...RunOption) ExecState {
	ei := &ExecInfo{State: s}
	if p := s.GetPlatform(); p != nil {
		ei.Constraints.Platform = p
	}
	for _, o := range ro {
		o.SetRunOption(ei)
	}
@ -132,7 +162,7 @@ func (s State) Run(ro ...RunOption) ExecState {
		ProxyEnv: ei.ProxyEnv,
	}

	exec := NewExecOp(s.Output(), meta, ei.ReadonlyRootFS, ei.Metadata())
	exec := NewExecOp(s.Output(), meta, ei.ReadonlyRootFS, ei.Constraints)
	for _, m := range ei.Mounts {
		exec.AddMount(m.Target, m.Source, m.Opts...)
	}
@ -178,6 +208,14 @@ func (s State) User(v string) State {
	return user(v)(s)
}

func (s State) Platform(p specs.Platform) State {
	return platform(p)(s)
}

func (s State) GetPlatform() *specs.Platform {
	return getPlatform(s)
}

func (s State) With(so ...StateOption) State {
	for _, o := range so {
		s = o(s)
@ -189,9 +227,10 @@ type output struct {
	vertex   Vertex
	getIndex func() (pb.OutputIndex, error)
	err      error
	platform *specs.Platform
}

func (o *output) ToInput() (*pb.Input, error) {
func (o *output) ToInput(c *Constraints) (*pb.Input, error) {
	if o.err != nil {
		return nil, o.err
	}
@ -203,7 +242,7 @@ func (o *output) ToInput() (*pb.Input, error) {
			return nil, err
		}
	}
	dgst, _, _, err := o.vertex.Marshal()
	dgst, _, _, err := o.vertex.Marshal(c)
	if err != nil {
		return nil, err
	}
@ -214,8 +253,12 @@ func (o *output) Vertex() Vertex {
	return o.vertex
}

type MetadataOpt interface {
	SetMetadataOption(*OpMetadata)
func (o *output) Platform() *specs.Platform {
	return o.platform
}

type ConstraintsOpt interface {
	SetConstraintsOption(*Constraints)
	RunOption
	LocalOption
	HTTPOption
@ -223,33 +266,33 @@ type MetadataOpt interface {
	GitOption
}

type metadataOptFunc func(m *OpMetadata)
type constraintsOptFunc func(m *Constraints)

func (fn metadataOptFunc) SetMetadataOption(m *OpMetadata) {
func (fn constraintsOptFunc) SetConstraintsOption(m *Constraints) {
	fn(m)
}

func (fn metadataOptFunc) SetRunOption(ei *ExecInfo) {
	ei.ApplyMetadata(fn)
func (fn constraintsOptFunc) SetRunOption(ei *ExecInfo) {
	ei.applyConstraints(fn)
}

func (fn metadataOptFunc) SetLocalOption(li *LocalInfo) {
	li.ApplyMetadata(fn)
func (fn constraintsOptFunc) SetLocalOption(li *LocalInfo) {
	li.applyConstraints(fn)
}

func (fn metadataOptFunc) SetHTTPOption(hi *HTTPInfo) {
	hi.ApplyMetadata(fn)
func (fn constraintsOptFunc) SetHTTPOption(hi *HTTPInfo) {
	hi.applyConstraints(fn)
}

func (fn metadataOptFunc) SetImageOption(ii *ImageInfo) {
	ii.ApplyMetadata(fn)
func (fn constraintsOptFunc) SetImageOption(ii *ImageInfo) {
	ii.applyConstraints(fn)
}

func (fn metadataOptFunc) SetGitOption(gi *GitInfo) {
	gi.ApplyMetadata(fn)
func (fn constraintsOptFunc) SetGitOption(gi *GitInfo) {
	gi.applyConstraints(fn)
}

func mergeMetadata(m1, m2 OpMetadata) OpMetadata {
func mergeMetadata(m1, m2 pb.OpMetadata) pb.OpMetadata {
	if m2.IgnoreCache {
		m1.IgnoreCache = true
	}
@ -268,49 +311,77 @@ func mergeMetadata(m1, m2 OpMetadata) OpMetadata {
	return m1
}

var IgnoreCache = metadataOptFunc(func(md *OpMetadata) {
	md.IgnoreCache = true
var IgnoreCache = constraintsOptFunc(func(c *Constraints) {
	c.Metadata.IgnoreCache = true
})

func WithDescription(m map[string]string) MetadataOpt {
	return metadataOptFunc(func(md *OpMetadata) {
		md.Description = m
func WithDescription(m map[string]string) ConstraintsOpt {
	return constraintsOptFunc(func(c *Constraints) {
		c.Metadata.Description = m
	})
}

// WithExportCache forces results for this vertex to be exported with the cache
func WithExportCache() MetadataOpt {
	return metadataOptFunc(func(md *OpMetadata) {
		md.ExportCache = &pb.ExportCache{Value: true}
func WithExportCache() ConstraintsOpt {
	return constraintsOptFunc(func(c *Constraints) {
		c.Metadata.ExportCache = &pb.ExportCache{Value: true}
	})
}

// WithoutExportCache sets results for this vertex to be not exported with
// the cache
func WithoutExportCache() MetadataOpt {
	return metadataOptFunc(func(md *OpMetadata) {
func WithoutExportCache() ConstraintsOpt {
	return constraintsOptFunc(func(c *Constraints) {
		// ExportCache with value false means to disable exporting
		md.ExportCache = &pb.ExportCache{Value: false}
		c.Metadata.ExportCache = &pb.ExportCache{Value: false}
	})
}

// WithoutDefaultExportCache resets the cache export for the vertex to use
// the default defined by the build configuration.
func WithoutDefaultExportCache() MetadataOpt {
	return metadataOptFunc(func(md *OpMetadata) {
func WithoutDefaultExportCache() ConstraintsOpt {
	return constraintsOptFunc(func(c *Constraints) {
		// nil means no vertex based config has been set
		md.ExportCache = nil
		c.Metadata.ExportCache = nil
	})
}

type opMetaWrapper struct {
	OpMetadata
type constraintsWrapper struct {
	Constraints
}

func (mw *opMetaWrapper) ApplyMetadata(f func(m *OpMetadata)) {
	f(&mw.OpMetadata)
func (cw *constraintsWrapper) applyConstraints(f func(c *Constraints)) {
	f(&cw.Constraints)
}

func (mw *opMetaWrapper) Metadata() OpMetadata {
	return mw.OpMetadata
type Constraints struct {
	Platform          *specs.Platform
	WorkerConstraints []string
	Metadata          pb.OpMetadata
}

func Platform(p specs.Platform) ConstraintsOpt {
	return constraintsOptFunc(func(c *Constraints) {
		c.Platform = &p
	})
}

var (
	LinuxAmd64   = Platform(specs.Platform{OS: "linux", Architecture: "amd64"})
	LinuxArmhf   = Platform(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v7"})
	LinuxArm     = LinuxArmhf
	LinuxArmel   = Platform(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"})
	LinuxArm64   = Platform(specs.Platform{OS: "linux", Architecture: "arm64"})
	LinuxS390x   = Platform(specs.Platform{OS: "linux", Architecture: "s390x"})
	LinuxPpc64le = Platform(specs.Platform{OS: "linux", Architecture: "ppc64le"})
	Darwin       = Platform(specs.Platform{OS: "darwin", Architecture: "amd64"})
	Windows      = Platform(specs.Platform{OS: "windows", Architecture: "amd64"})
)

func Require(filters ...string) ConstraintsOpt {
	return constraintsOptFunc(func(c *Constraints) {
		for _, f := range filters {
			c.WorkerConstraints = append(c.WorkerConstraints, f)
		}
	})
}
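To illustrate the Constraints plumbing introduced above (a sketch under assumptions, not code from the diff): platform constraints can be attached per source via the new ConstraintsOpt values such as LinuxArm64, or passed to Marshal as defaults for vertices that did not set one. The image reference is an illustrative assumption.

package main

import (
	"os"

	"github.com/moby/buildkit/client/llb"
)

func main() {
	// Pin this source to linux/arm64; ConstraintsOpt doubles as an ImageOption.
	st := llb.Image("docker.io/library/alpine:latest", llb.LinuxArm64)

	// Constraints passed at marshal time act as defaults for the whole graph.
	def, err := st.Marshal(llb.LinuxArm64)
	if err != nil {
		panic(err)
	}
	_ = llb.WriteTo(def, os.Stdout)
}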
26 vendor/github.com/moby/buildkit/client/workers.go generated vendored
@ -4,12 +4,15 @@ import (
	"context"

	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/moby/buildkit/solver/pb"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

type WorkerInfo struct {
	ID     string
	Labels map[string]string
	ID        string
	Labels    map[string]string
	Platforms []specs.Platform
}

func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]*WorkerInfo, error) {
@ -28,8 +31,9 @@ func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]

	for _, w := range resp.Record {
		wi = append(wi, &WorkerInfo{
			ID:     w.ID,
			Labels: w.Labels,
			ID:        w.ID,
			Labels:    w.Labels,
			Platforms: toClientPlatforms(w.Platforms),
		})
	}

@ -47,3 +51,17 @@ func WithWorkerFilter(f []string) ListWorkersOption {
		wi.Filter = f
	}
}

func toClientPlatforms(p []pb.Platform) []specs.Platform {
	out := make([]specs.Platform, 0, len(p))
	for _, pp := range p {
		out = append(out, specs.Platform{
			OS:           pp.OS,
			Architecture: pp.Architecture,
			Variant:      pp.Variant,
			OSVersion:    pp.OSVersion,
			OSFeatures:   pp.OSFeatures,
		})
	}
	return out
}
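A short sketch (assumed usage, not part of the diff) showing how the new Platforms field on WorkerInfo can be read back through ListWorkers; the empty address falls back to the default daemon socket.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containerd/containerd/platforms"
	"github.com/moby/buildkit/client"
)

func main() {
	ctx := context.Background()
	c, err := client.New(ctx, "")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	workers, err := c.ListWorkers(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, w := range workers {
		for _, p := range w.Platforms {
			// platforms.Format renders e.g. "linux/amd64".
			fmt.Println(w.ID, platforms.Format(p))
		}
	}
}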
819 vendor/github.com/moby/buildkit/solver/pb/ops.pb.go generated vendored
File diff suppressed because it is too large
37 vendor/github.com/moby/buildkit/solver/pb/ops.proto generated vendored
@ -15,7 +15,18 @@ message Op {
		SourceOp source = 3;
		CopyOp copy = 4;
		BuildOp build = 5;
	}
}
	Platform platform = 10;
	WorkerConstraints constraints = 11;
}

// Platform is github.com/opencontainers/image-spec/specs-go/v1.Platform
message Platform {
	string Architecture = 1;
	string OS = 2;
	string Variant = 3;
	string OSVersion = 4; // unused
	repeated string OSFeatures = 5; // unused
}

// Input represents an input edge for an Op.

@ -54,6 +65,7 @@ message Mount {
	CacheOpt cacheOpt = 20;
}

// MountType defines a type of a mount from a supported set
enum MountType {
	BIND = 0;
	SECRET = 1;
@ -62,8 +74,22 @@ enum MountType {
	TMPFS = 4;
}

// CacheOpt defines options specific to cache mounts
message CacheOpt {
	// ID is an optional namespace for the mount
	string ID = 1;
	// Sharing is the sharing mode for the mount
	CacheSharingOpt sharing = 2;
}

// CacheSharingOpt defines different sharing modes for cache mount
enum CacheSharingOpt {
	// SHARED cache mount can be used concurrently by multiple writers
	SHARED = 0;
	// PRIVATE creates a new mount if there are multiple writers
	PRIVATE = 1;
	// LOCKED pauses second writer until first one releases the mount
	LOCKED = 2;
}

// CopyOp copies files across Ops.

@ -106,8 +132,9 @@ message OpMetadata {
	// ignore_cache specifies to ignore the cache for this Op.
	bool ignore_cache = 1;
	// Description can be used for keeping any text fields that builder doesn't parse
	map<string, string> description = 2;
	WorkerConstraint worker_constraint = 3;
	map<string, string> description = 2;
	// index 3 reserved for WorkerConstraint in previous versions
	// WorkerConstraint worker_constraint = 3;
	ExportCache export_cache = 4;
}

@ -122,8 +149,8 @@ message ProxyEnv {
	string no_proxy = 4;
}

// WorkerConstraint is experimental and likely to be changed.
message WorkerConstraint {
// WorkerConstraints defines conditions for the worker
message WorkerConstraints {
	repeated string filter = 1; // containerd-style filter
}
4 vendor/github.com/moby/buildkit/util/progress/progressui/display.go generated vendored
@ -212,8 +212,9 @@ func (t *trace) update(s *client.SolveStatus) {
		if !ok {
			continue // shouldn't happen
		}
		i := 0
		complete := split(l.Data, byte('\n'), func(dt []byte) {
			if v.logsPartial && len(v.logs) != 0 {
			if v.logsPartial && len(v.logs) != 0 && i == 0 {
				v.logs[len(v.logs)-1] = append(v.logs[len(v.logs)-1], dt...)
			} else {
				ts := time.Duration(0)
@ -222,6 +223,7 @@ func (t *trace) update(s *client.SolveStatus) {
				}
				v.logs = append(v.logs, []byte(fmt.Sprintf("#%d %s %s", v.index, fmt.Sprintf("%#.4g", ts.Seconds())[:5], dt)))
			}
			i++
		})
		v.logsPartial = !complete
		t.updates[v.Digest] = struct{}{}
6 vendor/github.com/moby/buildkit/vendor.conf generated vendored
@ -6,7 +6,7 @@ github.com/davecgh/go-spew v1.1.0
github.com/pmezard/go-difflib v1.0.0
golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993

github.com/containerd/containerd 63522d9eaa5a0443d225642c4b6f4f5fdedf932b
github.com/containerd/containerd 08f7ee9828af1783dc98cc5cc1739e915697c667
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
github.com/sirupsen/logrus v1.0.0
@ -18,7 +18,7 @@ github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
github.com/golang/protobuf v1.1.0
github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc 0e561642f81e84ebd0b3afd6ec510c75a2ccb71b
github.com/opencontainers/runc ad0f5255060d36872be04de22f8731f38ef2d7b1
github.com/Microsoft/go-winio v0.4.7
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
github.com/opencontainers/runtime-spec v1.0.1
@ -63,5 +63,5 @@ github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1c
github.com/opencontainers/selinux 74a747aeaf2d66097b6908f572794f49f07dda2c

# used by dockerfile tests
github.com/gotestyourself/gotestyourself cf3a5ab914a2efa8bc838d09f5918c1d44d029
gotest.tools v2.1.0
github.com/google/go-cmp v0.2.0
63 vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c generated vendored
@ -505,7 +505,8 @@ void join_namespaces(char *nslist)

		ns->fd = fd;
		ns->ns = nsflag(namespace);
		strncpy(ns->path, path, PATH_MAX);
		strncpy(ns->path, path, PATH_MAX - 1);
		ns->path[PATH_MAX - 1] = '\0';
	} while ((namespace = strtok_r(NULL, ",", &saveptr)) != NULL);

	/*
@ -678,17 +679,15 @@ void nsexec(void)
			/*
			 * Enable setgroups(2) if we've been asked to. But we also
			 * have to explicitly disable setgroups(2) if we're
			 * creating a rootless container (this is required since
			 * Linux 3.19).
			 * creating a rootless container for single-entry mapping.
			 * i.e. config.is_setgroup == false.
			 * (this is required since Linux 3.19).
			 *
			 * For rootless multi-entry mapping, config.is_setgroup shall be true and
			 * newuidmap/newgidmap shall be used.
			 */
			if (config.is_rootless && config.is_setgroup) {
				kill(child, SIGKILL);
				bail("cannot allow setgroup in an unprivileged user namespace setup");
			}

			if (config.is_setgroup)
				update_setgroups(child, SETGROUPS_ALLOW);
			if (config.is_rootless)
			if (config.is_rootless && !config.is_setgroup)
				update_setgroups(child, SETGROUPS_DENY);

			/* Set up mappings. */
@ -809,25 +808,30 @@ void nsexec(void)
			if (config.namespaces)
				join_namespaces(config.namespaces);

			/*
			 * Unshare all of the namespaces. Now, it should be noted that this
			 * ordering might break in the future (especially with rootless
			 * containers). But for now, it's not possible to split this into
			 * CLONE_NEWUSER + [the rest] because of some RHEL SELinux issues.
			 *
			 * Note that we don't merge this with clone() because there were
			 * some old kernel versions where clone(CLONE_PARENT | CLONE_NEWPID)
			 * was broken, so we'll just do it the long way anyway.
			 */
			if (unshare(config.cloneflags) < 0)
				bail("failed to unshare namespaces");

			/*
			 * Deal with user namespaces first. They are quite special, as they
			 * affect our ability to unshare other namespaces and are used as
			 * context for privilege checks.
			 *
			 * We don't unshare all namespaces in one go. The reason for this
			 * is that, while the kernel documentation may claim otherwise,
			 * there are certain cases where unsharing all namespaces at once
			 * will result in namespace objects being owned incorrectly.
			 * Ideally we should just fix these kernel bugs, but it's better to
			 * be safe than sorry, and fix them separately.
			 *
			 * A specific case of this is that the SELinux label of the
			 * internal kern-mount that mqueue uses will be incorrect if the
			 * UTS namespace is cloned before the USER namespace is mapped.
			 * I've also heard of similar problems with the network namespace
			 * in some scenarios. This also mirrors how LXC deals with this
			 * problem.
			 */
			if (config.cloneflags & CLONE_NEWUSER) {
				if (unshare(CLONE_NEWUSER) < 0)
					bail("failed to unshare user namespace");
				config.cloneflags &= ~CLONE_NEWUSER;

				/*
				 * We don't have the privileges to do any mapping here (see the
				 * clone_parent rant). So signal our parent to hook us up.
@ -853,8 +857,21 @@ void nsexec(void)
					if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) < 0)
						bail("failed to set process as dumpable");
				}

				/* Become root in the namespace proper. */
				if (setresuid(0, 0, 0) < 0)
					bail("failed to become root in user namespace");
			}

			/*
			 * Unshare all of the namespaces. Note that we don't merge this
			 * with clone() because there were some old kernel versions where
			 * clone(CLONE_PARENT | CLONE_NEWPID) was broken, so we'll just do
			 * it the long way.
			 */
			if (unshare(config.cloneflags) < 0)
				bail("failed to unshare namespaces");

			/*
			 * TODO: What about non-namespace clone flags that we're dropping here?
			 *
38 vendor/github.com/opencontainers/runc/libcontainer/system/linux.go generated vendored
@ -3,13 +3,12 @@
package system

import (
	"bufio"
	"fmt"
	"os"
	"os/exec"
	"syscall" // only for exec
	"unsafe"

	"github.com/opencontainers/runc/libcontainer/user"
	"golang.org/x/sys/unix"
)

@ -102,34 +101,43 @@ func Setctty() error {
}

// RunningInUserNS detects whether we are currently running in a user namespace.
// Copied from github.com/lxc/lxd/shared/util.go
// Originally copied from github.com/lxc/lxd/shared/util.go
func RunningInUserNS() bool {
	file, err := os.Open("/proc/self/uid_map")
	uidmap, err := user.CurrentProcessUIDMap()
	if err != nil {
		// This kernel-provided file only exists if user namespaces are supported
		return false
	}
	defer file.Close()
	return UIDMapInUserNS(uidmap)
}

	buf := bufio.NewReader(file)
	l, _, err := buf.ReadLine()
	if err != nil {
		return false
	}

	line := string(l)
	var a, b, c int64
	fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
func UIDMapInUserNS(uidmap []user.IDMap) bool {
	/*
	 * We assume we are in the initial user namespace if we have a full
	 * range - 4294967295 uids starting at uid 0.
	 */
	if a == 0 && b == 0 && c == 4294967295 {
	if len(uidmap) == 1 && uidmap[0].ID == 0 && uidmap[0].ParentID == 0 && uidmap[0].Count == 4294967295 {
		return false
	}
	return true
}

// GetParentNSeuid returns the euid within the parent user namespace
func GetParentNSeuid() int64 {
	euid := int64(os.Geteuid())
	uidmap, err := user.CurrentProcessUIDMap()
	if err != nil {
		// This kernel-provided file only exists if user namespaces are supported
		return euid
	}
	for _, um := range uidmap {
		if um.ID <= euid && euid <= um.ID+um.Count-1 {
			return um.ParentID + euid - um.ID
		}
	}
	return euid
}

// SetSubreaper sets the value i as the subreaper setting for the calling process
func SetSubreaper(i int) error {
	return unix.Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0)
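The refactor above separates reading /proc/self/uid_map from deciding whether the mapping denotes a user namespace, so the decision can be exercised directly. A sketch with made-up mappings, not code from the diff:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/system"
	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// A single full-range mapping (0 0 4294967295) means the initial user namespace.
	full := []user.IDMap{{ID: 0, ParentID: 0, Count: 4294967295}}
	// Anything else is treated as running inside a user namespace.
	nested := []user.IDMap{{ID: 0, ParentID: 1000, Count: 65536}}

	fmt.Println(system.UIDMapInUserNS(full))   // false
	fmt.Println(system.UIDMapInUserNS(nested)) // true
}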
18 vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go generated vendored
@ -2,8 +2,26 @@

package system

import (
	"os"

	"github.com/opencontainers/runc/libcontainer/user"
)

// RunningInUserNS is a stub for non-Linux systems
// Always returns false
func RunningInUserNS() bool {
	return false
}

// UIDMapInUserNS is a stub for non-Linux systems
// Always returns false
func UIDMapInUserNS(uidmap []user.IDMap) bool {
	return false
}

// GetParentNSeuid returns the euid within the parent user namespace
// Always returns os.Geteuid on non-linux
func GetParentNSeuid() int {
	return os.Geteuid()
}
26
vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
generated
vendored
26
vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
generated
vendored
@ -114,3 +114,29 @@ func CurrentUser() (User, error) {
func CurrentGroup() (Group, error) {
	return LookupGid(unix.Getgid())
}

func CurrentUserSubUIDs() ([]SubID, error) {
	u, err := CurrentUser()
	if err != nil {
		return nil, err
	}
	return ParseSubIDFileFilter("/etc/subuid",
		func(entry SubID) bool { return entry.Name == u.Name })
}

func CurrentGroupSubGIDs() ([]SubID, error) {
	g, err := CurrentGroup()
	if err != nil {
		return nil, err
	}
	return ParseSubIDFileFilter("/etc/subgid",
		func(entry SubID) bool { return entry.Name == g.Name })
}

func CurrentProcessUIDMap() ([]IDMap, error) {
	return ParseIDMapFile("/proc/self/uid_map")
}

func CurrentProcessGIDMap() ([]IDMap, error) {
	return ParseIDMapFile("/proc/self/gid_map")
}
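A minimal sketch (illustrative only, not part of the vendored code) of how these new lookup helpers might be consumed:

package main

import (
	"fmt"
	"log"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// Subordinate UID ranges for the current user, filtered out of /etc/subuid.
	subuids, err := user.CurrentUserSubUIDs()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range subuids {
		fmt.Printf("%s: %d ids starting at %d\n", s.Name, s.Count, s.SubID)
	}

	// The calling process's own UID map from /proc/self/uid_map.
	uidmap, err := user.CurrentProcessUIDMap()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range uidmap {
		fmt.Printf("id %d in this namespace -> id %d in the parent namespace (count %d)\n",
			m.ID, m.ParentID, m.Count)
	}
}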
133  vendor/github.com/opencontainers/runc/libcontainer/user/user.go  generated  vendored
@ -75,12 +75,29 @@ func groupFromOS(g *user.Group) (Group, error) {
	return newGroup, nil
}

// SubID represents an entry in /etc/sub{u,g}id
type SubID struct {
	Name  string
	SubID int64
	Count int64
}

// IDMap represents an entry in /proc/PID/{u,g}id_map
type IDMap struct {
	ID       int64
	ParentID int64
	Count    int64
}

func parseLine(line string, v ...interface{}) {
	if line == "" {
	parseParts(strings.Split(line, ":"), v...)
	}

func parseParts(parts []string, v ...interface{}) {
	if len(parts) == 0 {
		return
	}

	parts := strings.Split(line, ":")
	for i, p := range parts {
		// Ignore cases where we don't have enough fields to populate the arguments.
		// Some configuration files like to misbehave.
@ -96,6 +113,8 @@ func parseLine(line string, v ...interface{}) {
		case *int:
			// "numbers", with conversion errors ignored because of some misbehaving configuration files.
			*e, _ = strconv.Atoi(p)
		case *int64:
			*e, _ = strconv.ParseInt(p, 10, 64)
		case *[]string:
			// Comma-separated lists.
			if p != "" {
@ -105,7 +124,7 @@ func parseLine(line string, v ...interface{}) {
			}
		default:
			// Someone goof'd when writing code using this function. Scream so they can hear us.
			panic(fmt.Sprintf("parseLine only accepts {*string, *int, *[]string} as arguments! %#v is not a pointer!", e))
			panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! %#v is not a pointer!", e))
		}
	}
}
@ -479,3 +498,111 @@ func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int
	}
	return GetAdditionalGroups(additionalGroups, group)
}

func ParseSubIDFile(path string) ([]SubID, error) {
	subid, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer subid.Close()
	return ParseSubID(subid)
}

func ParseSubID(subid io.Reader) ([]SubID, error) {
	return ParseSubIDFilter(subid, nil)
}

func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) {
	subid, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer subid.Close()
	return ParseSubIDFilter(subid, filter)
}

func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
	if r == nil {
		return nil, fmt.Errorf("nil source for subid-formatted data")
	}

	var (
		s   = bufio.NewScanner(r)
		out = []SubID{}
	)

	for s.Scan() {
		if err := s.Err(); err != nil {
			return nil, err
		}

		line := strings.TrimSpace(s.Text())
		if line == "" {
			continue
		}

		// see: man 5 subuid
		p := SubID{}
		parseLine(line, &p.Name, &p.SubID, &p.Count)

		if filter == nil || filter(p) {
			out = append(out, p)
		}
	}

	return out, nil
}

func ParseIDMapFile(path string) ([]IDMap, error) {
	r, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ParseIDMap(r)
}

func ParseIDMap(r io.Reader) ([]IDMap, error) {
	return ParseIDMapFilter(r, nil)
}

func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) {
	r, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ParseIDMapFilter(r, filter)
}

func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
	if r == nil {
		return nil, fmt.Errorf("nil source for idmap-formatted data")
	}

	var (
		s   = bufio.NewScanner(r)
		out = []IDMap{}
	)

	for s.Scan() {
		if err := s.Err(); err != nil {
			return nil, err
		}

		line := strings.TrimSpace(s.Text())
		if line == "" {
			continue
		}

		// see: man 7 user_namespaces
		p := IDMap{}
		parseParts(strings.Fields(line), &p.ID, &p.ParentID, &p.Count)

		if filter == nil || filter(p) {
			out = append(out, p)
		}
	}

	return out, nil
}
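For illustration, a short sketch driving the new parsers from in-memory data instead of /etc/subuid and /proc/self/uid_map; the sample entries below are made up.

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// subuid-formatted data: name:start:count, colon-separated (man 5 subuid).
	subuidData := "alice:100000:65536\nbob:165536:65536\n"
	aliceOnly, err := user.ParseSubIDFilter(strings.NewReader(subuidData),
		func(e user.SubID) bool { return e.Name == "alice" })
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aliceOnly) // [{alice 100000 65536}]

	// uid_map-formatted data: three whitespace-separated numbers (man 7 user_namespaces).
	idmapData := "0 100000 65536\n"
	idmap, err := user.ParseIDMap(strings.NewReader(idmapData))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(idmap) // [{0 100000 65536}]
}

The *int64 case added to parseLine/parseParts above is what lets the SubID and Count fields be parsed as 64-bit values here.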
2  vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go  generated  vendored
@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
2  vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto  generated  vendored
@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
2  vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go  generated  vendored
@ -1,7 +1,7 @@
// +build !ignore_autogenerated

/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
2  vendor/k8s.io/api/apps/v1beta1/generated.pb.go  generated  vendored
@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
Some files were not shown because too many files have changed in this diff.