Compare commits
62 Commits
v27.4.1+pa...v18.09.0-b
| Author | SHA1 | Date |
|---|---|---|
|  | 4280972d65 |  |
|  | 984bc7411e |  |
|  | 92932647d3 |  |
|  | dee37936e5 |  |
|  | 3e1a0bdc23 |  |
|  | f2b2061cc3 |  |
|  | 4925fd9c34 |  |
|  | 5d3ab5bc0c |  |
|  | c12e23a4c1 |  |
|  | aca3f2d382 |  |
|  | a7488d1bcd |  |
|  | 5a97a93ae1 |  |
|  | 41910b6d68 |  |
|  | 1a087e87c9 |  |
|  | 0b11120060 |  |
|  | e57b20642d |  |
|  | b8702b8a9a |  |
|  | a31b20d7db |  |
|  | 5ba5678898 |  |
|  | 9de1318e36 |  |
|  | 19e1ab273e |  |
|  | ec1812188f |  |
|  | 6004d74b1f |  |
|  | e79e591ee9 |  |
|  | 0f22d7e295 |  |
|  | f250152bf4 |  |
|  | f9d666b057 |  |
|  | 342afe44fb |  |
|  | cfec8027ed |  |
|  | 78c42cf031 |  |
|  | dd2f13bed4 |  |
|  | 3b991ec615 |  |
|  | 34ea8bb5a5 |  |
|  | afb17ec70b |  |
|  | 62aed95bc1 |  |
|  | 649e4916bb |  |
|  | 3597d75281 |  |
|  | 5673816fec |  |
|  | a8c69c8287 |  |
|  | fc3dc8f058 |  |
|  | 2a46a3d46c |  |
|  | b2cf18ac2e |  |
|  | 44371c7c34 |  |
|  | 4e6798794d |  |
|  | d8aefad94a |  |
|  | 3c37d6a034 |  |
|  | 9d43f1ed48 |  |
|  | a818677813 |  |
|  | c204959687 |  |
|  | 76c09259db |  |
|  | 0efb62cab1 |  |
|  | 8789e93d6e |  |
|  | 0ee05a6353 |  |
|  | 68be7cb376 |  |
|  | de805da04c |  |
|  | b75350de7a |  |
|  | f96ddaedf7 |  |
|  | 0fb6bb35a4 |  |
|  | 264ee43c2a |  |
|  | 7f4c842e8a |  |
|  | e25e9d68be |  |
|  | 6877dedeee |  |
Makefile (4 changes)

```diff
@@ -12,14 +12,14 @@ clean: ## remove build artifacts
 .PHONY: test-unit
 test-unit: ## run unit test
-	./scripts/test/unit $(shell go list ./... | grep -vE '/vendor/|/e2e/|/e2eengine/')
+	./scripts/test/unit $(shell go list ./... | grep -vE '/vendor/|/e2e/')

 .PHONY: test
 test: test-unit ## run tests

 .PHONY: test-coverage
 test-coverage: ## run test coverage
-	./scripts/test/unit-with-coverage $(shell go list ./... | grep -vE '/vendor/|/e2e/|/e2eengine/')
+	./scripts/test/unit-with-coverage $(shell go list ./... | grep -vE '/vendor/|/e2e/')

 .PHONY: lint
 lint: ## run all the lint tools
```
```diff
@@ -4,7 +4,7 @@ clone_folder: c:\gopath\src\github.com\docker\cli

 environment:
   GOPATH: c:\gopath
-  GOVERSION: 1.10.3
+  GOVERSION: 1.10.4
   DEPVERSION: v0.4.1

 install:
```
```diff
@@ -3,29 +3,94 @@ package builder
 import (
 	"context"
 	"fmt"
+	"strings"

 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/opts"
+	"github.com/docker/docker/api/types"
 	units "github.com/docker/go-units"
 	"github.com/spf13/cobra"
 )

 type pruneOptions struct {
 	force  bool
+	all    bool
 	filter opts.FilterOpt
+	keepStorage opts.MemBytes
 }

 // NewPruneCommand returns a new cobra prune command for images
 func NewPruneCommand(dockerCli command.Cli) *cobra.Command {
 	options := pruneOptions{filter: opts.NewFilterOpt()}

 	cmd := &cobra.Command{
 		Use:   "prune",
 		Short: "Remove build cache",
 		Args:  cli.NoArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
-			report, err := dockerCli.Client().BuildCachePrune(context.Background())
+			spaceReclaimed, output, err := runPrune(dockerCli, options)
 			if err != nil {
 				return err
 			}
-			fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(report.SpaceReclaimed)))
+			if output != "" {
+				fmt.Fprintln(dockerCli.Out(), output)
+			}
+			fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed)))
 			return nil
 		},
 		Annotations: map[string]string{"version": "1.39"},
 	}

 	flags := cmd.Flags()
 	flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
+	flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
 	flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'max-age=24h')")
+	flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")

 	return cmd
 }

+const (
+	normalWarning   = `WARNING! This will remove all dangling build cache. Are you sure you want to continue?`
+	allCacheWarning = `WARNING! This will remove all build cache. Are you sure you want to continue?`
+)
+
+func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) {
+	pruneFilters := options.filter.Value()
+	pruneFilters = command.PruneFilters(dockerCli, pruneFilters)
+
+	warning := normalWarning
+	if options.all {
+		warning = allCacheWarning
+	}
+	if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {
+		return 0, "", nil
+	}
+
+	report, err := dockerCli.Client().BuildCachePrune(context.Background(), types.BuildCachePruneOptions{
+		All:         options.all,
+		KeepStorage: options.keepStorage.Value(),
+		Filters:     pruneFilters,
+	})
+	if err != nil {
+		return 0, "", err
+	}
+
+	if len(report.CachesDeleted) > 0 {
+		var sb strings.Builder
+		sb.WriteString("Deleted build cache objects:\n")
+		for _, id := range report.CachesDeleted {
+			sb.WriteString(id)
+			sb.WriteByte('\n')
+		}
+		output = sb.String()
+	}
+
+	return report.SpaceReclaimed, output, nil
+}
+
+// CachePrune executes a prune command for build cache
+func CachePrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {
+	return runPrune(dockerCli, pruneOptions{force: true, all: all, filter: filter})
+}
```
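The hunk above routes `docker builder prune` through `runPrune` and the richer `BuildCachePrune` API call. A minimal standalone sketch of calling that API with the Docker Go SDK (not part of the diff), assuming a reachable daemon new enough for these options (API v1.39+); the filter and size values are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	// FromEnv honours DOCKER_HOST and friends.
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	report, err := cli.BuildCachePrune(context.Background(), types.BuildCachePruneOptions{
		All:         false,                   // like the CLI default: only dangling cache
		KeepStorage: 10 * 1024 * 1024 * 1024, // retain up to ~10GB of cache (illustrative)
		Filters:     filters.NewArgs(filters.Arg("until", "24h")),
	})
	if err != nil {
		panic(err)
	}
	for _, id := range report.CachesDeleted {
		fmt.Println("deleted:", id)
	}
	fmt.Println("reclaimed bytes:", report.SpaceReclaimed)
}
```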
```diff
@@ -19,8 +19,8 @@ import (
 	manifeststore "github.com/docker/cli/cli/manifest/store"
 	registryclient "github.com/docker/cli/cli/registry/client"
 	"github.com/docker/cli/cli/trust"
-	"github.com/docker/cli/internal/containerizedengine"
 	dopts "github.com/docker/cli/opts"
+	clitypes "github.com/docker/cli/types"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api/types"
 	registrytypes "github.com/docker/docker/api/types/registry"
@@ -55,20 +55,21 @@ type Cli interface {
 	ManifestStore() manifeststore.Store
 	RegistryClient(bool) registryclient.RegistryClient
 	ContentTrustEnabled() bool
-	NewContainerizedEngineClient(sockPath string) (containerizedengine.Client, error)
+	NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error)
 }

 // DockerCli is an instance the docker command line client.
 // Instances of the client can be returned from NewDockerCli.
 type DockerCli struct {
-	configFile   *configfile.ConfigFile
-	in           *InStream
-	out          *OutStream
-	err          io.Writer
-	client       client.APIClient
-	serverInfo   ServerInfo
-	clientInfo   ClientInfo
-	contentTrust bool
+	configFile            *configfile.ConfigFile
+	in                    *InStream
+	out                   *OutStream
+	err                   io.Writer
+	client                client.APIClient
+	serverInfo            ServerInfo
+	clientInfo            ClientInfo
+	contentTrust          bool
+	newContainerizeClient func(string) (clitypes.ContainerizedClient, error)
 }

 // DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified.
@@ -233,8 +234,8 @@ func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions
 }

 // NewContainerizedEngineClient returns a containerized engine client
-func (cli *DockerCli) NewContainerizedEngineClient(sockPath string) (containerizedengine.Client, error) {
-	return containerizedengine.NewClient(sockPath)
+func (cli *DockerCli) NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error) {
+	return cli.newContainerizeClient(sockPath)
 }

 // ServerInfo stores details about the supported features and platform of the
@@ -252,8 +253,8 @@ type ClientInfo struct {
 }

 // NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err.
-func NewDockerCli(in io.ReadCloser, out, err io.Writer, isTrusted bool) *DockerCli {
-	return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err, contentTrust: isTrusted}
+func NewDockerCli(in io.ReadCloser, out, err io.Writer, isTrusted bool, containerizedFn func(string) (clitypes.ContainerizedClient, error)) *DockerCli {
+	return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err, contentTrust: isTrusted, newContainerizeClient: containerizedFn}
 }

 // NewAPIClientFromFlags creates a new APIClient from command line flags
```
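`NewDockerCli` now takes the containerized-engine client factory as an argument instead of importing `internal/containerizedengine` directly, which is what lets the tests below substitute fakes via `SetContainerizedEngineClient`. A hypothetical sketch of the call-site wiring, assuming `containerizedengine.NewClient` satisfies the new factory shape (being an internal package, it is only importable from inside docker/cli itself, e.g. from `cmd/docker`):

```go
package main

import (
	"os"

	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/internal/containerizedengine"
)

func main() {
	// The last argument is the injected factory of type
	// func(string) (clitypes.ContainerizedClient, error).
	dockerCli := command.NewDockerCli(os.Stdin, os.Stdout, os.Stderr,
		true, // content trust enabled
		containerizedengine.NewClient)
	_ = dockerCli
}
```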
```diff
@@ -2,6 +2,7 @@ package commands

 import (
 	"os"
+	"runtime"

 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/command/builder"
@@ -85,9 +86,6 @@ func AddCommands(cmd *cobra.Command, dockerCli command.Cli) {
 		// volume
 		volume.NewVolumeCommand(dockerCli),

-		// engine
-		engine.NewEngineCommand(dockerCli),
-
 		// legacy commands may be hidden
 		hide(system.NewEventsCommand(dockerCli)),
 		hide(system.NewInfoCommand(dockerCli)),
@@ -124,7 +122,10 @@ func AddCommands(cmd *cobra.Command, dockerCli command.Cli) {
 		hide(image.NewSaveCommand(dockerCli)),
 		hide(image.NewTagCommand(dockerCli)),
 	)

+	if runtime.GOOS == "linux" {
+		// engine
+		cmd.AddCommand(engine.NewEngineCommand(dockerCli))
+	}
 }

 func hide(cmd *cobra.Command) *cobra.Command {
```
```diff
@@ -73,6 +73,6 @@ func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint6

 // RunPrune calls the Container Prune API
 // This returns the amount of space reclaimed and a detailed output string
-func RunPrune(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) {
+func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {
 	return runPrune(dockerCli, pruneOptions{force: true, filter: filter})
 }
```
```diff
@@ -3,11 +3,12 @@ package engine
 import (
 	"context"
 	"fmt"
+	"strings"

 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/command/formatter"
-	"github.com/docker/cli/internal/containerizedengine"
 	"github.com/docker/cli/internal/licenseutils"
+	clitypes "github.com/docker/cli/types"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/licensing/model"
 	"github.com/pkg/errors"
@@ -15,19 +16,21 @@ import (
 )

 type activateOptions struct {
-	licenseFile    string
-	version        string
-	registryPrefix string
-	format         string
-	image          string
-	quiet          bool
-	displayOnly    bool
-	sockPath       string
+	licenseFile      string
+	version          string
+	registryPrefix   string
+	format           string
+	image            string
+	quiet            bool
+	displayOnly      bool
+	sockPath         string
+	licenseLoginFunc func(ctx context.Context, authConfig *types.AuthConfig) (licenseutils.HubUser, error)
 }

 // newActivateCommand creates a new `docker engine activate` command
 func newActivateCommand(dockerCli command.Cli) *cobra.Command {
 	var options activateOptions
+	options.licenseLoginFunc = licenseutils.Login

 	cmd := &cobra.Command{
 		Use: "activate [OPTIONS]",
@@ -56,10 +59,10 @@ https://hub.docker.com/ then specify the file with the '--license' flag.

 	flags.StringVar(&options.licenseFile, "license", "", "License File")
 	flags.StringVar(&options.version, "version", "", "Specify engine version (default is to use currently running version)")
-	flags.StringVar(&options.registryPrefix, "registry-prefix", "docker.io/docker", "Override the default location where engine images are pulled")
-	flags.StringVar(&options.image, "engine-image", containerizedengine.EnterpriseEngineImage, "Specify engine image")
+	flags.StringVar(&options.registryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the default location where engine images are pulled")
+	flags.StringVar(&options.image, "engine-image", "", "Specify engine image")
 	flags.StringVar(&options.format, "format", "", "Pretty-print licenses using a Go template")
-	flags.BoolVar(&options.displayOnly, "display-only", false, "only display the available licenses and exit")
+	flags.BoolVar(&options.displayOnly, "display-only", false, "only display license information and exit")
 	flags.BoolVar(&options.quiet, "quiet", false, "Only display available licenses by ID")
 	flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")

@@ -67,6 +70,9 @@ https://hub.docker.com/ then specify the file with the '--license' flag.
 }

 func runActivate(cli command.Cli, options activateOptions) error {
+	if !isRoot() {
+		return errors.New("this command must be run as a privileged user")
+	}
 	ctx := context.Background()
 	client, err := cli.NewContainerizedEngineClient(options.sockPath)
 	if err != nil {
@@ -94,26 +100,48 @@ func runActivate(cli command.Cli, options activateOptions) error {
 			return err
 		}
 	}
-	if err = licenseutils.ApplyLicense(ctx, cli.Client(), license); err != nil {
+	summary, err := licenseutils.GetLicenseSummary(ctx, *license)
+	if err != nil {
+		return err
+	}
+	fmt.Fprintf(cli.Out(), "License: %s\n", summary)
+	if options.displayOnly {
+		return nil
+	}
+	dclient := cli.Client()
+	if err = licenseutils.ApplyLicense(ctx, dclient, license); err != nil {
 		return err
 	}

-	opts := containerizedengine.EngineInitOptions{
+	// Short circuit if the user didn't specify a version and we're already running enterprise
+	if options.version == "" {
+		serverVersion, err := dclient.ServerVersion(ctx)
+		if err != nil {
+			return err
+		}
+		if strings.Contains(strings.ToLower(serverVersion.Platform.Name), "enterprise") {
+			fmt.Fprintln(cli.Out(), "Successfully activated engine license on existing enterprise engine.")
+			return nil
+		}
+		options.version = serverVersion.Version
+	}
+
+	opts := clitypes.EngineInitOptions{
 		RegistryPrefix: options.registryPrefix,
 		EngineImage:    options.image,
 		EngineVersion:  options.version,
 	}

-	return client.ActivateEngine(ctx, opts, cli.Out(), authConfig,
-		func(ctx context.Context) error {
-			client := cli.Client()
-			_, err := client.Ping(ctx)
-			return err
-		})
+	if err := client.ActivateEngine(ctx, opts, cli.Out(), authConfig); err != nil {
+		return err
+	}
+	fmt.Fprintln(cli.Out(), `Successfully activated engine.
+Restart docker with 'systemctl restart docker' to complete the activation.`)
+	return nil
 }

 func getLicenses(ctx context.Context, authConfig *types.AuthConfig, cli command.Cli, options activateOptions) (*model.IssuedLicense, error) {
-	user, err := licenseutils.Login(ctx, authConfig)
+	user, err := options.licenseLoginFunc(ctx, authConfig)
 	if err != nil {
 		return nil, err
 	}
```
```diff
@@ -1,19 +1,35 @@
 package engine

 import (
+	"context"
 	"fmt"
 	"testing"
+	"time"

-	"github.com/docker/cli/internal/containerizedengine"
 	"github.com/docker/cli/internal/licenseutils"
 	"github.com/docker/cli/internal/test"
+	clitypes "github.com/docker/cli/types"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
+	"github.com/docker/licensing"
 	"github.com/docker/licensing/model"
 	"gotest.tools/assert"
+	"gotest.tools/fs"
 	"gotest.tools/golden"
 )

+const (
+	// nolint: lll
+	expiredLicense = `{"key_id":"irlYm3b9fdD8hMUXjazF39im7VQSSbAm9tfHK8cKUxJt","private_key":"aH5tTRDAVJpCRS2CRetTQVXIKgWUPfoCHODhDvNPvAbz","authorization":"ewogICAicGF5bG9hZCI6ICJleUpsZUhCcGNtRjBhVzl1SWpvaU1qQXhPQzB3TXkweE9GUXdOem93TURvd01Gb2lMQ0owYjJ0bGJpSTZJbkZtTVMxMlVtRmtialp5YjFaMldXdHJlVXN4VFdKMGNGUmpXR1ozVjA4MVRWZFFTM2cwUnpJd2NIYzlJaXdpYldGNFJXNW5hVzVsY3lJNk1Td2ljMk5oYm01cGJtZEZibUZpYkdWa0lqcDBjblZsTENKc2FXTmxibk5sVkhsd1pTSTZJazltWm14cGJtVWlMQ0owYVdWeUlqb2lVSEp2WkhWamRHbHZiaUo5IiwKICAgInNpZ25hdHVyZXMiOiBbCiAgICAgIHsKICAgICAgICAgImhlYWRlciI6IHsKICAgICAgICAgICAgImp3ayI6IHsKICAgICAgICAgICAgICAgImUiOiAiQVFBQiIsCiAgICAgICAgICAgICAgICJrZXlJRCI6ICJKN0xEOjY3VlI6TDVIWjpVN0JBOjJPNEc6NEFMMzpPRjJOOkpIR0I6RUZUSDo1Q1ZROk1GRU86QUVJVCIsCiAgICAgICAgICAgICAgICJraWQiOiAiSjdMRDo2N1ZSOkw1SFo6VTdCQToyTzRHOjRBTDM6T0YyTjpKSEdCOkVGVEg6NUNWUTpNRkVPOkFFSVQiLAogICAgICAgICAgICAgICAia3R5IjogIlJTQSIsCiAgICAgICAgICAgICAgICJuIjogInlkSXktbFU3bzdQY2VZLTQtcy1DUTVPRWdDeUY4Q3hJY1FJV3VLODRwSWlaY2lZNjczMHlDWW53TFNLVGx3LVU2VUNfUVJlV1Jpb01OTkU1RHM1VFlFWGJHRzZvbG0ycWRXYkJ3Y0NnLTJVVUhfT2NCOVd1UDZnUlBIcE1GTXN4RHpXd3ZheThKVXVIZ1lVTFVwbTFJdi1tcTdscDVuUV9SeHJUMEtaUkFRVFlMRU1FZkd3bTNoTU9fZ2VMUFMtaGdLUHRJSGxrZzZfV2NveFRHb0tQNzlkX3dhSFl4R05sN1doU25laUJTeGJwYlFBS2syMWxnNzk4WGI3dlp5RUFURE1yUlI5TWVFNkFkajVISnBZM0NveVJBUENtYUtHUkNLNHVvWlNvSXUwaEZWbEtVUHliYncwMDBHTy13YTJLTjhVd2dJSW0waTVJMXVXOUdrcTR6akJ5NXpoZ3F1VVhiRzliV1BBT1lycTVRYTgxRHhHY0JsSnlIWUFwLUREUEU5VEdnNHpZbVhqSm54WnFIRWR1R3FkZXZaOFhNSTB1a2ZrR0lJMTR3VU9pTUlJSXJYbEVjQmZfNDZJOGdRV0R6eHljWmVfSkdYLUxBdWF5WHJ5clVGZWhWTlVkWlVsOXdYTmFKQi1rYUNxejVRd2FSOTNzR3ctUVNmdEQwTnZMZTdDeU9ILUU2dmc2U3RfTmVUdmd2OFluaENpWElsWjhIT2ZJd05lN3RFRl9VY3o1T2JQeWttM3R5bHJOVWp0MFZ5QW10dGFjVkkyaUdpaGNVUHJtazRsVklaN1ZEX0xTVy1pN3lvU3VydHBzUFhjZTJwS0RJbzMwbEpHaE9fM0tVbWwyU1VaQ3F6SjF5RW1LcHlzSDVIRFc5Y3NJRkNBM2RlQWpmWlV2TjdVIgogICAgICAgICAgICB9LAogICAgICAgICAgICAiYWxnIjogIlJTMjU2IgogICAgICAgICB9LAogICAgICAgICAic2lnbmF0dXJlIjogIm5saTZIdzRrbW5KcTBSUmRXaGVfbkhZS2VJLVpKenM1U0d5SUpDakh1dWtnVzhBYklpVzFZYWJJR2NqWUt0QTY4dWN6T1hyUXZreGxWQXJLSlgzMDJzN0RpbzcxTlNPRzJVcnhsSjlibDFpd0F3a3ZyTEQ2T0p5MGxGLVg4WnRabXhPVmNQZmwzcmJwZFQ0dnlnWTdNcU1QRXdmb0IxTmlWZDYyZ1cxU2NSREZZcWw3R0FVaFVKNkp4QU15VzVaOXl5YVE0NV8wd0RMUk5mRjA5YWNXeVowTjRxVS1hZjhrUTZUUWZUX05ERzNCR3pRb2V3cHlEajRiMFBHb0diOFhLdDlwekpFdEdxM3lQM25VMFFBbk90a2gwTnZac1l1UFcyUnhDT3lRNEYzVlR3UkF2eF9HSTZrMVRpYmlKNnByUWluUy16Sjh6RE8zUjBuakE3OFBwNXcxcVpaUE9BdmtzZFNSYzJDcVMtcWhpTmF5YUhOVHpVNnpyOXlOZHR2S0o1QjNST0FmNUtjYXNiWURjTnVpeXBUNk90LUtqQ2I1dmYtWVpnc2FRNzJBdFBhSU4yeUpNREZHbmEwM0hpSjMxcTJRUlp5eTZrd3RYaGtwcDhTdEdIcHYxSWRaV09SVWttb0g5SFBzSGk4SExRLTZlM0tEY2x1RUQyMTNpZnljaVhtN0YzdHdaTTNHeDd1UXR1SldHaUlTZ2Z0QW9lVjZfUmI2VThkMmZxNzZuWHYxak5nckRRcE5waEZFd2tCdGRtZHZ2THByZVVYX3BWangza1AxN3pWbXFKNmNOOWkwWUc4WHg2VmRzcUxsRXUxQ2Rhd3Q0eko1M3VHMFlKTjRnUDZwc25yUS1uM0U1aFdlMDJ3d3dBZ3F3bGlPdmd4V1RTeXJyLXY2eDI0IiwKICAgICAgICAgInByb3RlY3RlZCI6ICJleUptYjNKdFlYUk1aVzVuZEdnaU9qRTNNeXdpWm05eWJXRjBWR0ZwYkNJNkltWlJJaXdpZEdsdFpTSTZJakl3TVRjdE1EVXRNRFZVTWpFNk5UYzZNek5hSW4wIgogICAgICB9CiAgIF0KfQ=="}`
+)
+
 func TestActivateNoContainerd(t *testing.T) {
 	testCli.SetContainerizedEngineClient(
-		func(string) (containerizedengine.Client, error) {
+		func(string) (clitypes.ContainerizedClient, error) {
 			return nil, fmt.Errorf("some error")
 		},
 	)
+	isRoot = func() bool { return true }
 	cmd := newActivateCommand(testCli)
 	cmd.Flags().Set("license", "invalidpath")
 	cmd.SilenceUsage = true
@@ -24,10 +40,11 @@ func TestActivateNoContainerd(t *testing.T) {

 func TestActivateBadLicense(t *testing.T) {
 	testCli.SetContainerizedEngineClient(
-		func(string) (containerizedengine.Client, error) {
+		func(string) (clitypes.ContainerizedClient, error) {
 			return &fakeContainerizedEngineClient{}, nil
 		},
 	)
+	isRoot = func() bool { return true }
 	cmd := newActivateCommand(testCli)
 	cmd.SilenceUsage = true
 	cmd.SilenceErrors = true
@@ -35,3 +52,95 @@ func TestActivateBadLicense(t *testing.T) {
 	err := cmd.Execute()
 	assert.Error(t, err, "open invalidpath: no such file or directory")
 }
+
+func TestActivateExpiredLicenseDryRun(t *testing.T) {
+	dir := fs.NewDir(t, "license", fs.WithFile("docker.lic", expiredLicense, fs.WithMode(0644)))
+	defer dir.Remove()
+	filename := dir.Join("docker.lic")
+	isRoot = func() bool { return true }
+	c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil})
+	c.SetContainerizedEngineClient(
+		func(string) (clitypes.ContainerizedClient, error) {
+			return &fakeContainerizedEngineClient{}, nil
+		},
+	)
+	cmd := newActivateCommand(c)
+	cmd.SilenceUsage = true
+	cmd.SilenceErrors = true
+	cmd.Flags().Set("license", filename)
+	cmd.Flags().Set("display-only", "true")
+	c.OutBuffer().Reset()
+	err := cmd.Execute()
+	assert.NilError(t, err)
+	golden.Assert(t, c.OutBuffer().String(), "expired-license-display-only.golden")
+}
+
+type mockLicenseClient struct{}
+
+func (c mockLicenseClient) LoginViaAuth(ctx context.Context, username, password string) (authToken string, err error) {
+	return "", fmt.Errorf("not implemented")
+}
+
+func (c mockLicenseClient) GetHubUserOrgs(ctx context.Context, authToken string) (orgs []model.Org, err error) {
+	return nil, fmt.Errorf("not implemented")
+}
+func (c mockLicenseClient) GetHubUserByName(ctx context.Context, username string) (user *model.User, err error) {
+	return nil, fmt.Errorf("not implemented")
+}
+func (c mockLicenseClient) VerifyLicense(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error) {
+	return nil, fmt.Errorf("not implemented")
+}
+func (c mockLicenseClient) GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error) {
+	return "", fmt.Errorf("not implemented")
+}
+func (c mockLicenseClient) ListSubscriptions(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
+	expires := time.Date(2010, time.January, 1, 0, 0, 0, 0, time.UTC)
+	return []*model.Subscription{
+		{
+			State:   "active",
+			Expires: &expires,
+		},
+	}, nil
+}
+func (c mockLicenseClient) ListSubscriptionsDetails(ctx context.Context, authToken, dockerID string) (response []*model.SubscriptionDetail, err error) {
+	return nil, fmt.Errorf("not implemented")
+}
+func (c mockLicenseClient) DownloadLicenseFromHub(ctx context.Context, authToken, subscriptionID string) (license *model.IssuedLicense, err error) {
+	return nil, fmt.Errorf("not implemented")
+}
+func (c mockLicenseClient) ParseLicense(license []byte) (parsedLicense *model.IssuedLicense, err error) {
+	return nil, fmt.Errorf("not implemented")
+}
+func (c mockLicenseClient) StoreLicense(ctx context.Context, dclnt licensing.WrappedDockerClient, licenses *model.IssuedLicense, localRootDir string) error {
+	return fmt.Errorf("not implemented")
+}
+func (c mockLicenseClient) LoadLocalLicense(ctx context.Context, dclnt licensing.WrappedDockerClient) (*model.Subscription, error) {
+	return nil, fmt.Errorf("not implemented")
+}
+func (c mockLicenseClient) SummarizeLicense(res *model.CheckResponse, keyID string) *model.Subscription {
+	return nil
+}
+func TestActivateDisplayOnlyHub(t *testing.T) {
+	isRoot = func() bool { return true }
+	c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil})
+	c.SetContainerizedEngineClient(
+		func(string) (clitypes.ContainerizedClient, error) {
+			return &fakeContainerizedEngineClient{}, nil
+		},
+	)
+
+	hubUser := licenseutils.HubUser{
+		Client: mockLicenseClient{},
+	}
+	options := activateOptions{
+		licenseLoginFunc: func(ctx context.Context, authConfig *types.AuthConfig) (licenseutils.HubUser, error) {
+			return hubUser, nil
+		},
+		displayOnly: true,
+	}
+	c.OutBuffer().Reset()
+	err := runActivate(c, options)
+
+	assert.NilError(t, err)
+	golden.Assert(t, c.OutBuffer().String(), "expired-hub-license-display-only.golden")
+}
```
cli/command/engine/activate_unix.go (new file, 13 lines)

```diff
@@ -0,0 +1,13 @@
+// +build !windows
+
+package engine
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+var (
+	isRoot = func() bool {
+		return unix.Geteuid() == 0
+	}
+)
```
cli/command/engine/activate_windows.go (new file, 9 lines)

```diff
@@ -0,0 +1,9 @@
+// +build windows
+
+package engine
+
+var (
+	isRoot = func() bool {
+		return true
+	}
+)
```
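Both new files define `isRoot` as a package-level `var` holding a function rather than a plain `func`, so the platform check can be swapped out at runtime; the tests in this diff do exactly that with `isRoot = func() bool { return true }`. The seam in isolation, as a sketch (the `requireRoot` helper is hypothetical; the commands in the diff inline the check):

```go
package engine

import "errors"

// isRoot is a var holding a func so tests can replace it; the real
// per-platform definitions live in activate_unix.go / activate_windows.go.
var isRoot = func() bool { return false } // placeholder default for this sketch

// requireRoot is a hypothetical helper wrapping the privilege check.
func requireRoot() error {
	if !isRoot() {
		return errors.New("this command must be run as a privileged user")
	}
	return nil
}
```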
```diff
@@ -5,6 +5,7 @@ import (

 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/trust"
+	clitypes "github.com/docker/cli/types"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
 	registrytypes "github.com/docker/docker/api/types/registry"
@@ -13,7 +14,7 @@ import (

 func getRegistryAuth(cli command.Cli, registryPrefix string) (*types.AuthConfig, error) {
 	if registryPrefix == "" {
-		registryPrefix = "docker.io/docker"
+		registryPrefix = clitypes.RegistryPrefix
 	}
 	distributionRef, err := reference.ParseNormalizedNamed(registryPrefix)
 	if err != nil {
```
```diff
@@ -7,18 +7,16 @@ import (
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/command/formatter"
-	"github.com/docker/cli/internal/containerizedengine"
+	"github.com/docker/cli/internal/versions"
+	clitypes "github.com/docker/cli/types"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )

-const (
-	releaseNotePrefix = "https://docs.docker.com/releasenotes"
-)
-
 type checkOptions struct {
 	registryPrefix string
 	preReleases    bool
+	engineImage    string
 	downgrades     bool
 	upgrades       bool
 	format         string
@@ -38,9 +36,10 @@ func newCheckForUpdatesCommand(dockerCli command.Cli) *cobra.Command {
 		},
 	}
 	flags := cmd.Flags()
-	flags.StringVar(&options.registryPrefix, "registry-prefix", "", "Override the existing location where engine images are pulled")
+	flags.StringVar(&options.registryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the existing location where engine images are pulled")
 	flags.BoolVar(&options.downgrades, "downgrades", false, "Report downgrades (default omits older versions)")
 	flags.BoolVar(&options.preReleases, "pre-releases", false, "Include pre-release versions")
+	flags.StringVar(&options.engineImage, "engine-image", "", "Specify engine image (default uses the same image as currently running)")
 	flags.BoolVar(&options.upgrades, "upgrades", true, "Report available upgrades")
 	flags.StringVar(&options.format, "format", "", "Pretty-print updates using a Go template")
 	flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display available versions")
@@ -50,54 +49,47 @@ func newCheckForUpdatesCommand(dockerCli command.Cli) *cobra.Command {
 }

 func runCheck(dockerCli command.Cli, options checkOptions) error {
+	if !isRoot() {
+		return errors.New("this command must be run as a privileged user")
+	}
 	ctx := context.Background()
-	client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
-	if err != nil {
-		return errors.Wrap(err, "unable to access local containerd")
-	}
-	defer client.Close()
-	currentOpts, err := client.GetCurrentEngineVersion(ctx)
+	client := dockerCli.Client()
+	serverVersion, err := client.ServerVersion(ctx)
 	if err != nil {
 		return err
 	}

-	// override with user provided prefix if specified
-	if options.registryPrefix != "" {
-		currentOpts.RegistryPrefix = options.registryPrefix
-	}
-	imageName := currentOpts.RegistryPrefix + "/" + currentOpts.EngineImage
-	currentVersion := currentOpts.EngineVersion
-	versions, err := client.GetEngineVersions(ctx, dockerCli.RegistryClient(false), currentVersion, imageName)
+	availVersions, err := versions.GetEngineVersions(ctx, dockerCli.RegistryClient(false), options.registryPrefix, options.engineImage, serverVersion.Version)
 	if err != nil {
 		return err
 	}

-	availUpdates := []containerizedengine.Update{
-		{Type: "current", Version: currentVersion},
+	availUpdates := []clitypes.Update{
+		{Type: "current", Version: serverVersion.Version},
 	}
-	if len(versions.Patches) > 0 {
+	if len(availVersions.Patches) > 0 {
 		availUpdates = append(availUpdates,
 			processVersions(
-				currentVersion,
+				serverVersion.Version,
 				"patch",
 				options.preReleases,
-				versions.Patches)...)
+				availVersions.Patches)...)
 	}
 	if options.upgrades {
 		availUpdates = append(availUpdates,
 			processVersions(
-				currentVersion,
+				serverVersion.Version,
 				"upgrade",
 				options.preReleases,
-				versions.Upgrades)...)
+				availVersions.Upgrades)...)
 	}
 	if options.downgrades {
 		availUpdates = append(availUpdates,
 			processVersions(
-				currentVersion,
+				serverVersion.Version,
 				"downgrade",
 				options.preReleases,
-				versions.Downgrades)...)
+				availVersions.Downgrades)...)
 	}

 	format := options.format
@@ -115,17 +107,17 @@ func runCheck(dockerCli command.Cli, options checkOptions) error {

 func processVersions(currentVersion, verType string,
 	includePrerelease bool,
-	versions []containerizedengine.DockerVersion) []containerizedengine.Update {
-	availUpdates := []containerizedengine.Update{}
-	for _, ver := range versions {
+	availVersions []clitypes.DockerVersion) []clitypes.Update {
+	availUpdates := []clitypes.Update{}
+	for _, ver := range availVersions {
 		if !includePrerelease && ver.Prerelease() != "" {
 			continue
 		}
 		if ver.Tag != currentVersion {
-			availUpdates = append(availUpdates, containerizedengine.Update{
+			availUpdates = append(availUpdates, clitypes.Update{
 				Type:    verType,
 				Version: ver.Tag,
-				Notes:   fmt.Sprintf("%s/%s", releaseNotePrefix, ver.Tag),
+				Notes:   fmt.Sprintf("%s/%s", clitypes.ReleaseNotePrefix, ver.Tag),
 			})
 		}
 	}
```
```diff
@@ -5,11 +5,13 @@ import (
 	"fmt"
 	"testing"

-	registryclient "github.com/docker/cli/cli/registry/client"
-	"github.com/docker/cli/internal/containerizedengine"
+	manifesttypes "github.com/docker/cli/cli/manifest/types"
 	"github.com/docker/cli/internal/test"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
-	ver "github.com/hashicorp/go-version"
+	"github.com/opencontainers/go-digest"
 	"gotest.tools/assert"
 	"gotest.tools/golden"
 )
@@ -18,126 +20,95 @@ var (
 	testCli = test.NewFakeCli(&client.Client{})
 )

-func TestCheckForUpdatesNoContainerd(t *testing.T) {
-	testCli.SetContainerizedEngineClient(
-		func(string) (containerizedengine.Client, error) {
-			return nil, fmt.Errorf("some error")
-		},
-	)
-	cmd := newCheckForUpdatesCommand(testCli)
-	cmd.SilenceUsage = true
-	cmd.SilenceErrors = true
-	err := cmd.Execute()
-	assert.ErrorContains(t, err, "unable to access local containerd")
+type verClient struct {
+	client.Client
+	ver     types.Version
+	verErr  error
+	info    types.Info
+	infoErr error
 }

+func (c *verClient) ServerVersion(ctx context.Context) (types.Version, error) {
+	return c.ver, c.verErr
+}
+
+func (c *verClient) Info(ctx context.Context) (types.Info, error) {
+	return c.info, c.infoErr
+}
+
+type testRegistryClient struct {
+	tags []string
+}
+
+func (c testRegistryClient) GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) {
+	return manifesttypes.ImageManifest{}, nil
+}
+func (c testRegistryClient) GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) {
+	return nil, nil
+}
+func (c testRegistryClient) MountBlob(ctx context.Context, source reference.Canonical, target reference.Named) error {
+	return nil
+}
+
+func (c testRegistryClient) PutManifest(ctx context.Context, ref reference.Named, manifest distribution.Manifest) (digest.Digest, error) {
+	return "", nil
+}
+func (c testRegistryClient) GetTags(ctx context.Context, ref reference.Named) ([]string, error) {
+	return c.tags, nil
+}
+
 func TestCheckForUpdatesNoCurrentVersion(t *testing.T) {
-	retErr := fmt.Errorf("some failure")
-	getCurrentEngineVersionFunc := func(ctx context.Context) (containerizedengine.EngineInitOptions, error) {
-		return containerizedengine.EngineInitOptions{}, retErr
-	}
-	testCli.SetContainerizedEngineClient(
-		func(string) (containerizedengine.Client, error) {
-			return &fakeContainerizedEngineClient{
-				getCurrentEngineVersionFunc: getCurrentEngineVersionFunc,
-			}, nil
-		},
-	)
-	cmd := newCheckForUpdatesCommand(testCli)
+	isRoot = func() bool { return true }
+	c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil})
+	c.SetRegistryClient(testRegistryClient{})
+	cmd := newCheckForUpdatesCommand(c)
 	cmd.SilenceUsage = true
 	cmd.SilenceErrors = true
 	err := cmd.Execute()
-	assert.Assert(t, err == retErr)
-}
-
-func TestCheckForUpdatesGetEngineVersionsFail(t *testing.T) {
-	retErr := fmt.Errorf("some failure")
-	getEngineVersionsFunc := func(ctx context.Context,
-		registryClient registryclient.RegistryClient,
-		currentVersion, imageName string) (containerizedengine.AvailableVersions, error) {
-		return containerizedengine.AvailableVersions{}, retErr
-	}
-	testCli.SetContainerizedEngineClient(
-		func(string) (containerizedengine.Client, error) {
-			return &fakeContainerizedEngineClient{
-				getEngineVersionsFunc: getEngineVersionsFunc,
-			}, nil
-		},
-	)
-	cmd := newCheckForUpdatesCommand(testCli)
-	cmd.SilenceUsage = true
-	cmd.SilenceErrors = true
-	err := cmd.Execute()
-	assert.Assert(t, err == retErr)
+	assert.ErrorContains(t, err, "no such file or directory")
 }

 func TestCheckForUpdatesGetEngineVersionsHappy(t *testing.T) {
-	getCurrentEngineVersionFunc := func(ctx context.Context) (containerizedengine.EngineInitOptions, error) {
-		return containerizedengine.EngineInitOptions{
-			EngineImage:   "current engine",
-			EngineVersion: "1.1.0",
-		}, nil
-	}
-	getEngineVersionsFunc := func(ctx context.Context,
-		registryClient registryclient.RegistryClient,
-		currentVersion, imageName string) (containerizedengine.AvailableVersions, error) {
-		return containerizedengine.AvailableVersions{
-			Downgrades: parseVersions(t, "1.0.1", "1.0.2", "1.0.3-beta1"),
-			Patches:    parseVersions(t, "1.1.1", "1.1.2", "1.1.3-beta1"),
-			Upgrades:   parseVersions(t, "1.2.0", "2.0.0", "2.1.0-beta1"),
-		}, nil
-	}
-	testCli.SetContainerizedEngineClient(
-		func(string) (containerizedengine.Client, error) {
-			return &fakeContainerizedEngineClient{
-				getEngineVersionsFunc:       getEngineVersionsFunc,
-				getCurrentEngineVersionFunc: getCurrentEngineVersionFunc,
-			}, nil
-		},
-	)
-	cmd := newCheckForUpdatesCommand(testCli)
+	c := test.NewFakeCli(&verClient{client.Client{}, types.Version{Version: "1.1.0"}, nil, types.Info{ServerVersion: "1.1.0"}, nil})
+	c.SetRegistryClient(testRegistryClient{[]string{
+		"1.0.1", "1.0.2", "1.0.3-beta1",
+		"1.1.1", "1.1.2", "1.1.3-beta1",
+		"1.2.0", "2.0.0", "2.1.0-beta1",
+	}})
+
+	isRoot = func() bool { return true }
+	cmd := newCheckForUpdatesCommand(c)
 	cmd.Flags().Set("pre-releases", "true")
 	cmd.Flags().Set("downgrades", "true")
+	cmd.Flags().Set("engine-image", "engine-community")
 	cmd.SilenceUsage = true
 	cmd.SilenceErrors = true
 	err := cmd.Execute()
 	assert.NilError(t, err)
-	golden.Assert(t, testCli.OutBuffer().String(), "check-all.golden")
+	golden.Assert(t, c.OutBuffer().String(), "check-all.golden")

-	testCli.OutBuffer().Reset()
+	c.OutBuffer().Reset()
 	cmd.Flags().Set("pre-releases", "false")
 	cmd.Flags().Set("downgrades", "true")
 	err = cmd.Execute()
 	assert.NilError(t, err)
-	fmt.Println(testCli.OutBuffer().String())
-	golden.Assert(t, testCli.OutBuffer().String(), "check-no-prerelease.golden")
+	fmt.Println(c.OutBuffer().String())
+	golden.Assert(t, c.OutBuffer().String(), "check-no-prerelease.golden")

-	testCli.OutBuffer().Reset()
+	c.OutBuffer().Reset()
 	cmd.Flags().Set("pre-releases", "false")
 	cmd.Flags().Set("downgrades", "false")
 	err = cmd.Execute()
 	assert.NilError(t, err)
-	fmt.Println(testCli.OutBuffer().String())
-	golden.Assert(t, testCli.OutBuffer().String(), "check-no-downgrades.golden")
+	fmt.Println(c.OutBuffer().String())
+	golden.Assert(t, c.OutBuffer().String(), "check-no-downgrades.golden")

-	testCli.OutBuffer().Reset()
+	c.OutBuffer().Reset()
 	cmd.Flags().Set("pre-releases", "false")
 	cmd.Flags().Set("downgrades", "false")
 	cmd.Flags().Set("upgrades", "false")
 	err = cmd.Execute()
 	assert.NilError(t, err)
-	fmt.Println(testCli.OutBuffer().String())
-	golden.Assert(t, testCli.OutBuffer().String(), "check-patches-only.golden")
-}
-
-func makeVersion(t *testing.T, tag string) containerizedengine.DockerVersion {
-	v, err := ver.NewVersion(tag)
-	assert.NilError(t, err)
-	return containerizedengine.DockerVersion{Version: *v, Tag: tag}
-}
-
-func parseVersions(t *testing.T, tags ...string) []containerizedengine.DockerVersion {
-	ret := make([]containerizedengine.DockerVersion, len(tags))
-	for i, tag := range tags {
-		ret[i] = makeVersion(t, tag)
-	}
-	return ret
+	fmt.Println(c.OutBuffer().String())
+	golden.Assert(t, c.OutBuffer().String(), "check-patches-only.golden")
 }
```
```diff
@@ -5,7 +5,7 @@ import (

 	"github.com/containerd/containerd"
 	registryclient "github.com/docker/cli/cli/registry/client"
-	"github.com/docker/cli/internal/containerizedengine"
+	clitypes "github.com/docker/cli/types"
 	"github.com/docker/docker/api/types"
 )

@@ -13,28 +13,26 @@ type (
 	fakeContainerizedEngineClient struct {
 		closeFunc          func() error
 		activateEngineFunc func(ctx context.Context,
-			opts containerizedengine.EngineInitOptions,
-			out containerizedengine.OutStream,
-			authConfig *types.AuthConfig,
-			healthfn func(context.Context) error) error
+			opts clitypes.EngineInitOptions,
+			out clitypes.OutStream,
+			authConfig *types.AuthConfig) error
 		initEngineFunc func(ctx context.Context,
-			opts containerizedengine.EngineInitOptions,
-			out containerizedengine.OutStream,
+			opts clitypes.EngineInitOptions,
+			out clitypes.OutStream,
 			authConfig *types.AuthConfig,
 			healthfn func(context.Context) error) error
 		doUpdateFunc func(ctx context.Context,
-			opts containerizedengine.EngineInitOptions,
-			out containerizedengine.OutStream,
-			authConfig *types.AuthConfig,
-			healthfn func(context.Context) error) error
+			opts clitypes.EngineInitOptions,
+			out clitypes.OutStream,
+			authConfig *types.AuthConfig) error
 		getEngineVersionsFunc func(ctx context.Context,
 			registryClient registryclient.RegistryClient,
 			currentVersion,
-			imageName string) (containerizedengine.AvailableVersions, error)
+			imageName string) (clitypes.AvailableVersions, error)

 		getEngineFunc               func(ctx context.Context) (containerd.Container, error)
-		removeEngineFunc            func(ctx context.Context, engine containerd.Container) error
-		getCurrentEngineVersionFunc func(ctx context.Context) (containerizedengine.EngineInitOptions, error)
+		removeEngineFunc            func(ctx context.Context) error
+		getCurrentEngineVersionFunc func(ctx context.Context) (clitypes.EngineInitOptions, error)
 	}
 )

@@ -46,18 +44,17 @@ func (w *fakeContainerizedEngineClient) Close() error {
 }

 func (w *fakeContainerizedEngineClient) ActivateEngine(ctx context.Context,
-	opts containerizedengine.EngineInitOptions,
-	out containerizedengine.OutStream,
-	authConfig *types.AuthConfig,
-	healthfn func(context.Context) error) error {
+	opts clitypes.EngineInitOptions,
+	out clitypes.OutStream,
+	authConfig *types.AuthConfig) error {
 	if w.activateEngineFunc != nil {
-		return w.activateEngineFunc(ctx, opts, out, authConfig, healthfn)
+		return w.activateEngineFunc(ctx, opts, out, authConfig)
 	}
 	return nil
 }
 func (w *fakeContainerizedEngineClient) InitEngine(ctx context.Context,
-	opts containerizedengine.EngineInitOptions,
-	out containerizedengine.OutStream,
+	opts clitypes.EngineInitOptions,
+	out clitypes.OutStream,
 	authConfig *types.AuthConfig,
 	healthfn func(context.Context) error) error {
 	if w.initEngineFunc != nil {
@@ -66,23 +63,22 @@ func (w *fakeContainerizedEngineClient) InitEngine(ctx context.Context,
 	return nil
 }
 func (w *fakeContainerizedEngineClient) DoUpdate(ctx context.Context,
-	opts containerizedengine.EngineInitOptions,
-	out containerizedengine.OutStream,
-	authConfig *types.AuthConfig,
-	healthfn func(context.Context) error) error {
+	opts clitypes.EngineInitOptions,
+	out clitypes.OutStream,
+	authConfig *types.AuthConfig) error {
 	if w.doUpdateFunc != nil {
-		return w.doUpdateFunc(ctx, opts, out, authConfig, healthfn)
+		return w.doUpdateFunc(ctx, opts, out, authConfig)
 	}
 	return nil
 }
 func (w *fakeContainerizedEngineClient) GetEngineVersions(ctx context.Context,
 	registryClient registryclient.RegistryClient,
-	currentVersion, imageName string) (containerizedengine.AvailableVersions, error) {
+	currentVersion, imageName string) (clitypes.AvailableVersions, error) {

 	if w.getEngineVersionsFunc != nil {
 		return w.getEngineVersionsFunc(ctx, registryClient, currentVersion, imageName)
 	}
-	return containerizedengine.AvailableVersions{}, nil
+	return clitypes.AvailableVersions{}, nil
 }

 func (w *fakeContainerizedEngineClient) GetEngine(ctx context.Context) (containerd.Container, error) {
@@ -91,15 +87,15 @@ func (w *fakeContainerizedEngineClient) GetEngine(ctx context.Context) (containe
 	}
 	return nil, nil
 }
-func (w *fakeContainerizedEngineClient) RemoveEngine(ctx context.Context, engine containerd.Container) error {
+func (w *fakeContainerizedEngineClient) RemoveEngine(ctx context.Context) error {
 	if w.removeEngineFunc != nil {
-		return w.removeEngineFunc(ctx, engine)
+		return w.removeEngineFunc(ctx)
 	}
 	return nil
 }
-func (w *fakeContainerizedEngineClient) GetCurrentEngineVersion(ctx context.Context) (containerizedengine.EngineInitOptions, error) {
+func (w *fakeContainerizedEngineClient) GetCurrentEngineVersion(ctx context.Context) (clitypes.EngineInitOptions, error) {
 	if w.getCurrentEngineVersionFunc != nil {
 		return w.getCurrentEngineVersionFunc(ctx)
 	}
-	return containerizedengine.EngineInitOptions{}, nil
+	return clitypes.EngineInitOptions{}, nil
 }
```
```diff
@@ -15,11 +15,9 @@ func NewEngineCommand(dockerCli command.Cli) *cobra.Command {
 		RunE: command.ShowHelp(dockerCli.Err()),
 	}
 	cmd.AddCommand(
-		newInitCommand(dockerCli),
 		newActivateCommand(dockerCli),
 		newCheckForUpdatesCommand(dockerCli),
 		newUpdateCommand(dockerCli),
-		newRmCommand(dockerCli),
 	)
 	return cmd
 }
```
```diff
@@ -10,5 +10,5 @@ func TestNewEngineCommand(t *testing.T) {
 	cmd := NewEngineCommand(testCli)

 	subcommands := cmd.Commands()
-	assert.Assert(t, len(subcommands) == 5)
+	assert.Assert(t, len(subcommands) == 3)
 }
```
```diff
@@ -1,62 +1,10 @@
 package engine

 import (
-	"context"
-
-	"github.com/docker/cli/cli"
-	"github.com/docker/cli/cli/command"
-	"github.com/docker/cli/internal/containerizedengine"
-	"github.com/pkg/errors"
-	"github.com/spf13/cobra"
+	clitypes "github.com/docker/cli/types"
 )

 type extendedEngineInitOptions struct {
-	containerizedengine.EngineInitOptions
+	clitypes.EngineInitOptions
 	sockPath string
 }
-
-func newInitCommand(dockerCli command.Cli) *cobra.Command {
-	var options extendedEngineInitOptions
-
-	cmd := &cobra.Command{
-		Use:   "init [OPTIONS]",
-		Short: "Initialize a local engine",
-		Long: `This command will initialize a local engine running on containerd.
-
-Configuration of the engine is managed through the daemon.json configuration
-file on the host and may be pre-created before running the 'init' command.
-`,
-		Args: cli.NoArgs,
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return runInit(dockerCli, options)
-		},
-		Annotations: map[string]string{"experimentalCLI": ""},
-	}
-	flags := cmd.Flags()
-	flags.StringVar(&options.EngineVersion, "version", cli.Version, "Specify engine version")
-	flags.StringVar(&options.EngineImage, "engine-image", containerizedengine.CommunityEngineImage, "Specify engine image")
-	flags.StringVar(&options.RegistryPrefix, "registry-prefix", "docker.io/docker", "Override the default location where engine images are pulled")
-	flags.StringVar(&options.ConfigFile, "config-file", "/etc/docker/daemon.json", "Specify the location of the daemon configuration file on the host")
-	flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")
-
-	return cmd
-}
-
-func runInit(dockerCli command.Cli, options extendedEngineInitOptions) error {
-	ctx := context.Background()
-	client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
-	if err != nil {
-		return errors.Wrap(err, "unable to access local containerd")
-	}
-	defer client.Close()
-	authConfig, err := getRegistryAuth(dockerCli, options.RegistryPrefix)
-	if err != nil {
-		return err
-	}
-	return client.InitEngine(ctx, options.EngineInitOptions, dockerCli.Out(), authConfig,
-		func(ctx context.Context) error {
-			client := dockerCli.Client()
-			_, err := client.Ping(ctx)
-			return err
-		})
-}
```
```diff
@@ -1,33 +0,0 @@
-package engine
-
-import (
-	"fmt"
-	"testing"
-
-	"github.com/docker/cli/internal/containerizedengine"
-	"gotest.tools/assert"
-)
-
-func TestInitNoContainerd(t *testing.T) {
-	testCli.SetContainerizedEngineClient(
-		func(string) (containerizedengine.Client, error) {
-			return nil, fmt.Errorf("some error")
-		},
-	)
-	cmd := newInitCommand(testCli)
-	cmd.SilenceUsage = true
-	cmd.SilenceErrors = true
-	err := cmd.Execute()
-	assert.ErrorContains(t, err, "unable to access local containerd")
-}
-
-func TestInitHappy(t *testing.T) {
-	testCli.SetContainerizedEngineClient(
-		func(string) (containerizedengine.Client, error) {
-			return &fakeContainerizedEngineClient{}, nil
-		},
-	)
-	cmd := newInitCommand(testCli)
-	err := cmd.Execute()
-	assert.NilError(t, err)
-}
```
```diff
@@ -1,54 +0,0 @@
-package engine
-
-import (
-	"context"
-
-	"github.com/docker/cli/cli"
-	"github.com/docker/cli/cli/command"
-	"github.com/pkg/errors"
-	"github.com/spf13/cobra"
-)
-
-// TODO - consider adding a "purge" flag that also removes
-// configuration files and the docker root dir.
-
-type rmOptions struct {
-	sockPath string
-}
-
-func newRmCommand(dockerCli command.Cli) *cobra.Command {
-	var options rmOptions
-	cmd := &cobra.Command{
-		Use:   "rm [OPTIONS]",
-		Short: "Remove the local engine",
-		Long: `This command will remove the local engine running on containerd.
-
-No state files will be removed from the host filesystem.
-`,
-		Args: cli.NoArgs,
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return runRm(dockerCli, options)
-		},
-		Annotations: map[string]string{"experimentalCLI": ""},
-	}
-	flags := cmd.Flags()
-	flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")
-
-	return cmd
-}
-
-func runRm(dockerCli command.Cli, options rmOptions) error {
-	ctx := context.Background()
-	client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
-	if err != nil {
-		return errors.Wrap(err, "unable to access local containerd")
-	}
-	defer client.Close()
-
-	engine, err := client.GetEngine(ctx)
-	if err != nil {
-		return err
-	}
-
-	return client.RemoveEngine(ctx, engine)
-}
```
```diff
@@ -1,33 +0,0 @@
-package engine
-
-import (
-	"fmt"
-	"testing"
-
-	"github.com/docker/cli/internal/containerizedengine"
-	"gotest.tools/assert"
-)
-
-func TestRmNoContainerd(t *testing.T) {
-	testCli.SetContainerizedEngineClient(
-		func(string) (containerizedengine.Client, error) {
-			return nil, fmt.Errorf("some error")
-		},
-	)
-	cmd := newRmCommand(testCli)
-	cmd.SilenceUsage = true
-	cmd.SilenceErrors = true
-	err := cmd.Execute()
-	assert.ErrorContains(t, err, "unable to access local containerd")
-}
-
-func TestRmHappy(t *testing.T) {
-	testCli.SetContainerizedEngineClient(
-		func(string) (containerizedengine.Client, error) {
-			return &fakeContainerizedEngineClient{}, nil
-		},
-	)
-	cmd := newRmCommand(testCli)
-	err := cmd.Execute()
-	assert.NilError(t, err)
-}
```
cli/command/engine/testdata/expired-hub-license-display-only.golden (vendored, new file, 3 lines)

```diff
@@ -0,0 +1,3 @@
+Looking for existing licenses for ...
+NUM OWNER PRODUCT ID EXPIRES PRICING COMPONENTS
+0 2010-01-01 00:00:00 +0000 UTC
```
cli/command/engine/testdata/expired-license-display-only.golden (vendored, new file, 1 line)

```diff
@@ -0,0 +1 @@
+License: Quantity: 1 Nodes Expiration date: 2018-03-18 Expired! You will no longer receive updates. Please renew at https://docker.com/licensing
```
```diff
@@ -6,6 +6,7 @@ import (

 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
+	clitypes "github.com/docker/cli/types"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
@@ -24,45 +25,31 @@ func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
 	flags := cmd.Flags()

 	flags.StringVar(&options.EngineVersion, "version", "", "Specify engine version")
-	flags.StringVar(&options.EngineImage, "engine-image", "", "Specify engine image")
-	flags.StringVar(&options.RegistryPrefix, "registry-prefix", "", "Override the current location where engine images are pulled")
+	flags.StringVar(&options.EngineImage, "engine-image", "", "Specify engine image (default uses the same image as currently running)")
+	flags.StringVar(&options.RegistryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the current location where engine images are pulled")
 	flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")

 	return cmd
 }

 func runUpdate(dockerCli command.Cli, options extendedEngineInitOptions) error {
+	if !isRoot() {
+		return errors.New("this command must be run as a privileged user")
+	}
 	ctx := context.Background()
 	client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
 	if err != nil {
 		return errors.Wrap(err, "unable to access local containerd")
 	}
 	defer client.Close()
-	if options.EngineImage == "" || options.RegistryPrefix == "" {
-		currentOpts, err := client.GetCurrentEngineVersion(ctx)
-		if err != nil {
-			return err
-		}
-		if options.EngineImage == "" {
-			options.EngineImage = currentOpts.EngineImage
-		}
-		if options.RegistryPrefix == "" {
-			options.RegistryPrefix = currentOpts.RegistryPrefix
-		}
-	}
 	authConfig, err := getRegistryAuth(dockerCli, options.RegistryPrefix)
 	if err != nil {
 		return err
 	}

-	if err := client.DoUpdate(ctx, options.EngineInitOptions, dockerCli.Out(), authConfig,
-		func(ctx context.Context) error {
-			client := dockerCli.Client()
-			_, err := client.Ping(ctx)
-			return err
-		}); err != nil {
+	if err := client.DoUpdate(ctx, options.EngineInitOptions, dockerCli.Out(), authConfig); err != nil {
 		return err
 	}
-	fmt.Fprintln(dockerCli.Out(), "Success! The docker engine is now running.")
+	fmt.Fprintln(dockerCli.Out(), `Successfully updated engine.
+Restart docker with 'systemctl restart docker' to complete the update.`)
 	return nil
 }
```
```diff
@@ -4,13 +4,16 @@ import (
 	"fmt"
 	"testing"

-	"github.com/docker/cli/internal/containerizedengine"
 	"github.com/docker/cli/internal/test"
+	clitypes "github.com/docker/cli/types"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
 	"gotest.tools/assert"
 )

 func TestUpdateNoContainerd(t *testing.T) {
 	testCli.SetContainerizedEngineClient(
-		func(string) (containerizedengine.Client, error) {
+		func(string) (clitypes.ContainerizedClient, error) {
 			return nil, fmt.Errorf("some error")
 		},
 	)
@@ -22,14 +25,16 @@ func TestUpdateNoContainerd(t *testing.T) {
 }

 func TestUpdateHappy(t *testing.T) {
-	testCli.SetContainerizedEngineClient(
-		func(string) (containerizedengine.Client, error) {
+	c := test.NewFakeCli(&verClient{client.Client{}, types.Version{Version: "1.1.0"}, nil, types.Info{ServerVersion: "1.1.0"}, nil})
+	c.SetContainerizedEngineClient(
+		func(string) (clitypes.ContainerizedClient, error) {
 			return &fakeContainerizedEngineClient{}, nil
 		},
 	)
-	cmd := newUpdateCommand(testCli)
-	cmd.Flags().Set("registry-prefix", "docker.io/docker")
+	cmd := newUpdateCommand(c)
+	cmd.Flags().Set("registry-prefix", clitypes.RegistryPrefix)
 	cmd.Flags().Set("version", "someversion")
 	cmd.Flags().Set("engine-image", "someimage")
 	err := cmd.Execute()
 	assert.NilError(t, err)
 }
```
179
cli/command/formatter/buildcache.go
Normal file
@ -0,0 +1,179 @@
package formatter

import (
"fmt"
"sort"
"strings"
"time"

"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/go-units"
)

const (
defaultBuildCacheTableFormat = "table {{.ID}}\t{{.Type}}\t{{.Size}}\t{{.CreatedSince}}\t{{.LastUsedSince}}\t{{.UsageCount}}\t{{.Shared}}\t{{.Description}}"

cacheIDHeader = "CACHE ID"
cacheTypeHeader = "CACHE TYPE"
parentHeader = "PARENT"
lastUsedSinceHeader = "LAST USED"
usageCountHeader = "USAGE"
inUseHeader = "IN USE"
sharedHeader = "SHARED"
)

// NewBuildCacheFormat returns a Format for rendering using a Context
func NewBuildCacheFormat(source string, quiet bool) Format {
switch source {
case TableFormatKey:
if quiet {
return defaultQuietFormat
}
return Format(defaultBuildCacheTableFormat)
case RawFormatKey:
if quiet {
return `build_cache_id: {{.ID}}`
}
format := `build_cache_id: {{.ID}}
parent_id: {{.Parent}}
build_cache_type: {{.CacheType}}
description: {{.Description}}
created_at: {{.CreatedAt}}
created_since: {{.CreatedSince}}
last_used_at: {{.LastUsedAt}}
last_used_since: {{.LastUsedSince}}
usage_count: {{.UsageCount}}
in_use: {{.InUse}}
shared: {{.Shared}}
`
return Format(format)
}
return Format(source)
}

func buildCacheSort(buildCache []*types.BuildCache) {
sort.Slice(buildCache, func(i, j int) bool {
lui, luj := buildCache[i].LastUsedAt, buildCache[j].LastUsedAt
switch {
case lui == nil && luj == nil:
return strings.Compare(buildCache[i].ID, buildCache[j].ID) < 0
case lui == nil:
return true
case luj == nil:
return false
case lui.Equal(*luj):
return strings.Compare(buildCache[i].ID, buildCache[j].ID) < 0
default:
return lui.Before(*luj)
}
})
}
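The comparator above yields a deterministic order: never-used entries (nil `LastUsedAt`) come first, then entries in ascending last-used time, with the ID as tie-breaker. A hedged illustration, assuming `t0` and `t1` are `time.Time` values with `t0.Before(t1)`:

```go
// Illustrative only: hypothetical entries, not taken from the diff.
caches := []*types.BuildCache{
	{ID: "bbb", LastUsedAt: &t1},
	{ID: "aaa"},                  // never used: sorts first
	{ID: "ccc", LastUsedAt: &t0}, // used earlier than "bbb"
}
buildCacheSort(caches) // resulting order: "aaa", "ccc", "bbb"
```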
// BuildCacheWrite renders the context for a list of build cache entries
func BuildCacheWrite(ctx Context, buildCaches []*types.BuildCache) error {
render := func(format func(subContext subContext) error) error {
buildCacheSort(buildCaches)
for _, bc := range buildCaches {
err := format(&buildCacheContext{trunc: ctx.Trunc, v: bc})
if err != nil {
return err
}
}
return nil
}
return ctx.Write(newBuildCacheContext(), render)
}

type buildCacheHeaderContext map[string]string

type buildCacheContext struct {
HeaderContext
trunc bool
v *types.BuildCache
}

func newBuildCacheContext() *buildCacheContext {
buildCacheCtx := buildCacheContext{}
buildCacheCtx.header = buildCacheHeaderContext{
"ID": cacheIDHeader,
"Parent": parentHeader,
"CacheType": cacheTypeHeader,
"Size": sizeHeader,
"CreatedSince": createdSinceHeader,
"LastUsedSince": lastUsedSinceHeader,
"UsageCount": usageCountHeader,
"InUse": inUseHeader,
"Shared": sharedHeader,
"Description": descriptionHeader,
}
return &buildCacheCtx
}

func (c *buildCacheContext) MarshalJSON() ([]byte, error) {
return marshalJSON(c)
}

func (c *buildCacheContext) ID() string {
id := c.v.ID
if c.trunc {
id = stringid.TruncateID(c.v.ID)
}
if c.v.InUse {
return id + "*"
}
return id
}

func (c *buildCacheContext) Parent() string {
if c.trunc {
return stringid.TruncateID(c.v.Parent)
}
return c.v.Parent
}

func (c *buildCacheContext) CacheType() string {
return c.v.Type
}

func (c *buildCacheContext) Description() string {
return c.v.Description
}

func (c *buildCacheContext) Size() string {
return units.HumanSizeWithPrecision(float64(c.v.Size), 3)
}

func (c *buildCacheContext) CreatedAt() string {
return c.v.CreatedAt.String()
}

func (c *buildCacheContext) CreatedSince() string {
return units.HumanDuration(time.Now().UTC().Sub(c.v.CreatedAt)) + " ago"
}

func (c *buildCacheContext) LastUsedAt() string {
if c.v.LastUsedAt == nil {
return ""
}
return c.v.LastUsedAt.String()
}

func (c *buildCacheContext) LastUsedSince() string {
if c.v.LastUsedAt == nil {
return ""
}
return units.HumanDuration(time.Now().UTC().Sub(*c.v.LastUsedAt)) + " ago"
}

func (c *buildCacheContext) UsageCount() string {
return fmt.Sprintf("%d", c.v.UsageCount)
}

func (c *buildCacheContext) InUse() string {
return fmt.Sprintf("%t", c.v.InUse)
}

func (c *buildCacheContext) Shared() string {
return fmt.Sprintf("%t", c.v.Shared)
}
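As a hedged usage sketch (names come from this file; the `caches` slice, the enclosing function, and the `os`/`log` imports are assumptions), the new formatter can be driven directly:

```go
buildCacheCtx := formatter.Context{
	Output: os.Stdout, // any io.Writer works
	Format: formatter.NewBuildCacheFormat(formatter.TableFormatKey, false),
	Trunc:  true, // truncate IDs, as the table view does
}
// caches is a []*types.BuildCache, e.g. from the /system/df API response.
if err := formatter.BuildCacheWrite(buildCacheCtx, caches); err != nil {
	log.Fatal(err)
}
```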
@ -12,19 +12,11 @@ import (
)

const (
defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}"
defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}"
defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}"
defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}"
defaultBuildCacheVerboseFormat = `
ID: {{.ID}}
Description: {{.Description}}
Mutable: {{.Mutable}}
Size: {{.Size}}
CreatedAt: {{.CreatedAt}}
LastUsedAt: {{.LastUsedAt}}
UsageCount: {{.UsageCount}}
`
defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}}\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}"
defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}}\t{{.Status}}\t{{.Names}}"
defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}"
defaultDiskUsageBuildCacheTableFormat = "table {{.ID}}\t{{.CacheType}}\t{{.Size}}\t{{.CreatedSince}}\t{{.LastUsedSince}}\t{{.UsageCount}}\t{{.Shared}}"
defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}"

typeHeader = "TYPE"
totalHeader = "TOTAL"
@ -32,7 +24,7 @@ UsageCount: {{.UsageCount}}
reclaimableHeader = "RECLAIMABLE"
containersHeader = "CONTAINERS"
sharedSizeHeader = "SHARED SIZE"
uniqueSizeHeader = "UNIQUE SiZE"
uniqueSizeHeader = "UNIQUE SIZE"
)

// DiskUsageContext contains disk usage specific information required by the formatter, encapsulate a Context struct.
@ -56,14 +48,26 @@ func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template,
return ctx.parseFormat()
}

//
// NewDiskUsageFormat returns a format for rendering a DiskUsageContext
func NewDiskUsageFormat(source string) Format {
switch source {
case TableFormatKey:
format := defaultDiskUsageTableFormat
return Format(format)
case RawFormatKey:
func NewDiskUsageFormat(source string, verbose bool) Format {
switch {
case verbose && source == RawFormatKey:
format := `{{range .Images}}type: Image
` + NewImageFormat(source, false, true) + `
{{end -}}
{{range .Containers}}type: Container
` + NewContainerFormat(source, false, true) + `
{{end -}}
{{range .Volumes}}type: Volume
` + NewVolumeFormat(source, false) + `
{{end -}}
{{range .BuildCache}}type: Build Cache
` + NewBuildCacheFormat(source, false) + `
{{end -}}`
return format
case !verbose && source == TableFormatKey:
return Format(defaultDiskUsageTableFormat)
case !verbose && source == RawFormatKey:
format := `type: {{.Type}}
total: {{.TotalCount}}
active: {{.Active}}
@ -71,8 +75,9 @@ size: {{.Size}}
reclaimable: {{.Reclaimable}}
`
return Format(format)
default:
return Format(source)
}
return Format(source)
}
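Aside, grounded in the tests further down in this diff: with this change `docker system df -v` renders one sub-table per object type (images, containers, volumes, build cache), and verbose output now honors `--format`. For example, `docker system df -v --format '{{json .}}'` against an empty daemon prints `{"Images":[],"Containers":[],"Volumes":[],"BuildCache":[]}`.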
func (ctx *DiskUsageContext) Write() (err error) {
@ -129,14 +134,23 @@ func (ctx *DiskUsageContext) Write() (err error) {
return err
}

func (ctx *DiskUsageContext) verboseWrite() error {
// First images
tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat)
if err != nil {
return err
}
type diskUsageContext struct {
Images []*imageContext
Containers []*containerContext
Volumes []*volumeContext
BuildCache []*buildCacheContext
}

ctx.Output.Write([]byte("Images space usage:\n\n"))
func (ctx *DiskUsageContext) verboseWrite() error {
duc := &diskUsageContext{
Images: make([]*imageContext, 0, len(ctx.Images)),
Containers: make([]*containerContext, 0, len(ctx.Containers)),
Volumes: make([]*volumeContext, 0, len(ctx.Volumes)),
BuildCache: make([]*buildCacheContext, 0, len(ctx.BuildCache)),
}
trunc := ctx.Format.IsTable()

// First images
for _, i := range ctx.Images {
repo := "<none>"
tag := "<none>"
@ -152,55 +166,92 @@ func (ctx *DiskUsageContext) verboseWrite() error {
}
}

err := ctx.contextFormat(tmpl, &imageContext{
duc.Images = append(duc.Images, &imageContext{
repo: repo,
tag: tag,
trunc: true,
trunc: trunc,
i: *i,
})
if err != nil {
}

// Now containers
for _, c := range ctx.Containers {
// Don't display the virtual size
c.SizeRootFs = 0
duc.Containers = append(duc.Containers, &containerContext{trunc: trunc, c: *c})
}

// And volumes
for _, v := range ctx.Volumes {
duc.Volumes = append(duc.Volumes, &volumeContext{v: *v})
}

// And build cache
buildCacheSort(ctx.BuildCache)
for _, v := range ctx.BuildCache {
duc.BuildCache = append(duc.BuildCache, &buildCacheContext{v: v, trunc: trunc})
}

if ctx.Format == TableFormatKey {
return ctx.verboseWriteTable(duc)
}

ctx.preFormat()
tmpl, err := ctx.parseFormat()
if err != nil {
return err
}
return tmpl.Execute(ctx.Output, duc)
}

func (ctx *DiskUsageContext) verboseWriteTable(duc *diskUsageContext) error {
tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat)
if err != nil {
return err
}
ctx.Output.Write([]byte("Images space usage:\n\n"))
for _, img := range duc.Images {
if err := ctx.contextFormat(tmpl, img); err != nil {
return err
}
}
ctx.postFormat(tmpl, newImageContext())

// Now containers
ctx.Output.Write([]byte("\nContainers space usage:\n\n"))
tmpl, err = ctx.startSubsection(defaultDiskUsageContainerTableFormat)
if err != nil {
return err
}
for _, c := range ctx.Containers {
// Don't display the virtual size
c.SizeRootFs = 0
err := ctx.contextFormat(tmpl, &containerContext{trunc: true, c: *c})
if err != nil {
ctx.Output.Write([]byte("\nContainers space usage:\n\n"))
for _, c := range duc.Containers {
if err := ctx.contextFormat(tmpl, c); err != nil {
return err
}
}
ctx.postFormat(tmpl, newContainerContext())

// And volumes
ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n"))
tmpl, err = ctx.startSubsection(defaultDiskUsageVolumeTableFormat)
if err != nil {
return err
}
for _, v := range ctx.Volumes {
if err := ctx.contextFormat(tmpl, &volumeContext{v: *v}); err != nil {
ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n"))
for _, v := range duc.Volumes {
if err := ctx.contextFormat(tmpl, v); err != nil {
return err
}
}
ctx.postFormat(tmpl, newVolumeContext())

// And build cache
fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize)))

t := template.Must(template.New("buildcache").Parse(defaultBuildCacheVerboseFormat))

for _, v := range ctx.BuildCache {
t.Execute(ctx.Output, *v)
tmpl, err = ctx.startSubsection(defaultDiskUsageBuildCacheTableFormat)
if err != nil {
return err
}
fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize)))
for _, v := range duc.BuildCache {
if err := ctx.contextFormat(tmpl, v); err != nil {
return err
}
}
ctx.postFormat(tmpl, newBuildCacheContext())

return nil
}
@ -416,7 +467,7 @@ func (c *diskUsageBuilderContext) Size() string {
func (c *diskUsageBuilderContext) Reclaimable() string {
var inUseBytes int64
for _, bc := range c.buildCache {
if bc.InUse {
if bc.InUse && !bc.Shared {
inUseBytes += bc.Size
}
}
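A quick worked example of the `Reclaimable` change, with hypothetical numbers: given cache records of 100 MB (in use, not shared), 50 MB (in use, shared) and 30 MB (idle), `inUseBytes` is now only 100 MB, so a 180 MB cache reports 80 MB reclaimable. Before the `!bc.Shared` guard, the shared 50 MB was also held back and only 30 MB would have been reported.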
@ -18,7 +18,7 @@ func TestDiskUsageContextFormatWrite(t *testing.T) {
{
DiskUsageContext{
Context: Context{
Format: NewDiskUsageFormat("table"),
Format: NewDiskUsageFormat("table", false),
},
Verbose: false},
`TYPE TOTAL ACTIVE SIZE RECLAIMABLE
@ -29,14 +29,14 @@ Build Cache 0 0 0B
`,
},
{
DiskUsageContext{Verbose: true},
DiskUsageContext{Verbose: true, Context: Context{Format: NewDiskUsageFormat("table", true)}},
`Images space usage:

REPOSITORY TAG IMAGE ID CREATED ago SIZE SHARED SIZE UNIQUE SiZE CONTAINERS
REPOSITORY TAG IMAGE ID CREATED SIZE SHARED SIZE UNIQUE SIZE CONTAINERS

Containers space usage:

CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED ago STATUS NAMES
CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED STATUS NAMES

Local Volumes space usage:

@ -44,8 +44,17 @@ VOLUME NAME LINKS SIZE

Build cache usage: 0B

CACHE ID CACHE TYPE SIZE CREATED LAST USED USAGE SHARED
`,
},
{
DiskUsageContext{Verbose: true, Context: Context{Format: NewDiskUsageFormat("raw", true)}},
``,
},
{
DiskUsageContext{Verbose: true, Context: Context{Format: NewDiskUsageFormat("{{json .}}", true)}},
`{"Images":[],"Containers":[],"Volumes":[],"BuildCache":[]}`,
},
// Errors
{
DiskUsageContext{
@ -69,7 +78,7 @@ Build cache usage: 0B
{
DiskUsageContext{
Context: Context{
Format: NewDiskUsageFormat("table"),
Format: NewDiskUsageFormat("table", false),
},
},
`TYPE TOTAL ACTIVE SIZE RECLAIMABLE
@ -82,7 +91,7 @@ Build Cache 0 0 0B
{
DiskUsageContext{
Context: Context{
Format: NewDiskUsageFormat("table {{.Type}}\t{{.Active}}"),
Format: NewDiskUsageFormat("table {{.Type}}\t{{.Active}}", false),
},
},
string(golden.Get(t, "disk-usage-context-write-custom.golden")),
@ -91,7 +100,7 @@ Build Cache 0 0 0B
{
DiskUsageContext{
Context: Context{
Format: NewDiskUsageFormat("raw"),
Format: NewDiskUsageFormat("raw", false),
},
},
string(golden.Get(t, "disk-usage-raw-format.golden")),
@ -1,7 +1,7 @@
package formatter

import (
"github.com/docker/cli/internal/containerizedengine"
clitypes "github.com/docker/cli/types"
)

const (
@ -31,7 +31,7 @@ func NewUpdatesFormat(source string, quiet bool) Format {
}

// UpdatesWrite writes the context
func UpdatesWrite(ctx Context, availableUpdates []containerizedengine.Update) error {
func UpdatesWrite(ctx Context, availableUpdates []clitypes.Update) error {
render := func(format func(subContext subContext) error) error {
for _, update := range availableUpdates {
updatesCtx := &updateContext{trunc: ctx.Trunc, u: update}
@ -53,7 +53,7 @@ func UpdatesWrite(ctx Context, availableUpdates []containerizedengine.Update) er
type updateContext struct {
HeaderContext
trunc bool
u containerizedengine.Update
u clitypes.Update
}

func (c *updateContext) MarshalJSON() ([]byte, error) {
@ -6,7 +6,7 @@ import (
"strings"
"testing"

"github.com/docker/cli/internal/containerizedengine"
clitypes "github.com/docker/cli/types"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)
@ -84,7 +84,7 @@ version2
}

for _, testcase := range cases {
updates := []containerizedengine.Update{
updates := []clitypes.Update{
{Type: "updateType1", Version: "version1", Notes: "description 1"},
{Type: "updateType2", Version: "version2", Notes: "description 2"},
}
@ -100,7 +100,7 @@ version2
}

func TestUpdateContextWriteJSON(t *testing.T) {
updates := []containerizedengine.Update{
updates := []clitypes.Update{
{Type: "updateType1", Version: "version1", Notes: "note1"},
{Type: "updateType2", Version: "version2", Notes: "note2"},
}
@ -124,7 +124,7 @@ func TestUpdateContextWriteJSON(t *testing.T) {
}

func TestUpdateContextWriteJSONField(t *testing.T) {
updates := []containerizedengine.Update{
updates := []clitypes.Update{
{Type: "updateType1", Version: "version1"},
{Type: "updateType2", Version: "version2"},
}
@ -19,6 +19,7 @@ type importOptions struct {
reference string
changes dockeropts.ListOpts
message string
platform string
}

// NewImportCommand creates a new `docker import` command
@ -43,6 +44,7 @@ func NewImportCommand(dockerCli command.Cli) *cobra.Command {
options.changes = dockeropts.NewListOpts(nil)
flags.VarP(&options.changes, "change", "c", "Apply Dockerfile instruction to the created image")
flags.StringVarP(&options.message, "message", "m", "", "Set commit message for imported image")
command.AddPlatformFlag(flags, &options.platform)

return cmd
}
@ -71,8 +73,9 @@ func runImport(dockerCli command.Cli, options importOptions) error {
}

importOptions := types.ImageImportOptions{
Message: options.message,
Changes: options.changes.GetAll(),
Message: options.message,
Changes: options.changes.GetAll(),
Platform: options.platform,
}

clnt := dockerCli.Client()
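For orientation, the option plumbed through above maps straight onto the Engine API client. A hedged sketch (the `apiClient`, `ctx`, and `tarball` variables are assumptions, and `linux/amd64` is only an example value):

```go
// resp is an io.ReadCloser carrying the import progress stream;
// the caller must drain and close it.
resp, err := apiClient.ImageImport(ctx,
	types.ImageImportSource{Source: tarball, SourceName: "-"},
	"myimage:latest",
	types.ImageImportOptions{
		Message:  "imported from tarball",
		Platform: "linux/amd64",
	})
```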
@ -70,7 +70,7 @@ func runPrune(dockerCli command.Cli, options pruneOptions) (output string, err e

// RunPrune calls the Network Prune API
// This returns the amount of space reclaimed and a detailed output string
func RunPrune(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) {
func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {
output, err := runPrune(dockerCli, pruneOptions{force: true, filter: filter})
return 0, output, err
}
@ -2,7 +2,6 @@ package system

import (
"context"
"errors"

"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
@ -38,10 +37,6 @@ func newDiskUsageCommand(dockerCli command.Cli) *cobra.Command {
}

func runDiskUsage(dockerCli command.Cli, opts diskUsageOptions) error {
if opts.verbose && len(opts.format) != 0 {
return errors.New("the verbose and the format options conflict")
}

du, err := dockerCli.Client().DiskUsage(context.Background())
if err != nil {
return err
@ -52,13 +47,20 @@ func runDiskUsage(dockerCli command.Cli, opts diskUsageOptions) error {
format = formatter.TableFormatKey
}

var bsz int64
for _, bc := range du.BuildCache {
if !bc.Shared {
bsz += bc.Size
}
}

duCtx := formatter.DiskUsageContext{
Context: formatter.Context{
Output: dockerCli.Out(),
Format: formatter.NewDiskUsageFormat(format),
Format: formatter.NewDiskUsageFormat(format, opts.verbose),
},
LayersSize: du.LayersSize,
BuilderSize: du.BuilderSize,
BuilderSize: bsz,
BuildCache: du.BuildCache,
Images: du.Images,
Containers: du.Containers,
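Two user-visible effects of this hunk: `docker system df --verbose --format ...` is no longer rejected (the old conflict check is gone), and the Build Cache size reported by `docker system df` now counts only non-shared cache bytes, so layers shared with images are not double-counted.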
@ -204,6 +204,9 @@ func prettyPrintInfo(dockerCli command.Cli, info types.Info) error {
}

fmt.Fprintln(dockerCli.Out(), "Live Restore Enabled:", info.LiveRestoreEnabled)
if info.ProductLicense != "" {
fmt.Fprintln(dockerCli.Out(), "Product License:", info.ProductLicense)
}
fmt.Fprint(dockerCli.Out(), "\n")

printWarnings(dockerCli, info)
@ -2,12 +2,12 @@ package system

import (
"bytes"
"context"
"fmt"
"text/template"

"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/builder"
"github.com/docker/cli/cli/command/container"
"github.com/docker/cli/cli/command/image"
"github.com/docker/cli/cli/command/network"
@ -21,20 +21,21 @@ import (
type pruneOptions struct {
force bool
all bool
pruneBuildCache bool
pruneVolumes bool
pruneBuildCache bool
filter opts.FilterOpt
}

// newPruneCommand creates a new cobra.Command for `docker prune`
func newPruneCommand(dockerCli command.Cli) *cobra.Command {
options := pruneOptions{filter: opts.NewFilterOpt(), pruneBuildCache: true}
options := pruneOptions{filter: opts.NewFilterOpt()}

cmd := &cobra.Command{
Use: "prune [OPTIONS]",
Short: "Remove unused data",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
options.pruneBuildCache = versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.31")
return runPrune(dockerCli, options)
},
Annotations: map[string]string{"version": "1.25"},
@ -57,44 +58,30 @@ const confirmationTemplate = `WARNING! This will remove:
{{- end }}
Are you sure you want to continue?`

// runBuildCachePrune executes a prune command for build cache
func runBuildCachePrune(dockerCli command.Cli, _ opts.FilterOpt) (uint64, string, error) {
report, err := dockerCli.Client().BuildCachePrune(context.Background())
if err != nil {
return 0, "", err
}
return report.SpaceReclaimed, "", nil
}

func runPrune(dockerCli command.Cli, options pruneOptions) error {
// TODO version this once "until" filter is supported for volumes
if options.pruneVolumes && options.filter.Value().Contains("until") {
return fmt.Errorf(`ERROR: The "until" filter is not supported with "--volumes"`)
}
if versions.LessThan(dockerCli.Client().ClientVersion(), "1.31") {
options.pruneBuildCache = false
}
if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), confirmationMessage(options)) {
return nil
}
imagePrune := func(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) {
return image.RunPrune(dockerCli, options.all, options.filter)
}
pruneFuncs := []func(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error){
pruneFuncs := []func(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error){
container.RunPrune,
network.RunPrune,
}
if options.pruneVolumes {
pruneFuncs = append(pruneFuncs, volume.RunPrune)
}
pruneFuncs = append(pruneFuncs, imagePrune)
if options.pruneBuildCache {
pruneFuncs = append(pruneFuncs, runBuildCachePrune)
pruneFuncs = append(pruneFuncs, builder.CachePrune)
}
// FIXME: modify image.RunPrune to not modify options.filter, otherwise this has to be last in the list.
pruneFuncs = append(pruneFuncs, image.RunPrune)

var spaceReclaimed uint64
for _, pruneFn := range pruneFuncs {
spc, output, err := pruneFn(dockerCli, options.filter)
spc, output, err := pruneFn(dockerCli, options.all, options.filter)
if err != nil {
return err
}
@ -126,7 +113,11 @@ func confirmationMessage(options pruneOptions) string {
warnings = append(warnings, "all dangling images")
}
if options.pruneBuildCache {
warnings = append(warnings, "all build cache")
if options.all {
warnings = append(warnings, "all build cache")
} else {
warnings = append(warnings, "all dangling build cache")
}
}
if len(options.filter.String()) > 0 {
warnings = append(warnings, "Elements to be pruned will be filtered with:")
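Net effect for users: `docker system prune` now routes build-cache pruning through `builder.CachePrune` and removes only dangling build cache by default; passing `-a`/`--all` clears all of it, and the confirmation prompt above now states which of the two will happen.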
@ -73,6 +73,6 @@ func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint6

// RunPrune calls the Volume Prune API
// This returns the amount of space reclaimed and a detailed output string
func RunPrune(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) {
func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {
return runPrune(dockerCli, pruneOptions{force: true, filter: filter})
}
@ -176,15 +176,21 @@ func extractVariable(value interface{}, pattern *regexp.Regexp) ([]extractedValu

// Soft default (fall back if unset or empty)
func softDefault(substitution string, mapping Mapping) (string, bool, error) {
return withDefault(substitution, mapping, "-:")
sep := ":-"
if !strings.Contains(substitution, sep) {
return "", false, nil
}
name, defaultValue := partition(substitution, sep)
value, ok := mapping(name)
if !ok || value == "" {
return defaultValue, true, nil
}
return value, true, nil
}

// Hard default (fall back if-and-only-if empty)
func hardDefault(substitution string, mapping Mapping) (string, bool, error) {
return withDefault(substitution, mapping, "-")
}

func withDefault(substitution string, mapping Mapping, sep string) (string, bool, error) {
sep := "-"
if !strings.Contains(substitution, sep) {
return "", false, nil
}

@ -78,6 +78,12 @@ func TestEmptyValueWithSoftDefault(t *testing.T) {
assert.Check(t, is.Equal("ok def", result))
}

func TestValueWithSoftDefault(t *testing.T) {
result, err := Substitute("ok ${FOO:-def}", defaultMapping)
assert.NilError(t, err)
assert.Check(t, is.Equal("ok first", result))
}

func TestEmptyValueWithHardDefault(t *testing.T) {
result, err := Substitute("ok ${BAR-def}", defaultMapping)
assert.NilError(t, err)
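The new test pins down the behavior split between the two default forms. As a hedged summary, with a mapping where `FOO` is set to `first` and `BAR` is set but empty (the test file's `defaultMapping`), and `BAZ` assumed unset:

```go
// Soft default (":-"): falls back when the variable is unset OR empty.
r1, _ := Substitute("ok ${FOO:-def}", defaultMapping) // r1 == "ok first"
r2, _ := Substitute("ok ${BAR:-def}", defaultMapping) // r2 == "ok def"

// Hard default ("-"): falls back only when the variable is unset.
r3, _ := Substitute("ok ${BAR-def}", defaultMapping) // r3 == "ok " (empty kept)
r4, _ := Substitute("ok ${BAZ-def}", defaultMapping) // r4 == "ok def"
```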
@ -10,8 +10,10 @@ import (
"net/url"
"os"
"os/exec"
"runtime"
"strings"
"sync"
"syscall"
"time"

"github.com/docker/cli/cli/connhelper/ssh"
@ -82,6 +84,9 @@ func newCommandConn(ctx context.Context, cmd string, args ...string) (net.Conn,
// commandConn implements net.Conn
type commandConn struct {
cmd *exec.Cmd
cmdExited bool
cmdWaitErr error
cmdMutex sync.Mutex
stdin io.WriteCloser
stdout io.ReadCloser
stderrMu sync.Mutex
@ -101,23 +106,75 @@ func (c *commandConn) killIfStdioClosed() error {
if !stdioClosed {
return nil
}
var err error
// NOTE: maybe already killed here
if err = c.cmd.Process.Kill(); err == nil {
err = c.cmd.Wait()
return c.kill()
}

// killAndWait tries sending SIGTERM to the process before sending SIGKILL.
func killAndWait(cmd *exec.Cmd) error {
var werr error
if runtime.GOOS != "windows" {
werrCh := make(chan error)
go func() { werrCh <- cmd.Wait() }()
cmd.Process.Signal(syscall.SIGTERM)
select {
case werr = <-werrCh:
case <-time.After(3 * time.Second):
cmd.Process.Kill()
werr = <-werrCh
}
} else {
cmd.Process.Kill()
werr = cmd.Wait()
}
if err != nil {
// err is typically "os: process already finished".
// we check ProcessState here instead of `strings.Contains(err, "os: process already finished")`
if c.cmd.ProcessState.Exited() {
err = nil
return werr
}

// kill returns nil if the command terminated, regardless to the exit status.
func (c *commandConn) kill() error {
var werr error
c.cmdMutex.Lock()
if c.cmdExited {
werr = c.cmdWaitErr
} else {
werr = killAndWait(c.cmd)
c.cmdWaitErr = werr
c.cmdExited = true
}
c.cmdMutex.Unlock()
if werr == nil {
return nil
}
wExitErr, ok := werr.(*exec.ExitError)
if ok {
if wExitErr.ProcessState.Exited() {
return nil
}
}
return err
return errors.Wrapf(werr, "connhelper: failed to wait")
}

func (c *commandConn) onEOF(eof error) error {
werr := c.cmd.Wait()
// when we got EOF, the command is going to be terminated
var werr error
c.cmdMutex.Lock()
if c.cmdExited {
werr = c.cmdWaitErr
} else {
werrCh := make(chan error)
go func() { werrCh <- c.cmd.Wait() }()
select {
case werr = <-werrCh:
c.cmdWaitErr = werr
c.cmdExited = true
case <-time.After(10 * time.Second):
c.cmdMutex.Unlock()
c.stderrMu.Lock()
stderr := c.stderr.String()
c.stderrMu.Unlock()
return errors.Errorf("command %v did not exit after %v: stderr=%q", c.cmd.Args, eof, stderr)
}
}
c.cmdMutex.Unlock()
if werr == nil {
return eof
}
@ -148,7 +205,10 @@ func (c *commandConn) CloseRead() error {
c.stdioClosedMu.Lock()
c.stdoutClosed = true
c.stdioClosedMu.Unlock()
return c.killIfStdioClosed()
if err := c.killIfStdioClosed(); err != nil {
logrus.Warnf("commandConn.CloseRead: %v", err)
}
return nil
}

func (c *commandConn) Read(p []byte) (int, error) {
@ -167,7 +227,10 @@ func (c *commandConn) CloseWrite() error {
c.stdioClosedMu.Lock()
c.stdinClosed = true
c.stdioClosedMu.Unlock()
return c.killIfStdioClosed()
if err := c.killIfStdioClosed(); err != nil {
logrus.Warnf("commandConn.CloseWrite: %v", err)
}
return nil
}

func (c *commandConn) Write(p []byte) (int, error) {
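The terminate-then-kill pattern introduced by `killAndWait` is worth isolating. A minimal Unix-only sketch (the 3-second grace period mirrors the diff; the function name and everything else are assumptions):

```go
// stopProcess asks cmd to exit via SIGTERM, then escalates to SIGKILL
// after the grace period. It returns whatever cmd.Wait reports.
func stopProcess(cmd *exec.Cmd, grace time.Duration) error {
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }() // Wait must run exactly once
	_ = cmd.Process.Signal(syscall.SIGTERM)
	select {
	case err := <-done:
		return err
	case <-time.After(grace):
		_ = cmd.Process.Kill()
		return <-done
	}
}
```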
@ -200,7 +200,7 @@ func continueOnError(err error) bool {
}

func (c *client) iterateEndpoints(ctx context.Context, namedRef reference.Named, each func(context.Context, distribution.Repository, reference.Named) (bool, error)) error {
endpoints, err := allEndpoints(namedRef)
endpoints, err := allEndpoints(namedRef, c.insecureRegistry)
if err != nil {
return err
}
@ -262,12 +262,18 @@ func (c *client) iterateEndpoints(ctx context.Context, namedRef reference.Named,
}

// allEndpoints returns a list of endpoints ordered by priority (v2, https, v1).
func allEndpoints(namedRef reference.Named) ([]registry.APIEndpoint, error) {
func allEndpoints(namedRef reference.Named, insecure bool) ([]registry.APIEndpoint, error) {
repoInfo, err := registry.ParseRepositoryInfo(namedRef)
if err != nil {
return nil, err
}
registryService, err := registry.NewService(registry.ServiceOptions{})

var serviceOpts registry.ServiceOptions
if insecure {
logrus.Debugf("allowing insecure registry for: %s", reference.Domain(namedRef))
serviceOpts.InsecureRegistries = []string{reference.Domain(namedRef)}
}
registryService, err := registry.NewService(serviceOpts)
if err != nil {
return []registry.APIEndpoint{}, err
}
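In user-facing terms, this is the plumbing that lets a single registry-client call opt a host out of TLS verification; for example, an invocation along the lines of `docker manifest inspect --insecure myregistry.example.com:5000/myimage:latest` is what sets the `insecureRegistry` flag that now reaches `registry.NewService` (the registry host here is purely illustrative).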
@ -13,6 +13,7 @@ import (
cliconfig "github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/debug"
cliflags "github.com/docker/cli/cli/flags"
"github.com/docker/cli/internal/containerizedengine"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/term"
@ -168,7 +169,7 @@ func main() {
stdin, stdout, stderr := term.StdStreams()
logrus.SetOutput(stderr)

dockerCli := command.NewDockerCli(stdin, stdout, stderr, contentTrustEnabled())
dockerCli := command.NewDockerCli(stdin, stdout, stderr, contentTrustEnabled(), containerizedengine.NewClient)
cmd := newDockerCommand(dockerCli)

if err := cmd.Execute(); err != nil {
@ -26,7 +26,7 @@ func TestClientDebugEnabled(t *testing.T) {

func TestExitStatusForInvalidSubcommandWithHelpFlag(t *testing.T) {
discard := ioutil.Discard
cmd := newDockerCommand(command.NewDockerCli(os.Stdin, discard, discard, false))
cmd := newDockerCommand(command.NewDockerCli(os.Stdin, discard, discard, false, nil))
cmd.SetArgs([]string{"help", "invalid"})
err := cmd.Execute()
assert.Error(t, err, "unknown help topic: invalid")
@ -105,7 +105,7 @@ shellcheck: build_shell_validate_image ## run shellcheck validation
docker run -ti --rm $(ENVVARS) $(MOUNTS) $(VALIDATE_IMAGE_NAME) make shellcheck

.PHONY: test-e2e ## run e2e tests
test-e2e: test-e2e-non-experimental test-e2e-experimental test-e2e-containerized
test-e2e: test-e2e-non-experimental test-e2e-experimental

.PHONY: test-e2e-experimental
test-e2e-experimental: build_e2e_image
@ -115,14 +115,6 @@ test-e2e-experimental: build_e2e_image
test-e2e-non-experimental: build_e2e_image
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock $(E2E_IMAGE_NAME)

.PHONY: test-e2e-containerized
test-e2e-containerized: build_e2e_image
docker run --rm --privileged \
-v /var/lib/docker \
-v /var/lib/containerd \
-v /lib/modules:/lib/modules \
$(E2E_IMAGE_NAME) /go/src/github.com/docker/cli/scripts/test/engine/entry

.PHONY: help
help: ## print this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
@ -1,4 +1,4 @@
FROM golang:1.10.3-alpine
FROM golang:1.10.4-alpine

RUN apk add -U git bash coreutils gcc musl-dev

@ -1,3 +1,3 @@
FROM dockercore/golang-cross:1.10.3@sha256:7671b4ed357fda50124e5679d36c4c3206ded4d43f1d2e0ff3d120a1e2bf94d7
FROM dockercore/golang-cross:1.10.4@sha256:55c7b933ac944f4922b673b4d4340d1a0404f3c324bd0b3f13a4326c427b1f2a
ENV DISABLE_WARN_OUTSIDE_CONTAINER=1
WORKDIR /go/src/github.com/docker/cli

@ -1,5 +1,5 @@

FROM golang:1.10.3-alpine
FROM golang:1.10.4-alpine

RUN apk add -U git make bash coreutils ca-certificates curl

@ -1,4 +1,4 @@
ARG GO_VERSION=1.10.3
ARG GO_VERSION=1.10.4

FROM docker/containerd-shim-process:a4d1531 AS containerd-shim-process

@ -15,28 +15,6 @@ RUN apt-get update && apt-get install -y \
iptables \
&& rm -rf /var/lib/apt/lists/*

# TODO - consider replacing with an official image and a multi-stage build to pluck the binaries out
#ARG CONTAINERD_VERSION=v1.1.2
#ARG CONTAINERD_VERSION=47a128d
#ARG CONTAINERD_VERSION=6c3e782f
ARG CONTAINERD_VERSION=65839a47a88b0a1c5dc34981f1741eccefc9f2b0
RUN git clone https://github.com/containerd/containerd.git /go/src/github.com/containerd/containerd && \
cd /go/src/github.com/containerd/containerd && \
git checkout ${CONTAINERD_VERSION} && \
make && \
make install
COPY e2eengine/config.toml /etc/containerd/config.toml
COPY --from=containerd-shim-process /bin/containerd-shim-process-v1 /bin/

# TODO - consider replacing with an official image and a multi-stage build to pluck the binaries out
ARG RUNC_VERSION=v1.0.0-rc5
RUN git clone https://github.com/opencontainers/runc.git /go/src/github.com/opencontainers/runc && \
cd /go/src/github.com/opencontainers/runc && \
git checkout ${RUNC_VERSION} && \
make && \
make install

ARG COMPOSE_VERSION=1.21.2
RUN curl -L https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose \
&& chmod +x /usr/local/bin/docker-compose

@ -1,4 +1,4 @@
FROM golang:1.10.3-alpine
FROM golang:1.10.4-alpine

RUN apk add -U git
@ -48,8 +48,11 @@ Options:
'<network-name>|<network-id>': connect to a user-defined network
--no-cache Do not use cache when building the image
--pull Always attempt to pull a newer version of the image
--progress Set type of progress output (only if BuildKit enabled) (auto, plain, tty).
Use plain to show container output
-q, --quiet Suppress the build output and print image ID on success
--rm Remove intermediate containers after a successful build (default true)
--secret Secret file to expose to the build (only if BuildKit enabled): id=mysecret,src=/local/secret
--security-opt value Security Options (default [])
--shm-size bytes Size of /dev/shm
The format is `<number><unit>`. `number` must be greater than `0`.
@ -1297,6 +1297,7 @@ This is a full example of the allowed configuration options on Linux:
"exec-opts": [],
"exec-root": "",
"experimental": false,
"features": {},
"storage-driver": "",
"storage-opts": [],
"labels": [],
@ -1392,6 +1393,7 @@ This is a full example of the allowed configuration options on Windows:
"dns-search": [],
"exec-opts": [],
"experimental": false,
"features": {},
"storage-driver": "",
"storage-opts": [],
"labels": [],
@ -1446,11 +1448,12 @@ The list of currently supported options that can be reconfigured is this:
the runtime shipped with the official docker packages.
- `runtimes`: it updates the list of available OCI runtimes that can
be used to run containers.
- `authorization-plugin`: specifies the authorization plugins to use.
- `authorization-plugin`: it specifies the authorization plugins to use.
- `allow-nondistributable-artifacts`: Replaces the set of registries to which the daemon will push nondistributable artifacts with a new set of registries.
- `insecure-registries`: it replaces the daemon insecure registries with a new set of insecure registries. If some existing insecure registries in daemon's configuration are not in the newly reloaded insecure registries, these existing ones will be removed from daemon's config.
- `registry-mirrors`: it replaces the daemon registry mirrors with a new set of registry mirrors. If some existing registry mirrors in daemon's configuration are not in the newly reloaded registry mirrors, these existing ones will be removed from daemon's config.
- `shutdown-timeout`: it replaces the daemon's existing configuration timeout with a new timeout for shutting down all containers.
- `features`: it explicitly enables or disables specific features.

Updating and reloading the cluster configurations such as `--cluster-store`,
`--cluster-advertise` and `--cluster-store-opts` will take effect only if
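Illustrative usage, not part of this diff: because `features` is now reloadable, a `daemon.json` containing `{"features": {"buildkit": true}}` can be applied to a running daemon with `kill -SIGHUP $(pidof dockerd)` instead of a full restart (`buildkit` is the common example here; the set of supported feature names depends on the daemon version).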
@ -24,6 +24,7 @@ Options:
-c, --change value Apply Dockerfile instruction to the created image (default [])
--help Print usage
-m, --message string Set commit message for imported image
--platform string Set platform if server is multi-platform capable
```

## Description
@ -87,3 +88,11 @@ Note the `sudo` in this example – you must preserve
the ownership of the files (especially root ownership) during the
archiving with tar. If you are not root (or the sudo command) when you
tar, then the ownerships might not get preserved.

## When the daemon supports multiple operating systems
If the daemon supports multiple operating systems, and the image being imported
does not match the default operating system, it may be necessary to add
`--platform`. This would be necessary when importing a Linux image into a Windows
daemon.

```powershell
# docker import --platform=linux .\linuximage.tar
```
@ -19,7 +19,7 @@ const descriptionSourcePath = "docs/reference/commandline/"

func generateCliYaml(opts *options) error {
stdin, stdout, stderr := term.StdStreams()
dockerCli := command.NewDockerCli(stdin, stdout, stderr, false)
dockerCli := command.NewDockerCli(stdin, stdout, stderr, false, nil)
cmd := &cobra.Command{Use: "docker"}
commands.AddCommands(cmd, dockerCli)
source := filepath.Join(opts.source, descriptionSourcePath)
@ -1,42 +0,0 @@
package check

import (
"os"
"testing"

"github.com/docker/cli/e2eengine"

"gotest.tools/icmd"
)

func TestDockerEngineOnContainerdAltRootConfig(t *testing.T) {
defer func() {
err := e2eengine.CleanupEngine(t)
if err != nil {
t.Errorf("Failed to cleanup engine: %s", err)
}
}()

t.Log("First engine init")
// First init
result := icmd.RunCmd(icmd.Command("docker", "engine", "init", "--config-file", "/tmp/etc/docker/daemon.json"),
func(c *icmd.Cmd) {
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
})
result.Assert(t, icmd.Expected{
Out: "Success! The docker engine is now running.",
Err: "",
ExitCode: 0,
})

// Make sure update doesn't blow up with alternate config path
t.Log("perform update")
// Now update and succeed
targetVersion := os.Getenv("VERSION")
result = icmd.RunCmd(icmd.Command("docker", "engine", "update", "--version", targetVersion))
result.Assert(t, icmd.Expected{
Out: "Success! The docker engine is now running.",
Err: "",
ExitCode: 0,
})
}
@ -1,14 +0,0 @@
root = "/var/lib/containerd"
state = "/run/containerd"
oom_score = 0

[grpc]
address = "/run/containerd/containerd.sock"
uid = 0
gid = 0

[debug]
address = "/run/containerd/debug.sock"
uid = 0
gid = 0
level = "debug"
@ -1,85 +0,0 @@
package multi

import (
"os"
"testing"

"github.com/docker/cli/e2eengine"

"gotest.tools/icmd"
)

func TestDockerEngineOnContainerdMultiTest(t *testing.T) {
defer func() {
err := e2eengine.CleanupEngine(t)
if err != nil {
t.Errorf("Failed to cleanup engine: %s", err)
}
}()

t.Log("Attempt engine init without experimental")
// First init
result := icmd.RunCmd(icmd.Command("docker", "engine", "init"),
func(c *icmd.Cmd) {
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=disabled")
})
result.Assert(t, icmd.Expected{
Out: "",
Err: "docker engine init is only supported",
ExitCode: 1,
})

t.Log("First engine init")
// First init
result = icmd.RunCmd(icmd.Command("docker", "engine", "init"),
func(c *icmd.Cmd) {
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
})
result.Assert(t, icmd.Expected{
Out: "Success! The docker engine is now running.",
Err: "",
ExitCode: 0,
})

t.Log("checking for updates")
// Check for updates
result = icmd.RunCmd(icmd.Command("docker", "engine", "check", "--downgrades", "--pre-releases"))
result.Assert(t, icmd.Expected{
Out: "VERSION",
Err: "",
ExitCode: 0,
})

t.Log("attempt second init (should fail)")
// Attempt to init a second time and fail
result = icmd.RunCmd(icmd.Command("docker", "engine", "init"),
func(c *icmd.Cmd) {
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
})
result.Assert(t, icmd.Expected{
Out: "",
Err: "engine already present",
ExitCode: 1,
})

t.Log("perform update")
// Now update and succeed
targetVersion := os.Getenv("VERSION")
result = icmd.RunCmd(icmd.Command("docker", "engine", "update", "--version", targetVersion))
result.Assert(t, icmd.Expected{
Out: "Success! The docker engine is now running.",
Err: "",
ExitCode: 0,
})

t.Log("remove engine")
result = icmd.RunCmd(icmd.Command("docker", "engine", "rm"),
func(c *icmd.Cmd) {
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
})
result.Assert(t, icmd.Expected{
Out: "",
Err: "",
ExitCode: 0,
})
}
@ -1,39 +0,0 @@
package e2eengine

import (
"context"
"strings"
"testing"

"github.com/docker/cli/internal/containerizedengine"
)

// CleanupEngine ensures the local engine has been removed between testcases
func CleanupEngine(t *testing.T) error {
t.Log("doing engine cleanup")
ctx := context.Background()

client, err := containerizedengine.NewClient("")
if err != nil {
return err
}

// See if the engine exists first
engine, err := client.GetEngine(ctx)
if err != nil {
if strings.Contains(err.Error(), "not present") {
t.Log("engine was not detected, no cleanup to perform")
// Nothing to do, it's not defined
return nil
}
t.Logf("failed to lookup engine: %s", err)
// Any other error is not good...
return err
}
// TODO Consider nuking the docker dir too so there's no cached content between test cases
err = client.RemoveEngine(ctx, engine)
if err != nil {
t.Logf("Failed to remove engine: %s", err)
}
return err
}
@ -1,12 +1,9 @@
package containerizedengine

import (
"bytes"
"context"
"syscall"

"github.com/containerd/containerd"
containerdtypes "github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/content"
@ -14,7 +11,6 @@ import (
prototypes "github.com/gogo/protobuf/types"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
)

type (
@ -25,6 +21,8 @@ type (
getImageFunc func(ctx context.Context, ref string) (containerd.Image, error)
contentStoreFunc func() content.Store
containerServiceFunc func() containers.Store
installFunc func(context.Context, containerd.Image, ...containerd.InstallOpts) error
versionFunc func(ctx context.Context) (containerd.Version, error)
}
fakeContainer struct {
idFunc func() string
@ -49,30 +47,6 @@ type (
isUnpackedFunc func(context.Context, string) (bool, error)
contentStoreFunc func() content.Store
}
fakeTask struct {
idFunc func() string
pidFunc func() uint32
startFunc func(context.Context) error
deleteFunc func(context.Context, ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error)
killFunc func(context.Context, syscall.Signal, ...containerd.KillOpts) error
waitFunc func(context.Context) (<-chan containerd.ExitStatus, error)
closeIOFunc func(context.Context, ...containerd.IOCloserOpts) error
resizeFunc func(ctx context.Context, w, h uint32) error
ioFunc func() cio.IO
statusFunc func(context.Context) (containerd.Status, error)
pauseFunc func(context.Context) error
resumeFunc func(context.Context) error
execFunc func(context.Context, string, *specs.Process, cio.Creator) (containerd.Process, error)
pidsFunc func(context.Context) ([]containerd.ProcessInfo, error)
checkpointFunc func(context.Context, ...containerd.CheckpointTaskOpts) (containerd.Image, error)
updateFunc func(context.Context, ...containerd.UpdateTaskOpts) error
loadProcessFunc func(context.Context, string, cio.Attach) (containerd.Process, error)
metricsFunc func(context.Context) (*containerdtypes.Metric, error)
}

testOutStream struct {
bytes.Buffer
}
)

func (w *fakeContainerdClient) Containers(ctx context.Context, filters ...string) ([]containerd.Container, error) {
@ -114,6 +88,18 @@ func (w *fakeContainerdClient) ContainerService() containers.Store {
func (w *fakeContainerdClient) Close() error {
return nil
}
func (w *fakeContainerdClient) Install(ctx context.Context, image containerd.Image, args ...containerd.InstallOpts) error {
if w.installFunc != nil {
return w.installFunc(ctx, image, args...)
}
return nil
}
func (w *fakeContainerdClient) Version(ctx context.Context) (containerd.Version, error) {
if w.versionFunc != nil {
return w.versionFunc(ctx)
}
return containerd.Version{}, nil
}

func (c *fakeContainer) ID() string {
if c.idFunc != nil {
@ -230,119 +216,3 @@ func (i *fakeImage) ContentStore() content.Store {
}
return nil
}

func (t *fakeTask) ID() string {
if t.idFunc != nil {
return t.idFunc()
}
return ""
}
func (t *fakeTask) Pid() uint32 {
if t.pidFunc != nil {
return t.pidFunc()
}
return 0
}
func (t *fakeTask) Start(ctx context.Context) error {
if t.startFunc != nil {
return t.startFunc(ctx)
}
return nil
}
func (t *fakeTask) Delete(ctx context.Context, opts ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error) {
if t.deleteFunc != nil {
return t.deleteFunc(ctx, opts...)
}
return nil, nil
}
func (t *fakeTask) Kill(ctx context.Context, signal syscall.Signal, opts ...containerd.KillOpts) error {
if t.killFunc != nil {
return t.killFunc(ctx, signal, opts...)
}
return nil
}
func (t *fakeTask) Wait(ctx context.Context) (<-chan containerd.ExitStatus, error) {
if t.waitFunc != nil {
return t.waitFunc(ctx)
}
return nil, nil
}
func (t *fakeTask) CloseIO(ctx context.Context, opts ...containerd.IOCloserOpts) error {
if t.closeIOFunc != nil {
return t.closeIOFunc(ctx, opts...)
}
return nil
}
func (t *fakeTask) Resize(ctx context.Context, w, h uint32) error {
if t.resizeFunc != nil {
return t.resizeFunc(ctx, w, h)
}
return nil
}
func (t *fakeTask) IO() cio.IO {
if t.ioFunc != nil {
return t.ioFunc()
}
return nil
}
func (t *fakeTask) Status(ctx context.Context) (containerd.Status, error) {
if t.statusFunc != nil {
return t.statusFunc(ctx)
}
return containerd.Status{}, nil
}
func (t *fakeTask) Pause(ctx context.Context) error {
if t.pauseFunc != nil {
return t.pauseFunc(ctx)
}
return nil
}
func (t *fakeTask) Resume(ctx context.Context) error {
if t.resumeFunc != nil {
return t.resumeFunc(ctx)
}
return nil
}
func (t *fakeTask) Exec(ctx context.Context, cmd string, proc *specs.Process, ioc cio.Creator) (containerd.Process, error) {
if t.execFunc != nil {
return t.execFunc(ctx, cmd, proc, ioc)
}
return nil, nil
}
func (t *fakeTask) Pids(ctx context.Context) ([]containerd.ProcessInfo, error) {
if t.pidsFunc != nil {
return t.pidsFunc(ctx)
}
return nil, nil
}
func (t *fakeTask) Checkpoint(ctx context.Context, opts ...containerd.CheckpointTaskOpts) (containerd.Image, error) {
if t.checkpointFunc != nil {
return t.checkpointFunc(ctx, opts...)
}
return nil, nil
}
func (t *fakeTask) Update(ctx context.Context, opts ...containerd.UpdateTaskOpts) error {
if t.updateFunc != nil {
return t.updateFunc(ctx, opts...)
}
return nil
}
func (t *fakeTask) LoadProcess(ctx context.Context, name string, attach cio.Attach) (containerd.Process, error) {
if t.loadProcessFunc != nil {
return t.loadProcessFunc(ctx, name, attach)
}
return nil, nil
}
func (t *fakeTask) Metrics(ctx context.Context) (*containerdtypes.Metric, error) {
if t.metricsFunc != nil {
return t.metricsFunc(ctx)
}
return nil, nil
}

func (o *testOutStream) FD() uintptr {
return 0
}
func (o *testOutStream) IsTerminal() bool {
return false
}
@ -7,6 +7,7 @@ import (
    "github.com/containerd/containerd"
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/remotes/docker"
    clitypes "github.com/docker/cli/types"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/pkg/jsonmessage"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
@ -15,7 +16,7 @@ import (
// NewClient returns a new containerizedengine client
// This client can be used to manage the lifecycle of
// dockerd running as a container on containerd.
func NewClient(sockPath string) (Client, error) {
func NewClient(sockPath string) (clitypes.ContainerizedClient, error) {
    if sockPath == "" {
        sockPath = containerdSockPath
    }
@ -23,17 +24,17 @@ func NewClient(sockPath string) (Client, error) {
    if err != nil {
        return nil, err
    }
    return baseClient{
    return &baseClient{
        cclient: cclient,
    }, nil
}

// Close will close the underlying clients
func (c baseClient) Close() error {
func (c *baseClient) Close() error {
    return c.cclient.Close()
}

func (c baseClient) pullWithAuth(ctx context.Context, imageName string, out OutStream,
func (c *baseClient) pullWithAuth(ctx context.Context, imageName string, out clitypes.OutStream,
    authConfig *types.AuthConfig) (containerd.Image, error) {

    resolver := docker.NewResolver(docker.ResolverOptions{

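The switch above from returning baseClient to &baseClient follows from Go's method-set rules: once the methods use pointer receivers, only *baseClient satisfies the interface, so the constructor must hand back a pointer. A minimal standalone sketch of that rule (hypothetical names, not code from this repository):

package main

import "fmt"

type Closer interface {
    Close() error
}

type base struct{ name string }

// Pointer receiver: Close belongs to *base, not to base values.
func (b *base) Close() error {
    fmt.Println("closing", b.name)
    return nil
}

func main() {
    var c Closer = &base{name: "demo"} // ok: *base implements Closer
    // var c Closer = base{}           // compile error: base does not implement Closer
    _ = c.Close()
}
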
@ -1,11 +1,13 @@
package containerizedengine

import (
    "bytes"
    "context"
    "fmt"
    "testing"

    "github.com/containerd/containerd"
    "github.com/docker/cli/cli/command"
    "github.com/docker/docker/api/types"
    "gotest.tools/assert"
)
@ -22,7 +24,7 @@ func TestPullWithAuthPullFail(t *testing.T) {
    }
    imageName := "testnamegoeshere"

    _, err := client.pullWithAuth(ctx, imageName, &testOutStream{}, &types.AuthConfig{})
    _, err := client.pullWithAuth(ctx, imageName, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{})
    assert.ErrorContains(t, err, "pull failure")
}

@ -38,6 +40,6 @@ func TestPullWithAuthPullPass(t *testing.T) {
    }
    imageName := "testnamegoeshere"

    _, err := client.pullWithAuth(ctx, imageName, &testOutStream{}, &types.AuthConfig{})
    _, err := client.pullWithAuth(ctx, imageName, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{})
    assert.NilError(t, err)
}

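The tests above replace the local testOutStream fake with command.NewOutStream wrapped around a bytes.Buffer, so output written through the stream can be inspected. A small usage sketch of that pattern (standalone and hypothetical, under the assumption that command.NewOutStream simply wraps an io.Writer as it does in this diff):

package main

import (
    "bytes"
    "fmt"

    "github.com/docker/cli/cli/command"
)

func main() {
    buf := &bytes.Buffer{}
    out := command.NewOutStream(buf) // wraps the buffer as a CLI output stream
    fmt.Fprint(out, "hello")
    fmt.Println(buf.String()) // prints "hello"
}
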
@ -1,261 +0,0 @@
package containerizedengine

import (
    "context"
    "fmt"
    "io"
    "strings"
    "syscall"
    "time"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/namespaces"
    "github.com/containerd/containerd/runtime/restart"
    "github.com/docker/cli/internal/pkg/containerized"
    "github.com/docker/docker/api/types"
    "github.com/pkg/errors"
)

// InitEngine is the main entrypoint for `docker engine init`
func (c baseClient) InitEngine(ctx context.Context, opts EngineInitOptions, out OutStream,
    authConfig *types.AuthConfig, healthfn func(context.Context) error) error {

    ctx = namespaces.WithNamespace(ctx, engineNamespace)
    // Verify engine isn't already running
    _, err := c.GetEngine(ctx)
    if err == nil {
        return ErrEngineAlreadyPresent
    } else if err != ErrEngineNotPresent {
        return err
    }

    imageName := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)
    // Look for desired image
    _, err = c.cclient.GetImage(ctx, imageName)
    if err != nil {
        if errdefs.IsNotFound(err) {
            _, err = c.pullWithAuth(ctx, imageName, out, authConfig)
            if err != nil {
                return errors.Wrapf(err, "unable to pull image %s", imageName)
            }
        } else {
            return errors.Wrapf(err, "unable to check for image %s", imageName)
        }
    }

    // Spin up the engine
    err = c.startEngineOnContainerd(ctx, imageName, opts.ConfigFile)
    if err != nil {
        return errors.Wrap(err, "failed to create docker daemon")
    }

    // Wait for the daemon to start, verify it's responsive
    fmt.Fprintf(out, "Waiting for engine to start... ")
    ctx, cancel := context.WithTimeout(ctx, engineWaitTimeout)
    defer cancel()
    if err := c.waitForEngine(ctx, out, healthfn); err != nil {
        // TODO once we have the logging strategy sorted out
        // this should likely gather the last few lines of logs to report
        // why the daemon failed to initialize
        return errors.Wrap(err, "failed to start docker daemon")
    }
    fmt.Fprintf(out, "Success! The docker engine is now running.\n")

    return nil

}

// GetEngine will return the containerd container running the engine (or error)
func (c baseClient) GetEngine(ctx context.Context) (containerd.Container, error) {
    ctx = namespaces.WithNamespace(ctx, engineNamespace)
    containers, err := c.cclient.Containers(ctx, "id=="+engineContainerName)
    if err != nil {
        return nil, err
    }
    if len(containers) == 0 {
        return nil, ErrEngineNotPresent
    }
    return containers[0], nil
}

// getEngineImage will return the current image used by the engine
func (c baseClient) getEngineImage(engine containerd.Container) (string, error) {
    ctx := namespaces.WithNamespace(context.Background(), engineNamespace)
    image, err := engine.Image(ctx)
    if err != nil {
        return "", err
    }
    return image.Name(), nil
}

// getEngineConfigFilePath will extract the config file location from the engine flags
func (c baseClient) getEngineConfigFilePath(ctx context.Context, engine containerd.Container) (string, error) {
    spec, err := engine.Spec(ctx)
    configFile := ""
    if err != nil {
        return configFile, err
    }
    for i := 0; i < len(spec.Process.Args); i++ {
        arg := spec.Process.Args[i]
        if strings.HasPrefix(arg, "--config-file") {
            if strings.Contains(arg, "=") {
                split := strings.SplitN(arg, "=", 2)
                configFile = split[1]
            } else {
                if i+1 >= len(spec.Process.Args) {
                    return configFile, ErrMalformedConfigFileParam
                }
                configFile = spec.Process.Args[i+1]
            }
        }
    }

    if configFile == "" {
        // TODO - any more diagnostics to offer?
        return configFile, ErrEngineConfigLookupFailure
    }
    return configFile, nil
}

var (
    engineWaitInterval = 500 * time.Millisecond
    engineWaitTimeout  = 60 * time.Second
)

// waitForEngine will wait for the engine to start
func (c baseClient) waitForEngine(ctx context.Context, out io.Writer, healthfn func(context.Context) error) error {
    ticker := time.NewTicker(engineWaitInterval)
    defer ticker.Stop()
    defer func() {
        fmt.Fprintf(out, "\n")
    }()

    err := c.waitForEngineContainer(ctx, ticker)
    if err != nil {
        return err
    }
    fmt.Fprintf(out, "waiting for engine to be responsive... ")
    for {
        select {
        case <-ticker.C:
            err = healthfn(ctx)
            if err == nil {
                fmt.Fprintf(out, "engine is online.")
                return nil
            }
        case <-ctx.Done():
            return errors.Wrap(err, "timeout waiting for engine to be responsive")
        }
    }
}

func (c baseClient) waitForEngineContainer(ctx context.Context, ticker *time.Ticker) error {
    var ret error
    for {
        select {
        case <-ticker.C:
            engine, err := c.GetEngine(ctx)
            if engine != nil {
                return nil
            }
            ret = err
        case <-ctx.Done():
            return errors.Wrap(ret, "timeout waiting for engine to be responsive")
        }
    }
}

// RemoveEngine gracefully unwinds the current engine
func (c baseClient) RemoveEngine(ctx context.Context, engine containerd.Container) error {
    ctx = namespaces.WithNamespace(ctx, engineNamespace)

    // Make sure the container isn't being restarted while we unwind it
    stopLabel := map[string]string{}
    stopLabel[restart.StatusLabel] = string(containerd.Stopped)
    engine.SetLabels(ctx, stopLabel)

    // Wind down the existing engine
    task, err := engine.Task(ctx, nil)
    if err != nil {
        if !errdefs.IsNotFound(err) {
            return err
        }
    } else {
        status, err := task.Status(ctx)
        if err != nil {
            return err
        }
        if status.Status == containerd.Running {
            // It's running, so kill it
            err := task.Kill(ctx, syscall.SIGTERM, []containerd.KillOpts{}...)
            if err != nil {
                return errors.Wrap(err, "task kill error")
            }

            ch, err := task.Wait(ctx)
            if err != nil {
                return err
            }
            timeout := time.NewTimer(engineWaitTimeout)
            select {
            case <-timeout.C:
                // TODO - consider a force flag in the future to allow a more aggressive
                // kill of the engine via
                // task.Kill(ctx, syscall.SIGKILL, containerd.WithKillAll)
                return ErrEngineShutdownTimeout
            case <-ch:
            }
        }
        if _, err := task.Delete(ctx); err != nil {
            return err
        }
    }
    deleteOpts := []containerd.DeleteOpts{containerd.WithSnapshotCleanup}
    err = engine.Delete(ctx, deleteOpts...)
    if err != nil && errdefs.IsNotFound(err) {
        return nil
    }
    return errors.Wrap(err, "failed to remove existing engine container")
}

// startEngineOnContainerd creates a new docker engine running on containerd
func (c baseClient) startEngineOnContainerd(ctx context.Context, imageName, configFile string) error {
    ctx = namespaces.WithNamespace(ctx, engineNamespace)
    image, err := c.cclient.GetImage(ctx, imageName)
    if err != nil {
        if errdefs.IsNotFound(err) {
            return fmt.Errorf("engine image missing: %s", imageName)
        }
        return errors.Wrap(err, "failed to check for engine image")
    }

    // Make sure we have a valid config file
    err = c.verifyDockerConfig(configFile)
    if err != nil {
        return err
    }

    engineSpec.Process.Args = append(engineSpec.Process.Args,
        "--config-file", configFile,
    )

    cOpts := []containerd.NewContainerOpts{
        containerized.WithNewSnapshot(image),
        restart.WithStatus(containerd.Running),
        restart.WithLogPath("/var/log/engine.log"), // TODO - better!
        genSpec(),
        containerd.WithRuntime("io.containerd.runtime.process.v1", nil),
    }

    _, err = c.cclient.NewContainer(
        ctx,
        engineContainerName,
        cOpts...,
    )
    if err != nil {
        return errors.Wrap(err, "failed to create engine container")
    }

    return nil
}
@ -1,537 +0,0 @@
package containerizedengine

import (
    "context"
    "fmt"
    "syscall"
    "testing"
    "time"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/cio"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/oci"
    "github.com/docker/docker/api/types"
    "github.com/opencontainers/runtime-spec/specs-go"
    "gotest.tools/assert"
)

func healthfnHappy(ctx context.Context) error {
    return nil
}
func healthfnError(ctx context.Context) error {
    return fmt.Errorf("ping failure")
}

func TestInitGetEngineFail(t *testing.T) {
    ctx := context.Background()
    opts := EngineInitOptions{
        EngineVersion:  "engineversiongoeshere",
        RegistryPrefix: "registryprefixgoeshere",
        ConfigFile:     "/tmp/configfilegoeshere",
        EngineImage:    CommunityEngineImage,
    }
    container := &fakeContainer{}
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{container}, nil
            },
        },
    }

    err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
    assert.Assert(t, err == ErrEngineAlreadyPresent)
}

func TestInitCheckImageFail(t *testing.T) {
    ctx := context.Background()
    opts := EngineInitOptions{
        EngineVersion:  "engineversiongoeshere",
        RegistryPrefix: "registryprefixgoeshere",
        ConfigFile:     "/tmp/configfilegoeshere",
        EngineImage:    CommunityEngineImage,
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{}, nil
            },
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return nil, fmt.Errorf("something went wrong")

            },
        },
    }

    err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
    assert.ErrorContains(t, err, "unable to check for image")
    assert.ErrorContains(t, err, "something went wrong")
}

func TestInitPullFail(t *testing.T) {
    ctx := context.Background()
    opts := EngineInitOptions{
        EngineVersion:  "engineversiongoeshere",
        RegistryPrefix: "registryprefixgoeshere",
        ConfigFile:     "/tmp/configfilegoeshere",
        EngineImage:    CommunityEngineImage,
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{}, nil
            },
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return nil, errdefs.ErrNotFound

            },
            pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
                return nil, fmt.Errorf("pull failure")
            },
        },
    }

    err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
    assert.ErrorContains(t, err, "unable to pull image")
    assert.ErrorContains(t, err, "pull failure")
}

func TestInitStartFail(t *testing.T) {
    ctx := context.Background()
    opts := EngineInitOptions{
        EngineVersion:  "engineversiongoeshere",
        RegistryPrefix: "registryprefixgoeshere",
        ConfigFile:     "/tmp/configfilegoeshere",
        EngineImage:    CommunityEngineImage,
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{}, nil
            },
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return nil, errdefs.ErrNotFound

            },
            pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
                return nil, nil
            },
        },
    }

    err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
    assert.ErrorContains(t, err, "failed to create docker daemon")
}

func TestGetEngineFail(t *testing.T) {
    ctx := context.Background()
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return nil, fmt.Errorf("container failure")
            },
        },
    }

    _, err := client.GetEngine(ctx)
    assert.ErrorContains(t, err, "failure")
}

func TestGetEngineNotPresent(t *testing.T) {
    ctx := context.Background()
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{}, nil
            },
        },
    }

    _, err := client.GetEngine(ctx)
    assert.Assert(t, err == ErrEngineNotPresent)
}

func TestGetEngineFound(t *testing.T) {
    ctx := context.Background()
    container := &fakeContainer{}
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{container}, nil
            },
        },
    }

    c, err := client.GetEngine(ctx)
    assert.NilError(t, err)
    assert.Equal(t, c, container)
}

func TestGetEngineImageFail(t *testing.T) {
    client := baseClient{}
    container := &fakeContainer{
        imageFunc: func(context.Context) (containerd.Image, error) {
            return nil, fmt.Errorf("failure")
        },
    }

    _, err := client.getEngineImage(container)
    assert.ErrorContains(t, err, "failure")
}

func TestGetEngineImagePass(t *testing.T) {
    client := baseClient{}
    image := &fakeImage{
        nameFunc: func() string {
            return "imagenamehere"
        },
    }
    container := &fakeContainer{
        imageFunc: func(context.Context) (containerd.Image, error) {
            return image, nil
        },
    }

    name, err := client.getEngineImage(container)
    assert.NilError(t, err)
    assert.Equal(t, name, "imagenamehere")
}

func TestWaitForEngineNeverShowsUp(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
    defer cancel()
    engineWaitInterval = 1 * time.Millisecond
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{}, nil
            },
        },
    }

    err := client.waitForEngine(ctx, &testOutStream{}, healthfnError)
    assert.ErrorContains(t, err, "timeout waiting")
}

func TestWaitForEnginePingFail(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
    defer cancel()
    engineWaitInterval = 1 * time.Millisecond
    container := &fakeContainer{}
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{container}, nil
            },
        },
    }

    err := client.waitForEngine(ctx, &testOutStream{}, healthfnError)
    assert.ErrorContains(t, err, "ping fail")
}

func TestWaitForEngineHealthy(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
    defer cancel()
    engineWaitInterval = 1 * time.Millisecond
    container := &fakeContainer{}
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{container}, nil
            },
        },
    }

    err := client.waitForEngine(ctx, &testOutStream{}, healthfnHappy)
    assert.NilError(t, err)
}

func TestRemoveEngineBadTaskBadDelete(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    container := &fakeContainer{
        deleteFunc: func(context.Context, ...containerd.DeleteOpts) error {
            return fmt.Errorf("delete failure")
        },
        taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
            return nil, errdefs.ErrNotFound
        },
    }

    err := client.RemoveEngine(ctx, container)
    assert.ErrorContains(t, err, "failed to remove existing engine")
    assert.ErrorContains(t, err, "delete failure")
}

func TestRemoveEngineTaskNoStatus(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    task := &fakeTask{
        statusFunc: func(context.Context) (containerd.Status, error) {
            return containerd.Status{}, fmt.Errorf("task status failure")
        },
    }
    container := &fakeContainer{
        taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
            return task, nil
        },
    }

    err := client.RemoveEngine(ctx, container)
    assert.ErrorContains(t, err, "task status failure")
}

func TestRemoveEngineTaskNotRunningDeleteFail(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    task := &fakeTask{
        statusFunc: func(context.Context) (containerd.Status, error) {
            return containerd.Status{Status: containerd.Unknown}, nil
        },
        deleteFunc: func(context.Context, ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error) {
            return nil, fmt.Errorf("task delete failure")
        },
    }
    container := &fakeContainer{
        taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
            return task, nil
        },
    }

    err := client.RemoveEngine(ctx, container)
    assert.ErrorContains(t, err, "task delete failure")
}

func TestRemoveEngineTaskRunningKillFail(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    task := &fakeTask{
        statusFunc: func(context.Context) (containerd.Status, error) {
            return containerd.Status{Status: containerd.Running}, nil
        },
        killFunc: func(context.Context, syscall.Signal, ...containerd.KillOpts) error {
            return fmt.Errorf("task kill failure")
        },
    }
    container := &fakeContainer{
        taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
            return task, nil
        },
    }

    err := client.RemoveEngine(ctx, container)
    assert.ErrorContains(t, err, "task kill failure")
}

func TestRemoveEngineTaskRunningWaitFail(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    task := &fakeTask{
        statusFunc: func(context.Context) (containerd.Status, error) {
            return containerd.Status{Status: containerd.Running}, nil
        },
        waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
            return nil, fmt.Errorf("task wait failure")
        },
    }
    container := &fakeContainer{
        taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
            return task, nil
        },
    }

    err := client.RemoveEngine(ctx, container)
    assert.ErrorContains(t, err, "task wait failure")
}

func TestRemoveEngineTaskRunningHappyPath(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    ch := make(chan containerd.ExitStatus, 1)
    task := &fakeTask{
        statusFunc: func(context.Context) (containerd.Status, error) {
            return containerd.Status{Status: containerd.Running}, nil
        },
        waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
            ch <- containerd.ExitStatus{}
            return ch, nil
        },
    }
    container := &fakeContainer{
        taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
            return task, nil
        },
    }

    err := client.RemoveEngine(ctx, container)
    assert.NilError(t, err)
}

func TestRemoveEngineTaskKillTimeout(t *testing.T) {
    ctx := context.Background()
    ch := make(chan containerd.ExitStatus, 1)
    client := baseClient{}
    engineWaitTimeout = 10 * time.Millisecond
    task := &fakeTask{
        statusFunc: func(context.Context) (containerd.Status, error) {
            return containerd.Status{Status: containerd.Running}, nil
        },
        waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
            //ch <- containerd.ExitStatus{} // let it timeout
            return ch, nil
        },
    }
    container := &fakeContainer{
        taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
            return task, nil
        },
    }

    err := client.RemoveEngine(ctx, container)
    assert.Assert(t, err == ErrEngineShutdownTimeout)
}

func TestStartEngineOnContainerdImageErr(t *testing.T) {
    ctx := context.Background()
    imageName := "testnamegoeshere"
    configFile := "/tmp/configfilegoeshere"
    client := baseClient{
        cclient: &fakeContainerdClient{
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return nil, fmt.Errorf("some image lookup failure")

            },
        },
    }
    err := client.startEngineOnContainerd(ctx, imageName, configFile)
    assert.ErrorContains(t, err, "some image lookup failure")
}

func TestStartEngineOnContainerdImageNotFound(t *testing.T) {
    ctx := context.Background()
    imageName := "testnamegoeshere"
    configFile := "/tmp/configfilegoeshere"
    client := baseClient{
        cclient: &fakeContainerdClient{
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return nil, errdefs.ErrNotFound

            },
        },
    }
    err := client.startEngineOnContainerd(ctx, imageName, configFile)
    assert.ErrorContains(t, err, "engine image missing")
}

func TestStartEngineOnContainerdHappy(t *testing.T) {
    ctx := context.Background()
    imageName := "testnamegoeshere"
    configFile := "/tmp/configfilegoeshere"
    ch := make(chan containerd.ExitStatus, 1)
    streams := cio.Streams{}
    task := &fakeTask{
        statusFunc: func(context.Context) (containerd.Status, error) {
            return containerd.Status{Status: containerd.Running}, nil
        },
        waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
            ch <- containerd.ExitStatus{}
            return ch, nil
        },
    }
    container := &fakeContainer{
        newTaskFunc: func(ctx context.Context, creator cio.Creator, opts ...containerd.NewTaskOpts) (containerd.Task, error) {
            if streams.Stdout != nil {
                streams.Stdout.Write([]byte("{}"))
            }
            return task, nil
        },
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return nil, nil

            },
            newContainerFunc: func(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error) {
                return container, nil
            },
        },
    }
    err := client.startEngineOnContainerd(ctx, imageName, configFile)
    assert.NilError(t, err)
}

func TestGetEngineConfigFilePathBadSpec(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    container := &fakeContainer{
        specFunc: func(context.Context) (*oci.Spec, error) {
            return nil, fmt.Errorf("spec error")
        },
    }
    _, err := client.getEngineConfigFilePath(ctx, container)
    assert.ErrorContains(t, err, "spec error")
}

func TestGetEngineConfigFilePathDistinct(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    container := &fakeContainer{
        specFunc: func(context.Context) (*oci.Spec, error) {
            return &oci.Spec{
                Process: &specs.Process{
                    Args: []string{
                        "--another-flag",
                        "foo",
                        "--config-file",
                        "configpath",
                    },
                },
            }, nil
        },
    }
    configFile, err := client.getEngineConfigFilePath(ctx, container)
    assert.NilError(t, err)
    assert.Assert(t, err, configFile == "configpath")
}

func TestGetEngineConfigFilePathEquals(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    container := &fakeContainer{
        specFunc: func(context.Context) (*oci.Spec, error) {
            return &oci.Spec{
                Process: &specs.Process{
                    Args: []string{
                        "--another-flag=foo",
                        "--config-file=configpath",
                    },
                },
            }, nil
        },
    }
    configFile, err := client.getEngineConfigFilePath(ctx, container)
    assert.NilError(t, err)
    assert.Assert(t, err, configFile == "configpath")
}

func TestGetEngineConfigFilePathMalformed1(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    container := &fakeContainer{
        specFunc: func(context.Context) (*oci.Spec, error) {
            return &oci.Spec{
                Process: &specs.Process{
                    Args: []string{
                        "--another-flag",
                        "--config-file",
                    },
                },
            }, nil
        },
    }
    _, err := client.getEngineConfigFilePath(ctx, container)
    assert.Assert(t, err == ErrMalformedConfigFileParam)
}
@ -1,16 +0,0 @@
// +build !windows

package containerizedengine

import (
    "github.com/containerd/containerd"
    "github.com/containerd/containerd/oci"
    "github.com/docker/cli/internal/pkg/containerized"
)

func genSpec() containerd.NewContainerOpts {
    return containerd.WithSpec(&engineSpec,
        containerized.WithAllCapabilities,
        oci.WithParentCgroupDevices,
    )
}
@ -1,14 +0,0 @@
// +build windows

package containerizedengine

import (
    "github.com/containerd/containerd"
    "github.com/docker/cli/internal/pkg/containerized"
)

func genSpec() containerd.NewContainerOpts {
    return containerd.WithSpec(&engineSpec,
        containerized.WithAllCapabilities,
    )
}
@ -1,35 +0,0 @@
package containerizedengine

import (
    "os"
    "path"
)

func (c baseClient) verifyDockerConfig(configFile string) error {

    // TODO - in the future consider leveraging containerd and a host runtime
    // to create the file. For now, just create it locally since we have to be
    // local to talk to containerd

    configDir := path.Dir(configFile)
    err := os.MkdirAll(configDir, 0644)
    if err != nil {
        return err
    }

    fd, err := os.OpenFile(configFile, os.O_RDWR|os.O_CREATE, 0755)
    if err != nil {
        return err
    }
    defer fd.Close()

    info, err := fd.Stat()
    if err != nil {
        return err
    }
    if info.Size() == 0 {
        _, err := fd.Write([]byte("{}"))
        return err
    }
    return nil
}
@ -64,7 +64,7 @@ outer:
        }
    }

    err := updateNonActive(ctx, ongoing, cs, statuses, keys, activeSeen, &done, start)
    err := updateNonActive(ctx, ongoing, cs, statuses, &keys, activeSeen, &done, start)
    if err != nil {
        continue outer
    }
@ -92,11 +92,11 @@ outer:
    }
}

func updateNonActive(ctx context.Context, ongoing *jobs, cs content.Store, statuses map[string]statusInfo, keys []string, activeSeen map[string]struct{}, done *bool, start time.Time) error {
func updateNonActive(ctx context.Context, ongoing *jobs, cs content.Store, statuses map[string]statusInfo, keys *[]string, activeSeen map[string]struct{}, done *bool, start time.Time) error {

    for _, j := range ongoing.jobs() {
        key := remotes.MakeRefKey(ctx, j)
        keys = append(keys, key)
        *keys = append(*keys, key)
        if _, ok := activeSeen[key]; ok {
            continue
        }

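The change above passes keys as *[]string because appending through a by-value slice parameter can reallocate the backing array, so the caller's slice header never sees the new elements. A minimal standalone sketch of the difference (hypothetical names, not code from this repository):

package main

import "fmt"

func appendByValue(keys []string, k string) {
    keys = append(keys, k) // may point at a new backing array; caller's header is unchanged
    _ = keys
}

func appendByPointer(keys *[]string, k string) {
    *keys = append(*keys, k) // updates the caller's slice header in place
}

func main() {
    keys := []string{}
    appendByValue(keys, "a")
    fmt.Println(len(keys)) // 0: the append was lost
    appendByPointer(&keys, "a")
    fmt.Println(len(keys)) // 1: the append is visible
}
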
@ -1,12 +0,0 @@
// +build !windows

package containerizedengine

import (
    "golang.org/x/sys/unix"
)

var (
    // SIGKILL maps to unix.SIGKILL
    SIGKILL = unix.SIGKILL
)
@ -1,12 +0,0 @@
// +build windows

package containerizedengine

import (
    "syscall"
)

var (
    // SIGKILL all signals are ignored by containerd kill windows
    SIGKILL = syscall.Signal(0)
)
@ -3,30 +3,15 @@ package containerizedengine
import (
    "context"
    "errors"
    "io"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/containers"
    "github.com/containerd/containerd/content"
    registryclient "github.com/docker/cli/cli/registry/client"
    "github.com/docker/docker/api/types"
    ver "github.com/hashicorp/go-version"
    specs "github.com/opencontainers/runtime-spec/specs-go"
)

const (
    // CommunityEngineImage is the repo name for the community engine
    CommunityEngineImage = "engine-community"

    // EnterpriseEngineImage is the repo name for the enterprise engine
    EnterpriseEngineImage = "engine-enterprise"

    containerdSockPath  = "/run/containerd/containerd.sock"
    engineContainerName = "dockerd"
    engineNamespace     = "docker"

    // Used to signal the containerd-proxy if it should manage
    proxyLabel = "com.docker/containerd-proxy.scope"
    containerdSockPath = "/run/containerd/containerd.sock"
    engineNamespace    = "com.docker"
)

var (
@ -44,80 +29,12 @@ var (

    // ErrEngineShutdownTimeout returned if the engine failed to shutdown in time
    ErrEngineShutdownTimeout = errors.New("timeout waiting for engine to exit")

    // ErrEngineImageMissingTag returned if the engine image is missing the version tag
    ErrEngineImageMissingTag = errors.New("malformed engine image missing tag")

    engineSpec = specs.Spec{
        Root: &specs.Root{
            Path: "rootfs",
        },
        Process: &specs.Process{
            Cwd: "/",
            Args: []string{
                // In general, configuration should be driven by the config file, not these flags
                // TODO - consider moving more of these to the config file, and make sure the defaults are set if not present.
                "/sbin/dockerd",
                "-s",
                "overlay2",
                "--containerd",
                "/run/containerd/containerd.sock",
                "--default-runtime",
                "containerd",
                "--add-runtime",
                "containerd=runc",
            },
            User: specs.User{
                UID: 0,
                GID: 0,
            },
            Env: []string{
                "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin",
            },
            NoNewPrivileges: false,
        },
    }
)

// Client can be used to manage the lifecycle of
// dockerd running as a container on containerd.
type Client interface {
    Close() error
    ActivateEngine(ctx context.Context,
        opts EngineInitOptions,
        out OutStream,
        authConfig *types.AuthConfig,
        healthfn func(context.Context) error) error
    InitEngine(ctx context.Context,
        opts EngineInitOptions,
        out OutStream,
        authConfig *types.AuthConfig,
        healthfn func(context.Context) error) error
    DoUpdate(ctx context.Context,
        opts EngineInitOptions,
        out OutStream,
        authConfig *types.AuthConfig,
        healthfn func(context.Context) error) error
    GetEngineVersions(ctx context.Context, registryClient registryclient.RegistryClient, currentVersion, imageName string) (AvailableVersions, error)

    GetEngine(ctx context.Context) (containerd.Container, error)
    RemoveEngine(ctx context.Context, engine containerd.Container) error
    GetCurrentEngineVersion(ctx context.Context) (EngineInitOptions, error)
}
type baseClient struct {
    cclient containerdClient
}

// EngineInitOptions contains the configuration settings
// used during initialization of a containerized docker engine
type EngineInitOptions struct {
    RegistryPrefix string
    EngineImage    string
    EngineVersion  string
    ConfigFile     string
    scope          string
}

// containerdClient abstracts the containerd client to aid in testability
type containerdClient interface {
    Containers(ctx context.Context, filters ...string) ([]containerd.Container, error)
@ -127,33 +44,6 @@ type containerdClient interface {
    Close() error
    ContentStore() content.Store
    ContainerService() containers.Store
}

// AvailableVersions groups the available versions which were discovered
type AvailableVersions struct {
    Downgrades []DockerVersion
    Patches    []DockerVersion
    Upgrades   []DockerVersion
}

// DockerVersion wraps a semantic version to retain the original tag
// since the docker date based versions don't strictly follow semantic
// versioning (leading zeros, etc.)
type DockerVersion struct {
    ver.Version
    Tag string
}

// Update stores available updates for rendering in a table
type Update struct {
    Type    string
    Version string
    Notes   string
}

// OutStream is an output stream used to write normal program output.
type OutStream interface {
    io.Writer
    FD() uintptr
    IsTerminal() bool
    Install(context.Context, containerd.Image, ...containerd.InstallOpts) error
    Version(ctx context.Context) (containerd.Version, error)
}

@ -2,79 +2,54 @@ package containerizedengine

import (
    "context"
    "encoding/json"
    "fmt"
    "path"
    "strings"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/namespaces"
    "github.com/docker/cli/internal/pkg/containerized"
    "github.com/docker/cli/internal/versions"
    clitypes "github.com/docker/cli/types"
    "github.com/docker/distribution/reference"
    "github.com/docker/docker/api/types"
    ver "github.com/hashicorp/go-version"
    "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
)

// GetCurrentEngineVersion determines the current type of engine (image) and version
func (c baseClient) GetCurrentEngineVersion(ctx context.Context) (EngineInitOptions, error) {
    ctx = namespaces.WithNamespace(ctx, engineNamespace)
    ret := EngineInitOptions{}
    currentEngine := CommunityEngineImage
    engine, err := c.GetEngine(ctx)
    if err != nil {
        if err == ErrEngineNotPresent {
            return ret, errors.Wrap(err, "failed to find existing engine")
        }
        return ret, err
    }
    imageName, err := c.getEngineImage(engine)
    if err != nil {
        return ret, err
    }
    distributionRef, err := reference.ParseNormalizedNamed(imageName)
    if err != nil {
        return ret, errors.Wrapf(err, "failed to parse image name: %s", imageName)
    }

    if strings.Contains(distributionRef.Name(), EnterpriseEngineImage) {
        currentEngine = EnterpriseEngineImage
    }
    taggedRef, ok := distributionRef.(reference.NamedTagged)
    if !ok {
        return ret, ErrEngineImageMissingTag
    }
    ret.EngineImage = currentEngine
    ret.EngineVersion = taggedRef.Tag()
    ret.RegistryPrefix = reference.Domain(taggedRef) + "/" + path.Dir(reference.Path(taggedRef))
    return ret, nil
}

// ActivateEngine will switch the image from the CE to EE image
func (c baseClient) ActivateEngine(ctx context.Context, opts EngineInitOptions, out OutStream,
    authConfig *types.AuthConfig, healthfn func(context.Context) error) error {
func (c *baseClient) ActivateEngine(ctx context.Context, opts clitypes.EngineInitOptions, out clitypes.OutStream,
    authConfig *types.AuthConfig) error {

    // set the proxy scope to "ee" for activate flows
    opts.scope = "ee"

    ctx = namespaces.WithNamespace(ctx, engineNamespace)

    // If version is unspecified, use the existing engine version
    if opts.EngineVersion == "" {
        currentOpts, err := c.GetCurrentEngineVersion(ctx)
    // If the user didn't specify an image, determine the correct enterprise image to use
    if opts.EngineImage == "" {
        localMetadata, err := versions.GetCurrentRuntimeMetadata(opts.RuntimeMetadataDir)
        if err != nil {
            return err
            return errors.Wrap(err, "unable to determine the installed engine version. Specify which engine image to update with --engine-image")
        }
        opts.EngineVersion = currentOpts.EngineVersion
        if currentOpts.EngineImage == EnterpriseEngineImage {
            // This is a "no-op" activation so the only change would be the license - don't update the engine itself
            return nil

        engineImage := localMetadata.EngineImage
        if engineImage == clitypes.EnterpriseEngineImage || engineImage == clitypes.CommunityEngineImage {
            opts.EngineImage = clitypes.EnterpriseEngineImage
        } else {
            // Chop off the standard prefix and retain any trailing OS specific image details
            // e.g., engine-community-dm -> engine-enterprise-dm
            engineImage = strings.TrimPrefix(engineImage, clitypes.EnterpriseEngineImage)
            engineImage = strings.TrimPrefix(engineImage, clitypes.CommunityEngineImage)
            opts.EngineImage = clitypes.EnterpriseEngineImage + engineImage
        }
    }
    return c.DoUpdate(ctx, opts, out, authConfig, healthfn)

    ctx = namespaces.WithNamespace(ctx, engineNamespace)
    return c.DoUpdate(ctx, opts, out, authConfig)
}

// DoUpdate performs the underlying engine update
func (c baseClient) DoUpdate(ctx context.Context, opts EngineInitOptions, out OutStream,
    authConfig *types.AuthConfig, healthfn func(context.Context) error) error {
func (c *baseClient) DoUpdate(ctx context.Context, opts clitypes.EngineInitOptions, out clitypes.OutStream,
    authConfig *types.AuthConfig) error {

    ctx = namespaces.WithNamespace(ctx, engineNamespace)
    if opts.EngineVersion == "" {
@ -83,7 +58,16 @@ func (c baseClient) DoUpdate(ctx context.Context, opts EngineInitOptions, out Ou
        // current engine version and automatically apply it so users
        // could stay in sync by simply having a scheduled
        // `docker engine update`
        return fmt.Errorf("please pick the version you want to update to")
        return fmt.Errorf("pick the version you want to update to with --version")
    }
    var localMetadata *clitypes.RuntimeMetadata
    if opts.EngineImage == "" {
        var err error
        localMetadata, err = versions.GetCurrentRuntimeMetadata(opts.RuntimeMetadataDir)
        if err != nil {
            return errors.Wrap(err, "unable to determine the installed engine version. Specify which engine image to update with --engine-image set to 'engine-community' or 'engine-enterprise'")
        }
        opts.EngineImage = localMetadata.EngineImage
    }

    imageName := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)
@ -101,30 +85,99 @@ func (c baseClient) DoUpdate(ctx context.Context, opts EngineInitOptions, out Ou
        }
    }

    // Gather information about the existing engine so we can recreate it
    engine, err := c.GetEngine(ctx)
    // Make sure we're safe to proceed
    newMetadata, err := c.PreflightCheck(ctx, image)
    if err != nil {
        if err == ErrEngineNotPresent {
            return errors.Wrap(err, "unable to find existing engine - please use init")
        return err
    }
    if localMetadata != nil {
        if localMetadata.Platform != newMetadata.Platform {
            fmt.Fprintf(out, "\nNotice: you have switched to \"%s\". Refer to %s for update instructions.\n\n", newMetadata.Platform, getReleaseNotesURL(imageName))
        }
    }

    if err := c.cclient.Install(ctx, image, containerd.WithInstallReplace, containerd.WithInstallPath("/usr")); err != nil {
        return err
    }

    // TODO verify the image has changed and don't update if nothing has changed

    err = containerized.AtomicImageUpdate(ctx, engine, image, func() error {
        ctx, cancel := context.WithTimeout(ctx, engineWaitTimeout)
        defer cancel()
        return c.waitForEngine(ctx, out, healthfn)
    })
    if err == nil && opts.scope != "" {
        var labels map[string]string
        labels, err = engine.Labels(ctx)
        if err != nil {
            return err
        }
        labels[proxyLabel] = opts.scope
        _, err = engine.SetLabels(ctx, labels)
    }
    return err
    return versions.WriteRuntimeMetadata(opts.RuntimeMetadataDir, newMetadata)
}

// PreflightCheck verifies the specified image is compatible with the local system before proceeding to update/activate
// If things look good, the RuntimeMetadata for the new image is returned and can be written out to the host
func (c *baseClient) PreflightCheck(ctx context.Context, image containerd.Image) (*clitypes.RuntimeMetadata, error) {
    var metadata clitypes.RuntimeMetadata
    ic, err := image.Config(ctx)
    if err != nil {
        return nil, err
    }
    var (
        ociimage v1.Image
        config   v1.ImageConfig
    )
    switch ic.MediaType {
    case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
        p, err := content.ReadBlob(ctx, image.ContentStore(), ic)
        if err != nil {
            return nil, err
        }

        if err := json.Unmarshal(p, &ociimage); err != nil {
            return nil, err
        }
        config = ociimage.Config
    default:
        return nil, fmt.Errorf("unknown image %s config media type %s", image.Name(), ic.MediaType)
    }

    metadataString, ok := config.Labels["com.docker."+clitypes.RuntimeMetadataName]
    if !ok {
        return nil, fmt.Errorf("image %s does not contain runtime metadata label %s", image.Name(), clitypes.RuntimeMetadataName)
    }
    err = json.Unmarshal([]byte(metadataString), &metadata)
    if err != nil {
        return nil, errors.Wrapf(err, "malformed runtime metadata file in %s", image.Name())
    }

    // Current CLI only supports host install runtime
    if metadata.Runtime != "host_install" {
        return nil, fmt.Errorf("unsupported daemon image: %s\nConsult the release notes at %s for upgrade instructions", metadata.Runtime, getReleaseNotesURL(image.Name()))
    }

    // Verify local containerd is new enough
    localVersion, err := c.cclient.Version(ctx)
    if err != nil {
        return nil, err
    }
    if metadata.ContainerdMinVersion != "" {
        lv, err := ver.NewVersion(localVersion.Version)
        if err != nil {
            return nil, err
        }
        mv, err := ver.NewVersion(metadata.ContainerdMinVersion)
        if err != nil {
            return nil, err
        }
        if lv.LessThan(mv) {
            return nil, fmt.Errorf("local containerd is too old: %s - this engine version requires %s or newer.\nConsult the release notes at %s for upgrade instructions",
                localVersion.Version, metadata.ContainerdMinVersion, getReleaseNotesURL(image.Name()))
        }
    } // If omitted on metadata, no hard dependency on containerd version beyond 18.09 baseline

    // All checks look OK, proceed with update
    return &metadata, nil
}

// getReleaseNotesURL returns a release notes url
// If the image name does not contain a version tag, the base release notes URL is returned
func getReleaseNotesURL(imageName string) string {
    versionTag := ""
    distributionRef, err := reference.ParseNormalizedNamed(imageName)
    if err == nil {
        taggedRef, ok := distributionRef.(reference.NamedTagged)
        if ok {
            versionTag = taggedRef.Tag()
        }
    }
    return fmt.Sprintf("%s/%s", clitypes.ReleaseNotePrefix, versionTag)
}

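PreflightCheck above gates the update on the local containerd version using hashicorp/go-version: parse both version strings, then compare with LessThan. A minimal sketch of just that comparison (the version strings here are made up):

package main

import (
    "fmt"

    ver "github.com/hashicorp/go-version"
)

func main() {
    local, err := ver.NewVersion("1.2.0") // hypothetical local containerd version
    if err != nil {
        panic(err)
    }
    min, err := ver.NewVersion("1.2.2") // hypothetical minimum from image metadata
    if err != nil {
        panic(err)
    }
    if local.LessThan(min) {
        fmt.Printf("local containerd %s is older than required %s\n", local, min)
    }
}

Keeping the original tag alongside the parsed version (as the DockerVersion type earlier does) matters because Docker's date-based tags such as 18.09.0 use leading zeros that strict semantic-version parsing normalizes away.
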
@ -1,169 +1,78 @@
|
||||
package containerizedengine
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/cio"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/internal/versions"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
"github.com/docker/docker/api/types"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"gotest.tools/assert"
|
||||
)
|
||||
|
||||
func TestGetCurrentEngineVersionHappy(t *testing.T) {
|
||||
func TestActivateImagePermutations(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
image := &fakeImage{
|
||||
nameFunc: func() string {
|
||||
return "acme.com/dockermirror/" + CommunityEngineImage + ":engineversion"
|
||||
},
|
||||
}
|
||||
container := &fakeContainer{
|
||||
imageFunc: func(context.Context) (containerd.Image, error) {
|
||||
return image, nil
|
||||
},
|
||||
}
|
||||
lookedup := "not called yet"
|
||||
expectedError := fmt.Errorf("expected error")
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return []containerd.Container{container}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
opts, err := client.GetCurrentEngineVersion(ctx)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, opts.EngineImage, CommunityEngineImage)
|
||||
assert.Equal(t, opts.RegistryPrefix, "acme.com/dockermirror")
|
||||
assert.Equal(t, opts.EngineVersion, "engineversion")
|
||||
}
|
||||
|
||||
func TestGetCurrentEngineVersionEnterpriseHappy(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
image := &fakeImage{
|
||||
nameFunc: func() string {
|
||||
return "docker.io/docker/" + EnterpriseEngineImage + ":engineversion"
|
||||
},
|
||||
}
|
||||
container := &fakeContainer{
|
||||
imageFunc: func(context.Context) (containerd.Image, error) {
|
||||
return image, nil
|
||||
},
|
||||
}
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return []containerd.Container{container}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
opts, err := client.GetCurrentEngineVersion(ctx)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, opts.EngineImage, EnterpriseEngineImage)
|
||||
assert.Equal(t, opts.EngineVersion, "engineversion")
|
||||
assert.Equal(t, opts.RegistryPrefix, "docker.io/docker")
|
||||
}
|
||||
|
||||
func TestGetCurrentEngineVersionNoEngine(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return []containerd.Container{}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := client.GetCurrentEngineVersion(ctx)
|
||||
assert.ErrorContains(t, err, "failed to find existing engine")
|
||||
}
|
||||
|
||||
func TestGetCurrentEngineVersionMiscEngineError(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
expectedError := fmt.Errorf("some container lookup error")
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
|
||||
lookedup = ref
|
||||
return nil, expectedError
|
||||
},
|
||||
},
|
||||
}
|
||||
tmpdir, err := ioutil.TempDir("", "enginedir")
|
||||
assert.NilError(t, err)
|
||||
defer os.RemoveAll(tmpdir)
|
||||
metadata := clitypes.RuntimeMetadata{EngineImage: clitypes.EnterpriseEngineImage}
|
||||
err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
|
||||
assert.NilError(t, err)
|
||||
|
||||
_, err := client.GetCurrentEngineVersion(ctx)
|
||||
assert.Assert(t, err == expectedError)
|
||||
opts := clitypes.EngineInitOptions{
|
||||
EngineVersion: "engineversiongoeshere",
|
||||
RegistryPrefix: "registryprefixgoeshere",
|
||||
ConfigFile: "/tmp/configfilegoeshere",
|
||||
RuntimeMetadataDir: tmpdir,
|
||||
}
|
||||
|
||||
err = client.ActivateEngine(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{})
|
||||
assert.ErrorContains(t, err, expectedError.Error())
|
||||
assert.Equal(t, lookedup, fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, clitypes.EnterpriseEngineImage, opts.EngineVersion))
|
||||
|
||||
metadata = clitypes.RuntimeMetadata{EngineImage: clitypes.CommunityEngineImage}
|
||||
err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
|
||||
assert.NilError(t, err)
|
||||
err = client.ActivateEngine(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{})
|
||||
assert.ErrorContains(t, err, expectedError.Error())
|
||||
assert.Equal(t, lookedup, fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, clitypes.EnterpriseEngineImage, opts.EngineVersion))
|
||||
|
||||
metadata = clitypes.RuntimeMetadata{EngineImage: clitypes.CommunityEngineImage + "-dm"}
|
||||
err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
|
||||
assert.NilError(t, err)
|
||||
err = client.ActivateEngine(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{})
|
||||
assert.ErrorContains(t, err, expectedError.Error())
|
||||
assert.Equal(t, lookedup, fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, clitypes.EnterpriseEngineImage+"-dm", opts.EngineVersion))
|
||||
}
|
||||
|
||||
func TestGetCurrentEngineVersionImageFailure(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
container := &fakeContainer{
|
||||
imageFunc: func(context.Context) (containerd.Image, error) {
|
||||
return nil, fmt.Errorf("container image failure")
|
||||
},
|
||||
}
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return []containerd.Container{container}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := client.GetCurrentEngineVersion(ctx)
|
||||
assert.ErrorContains(t, err, "container image failure")
|
||||
}
|
||||
|
||||
func TestGetCurrentEngineVersionMalformed(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
image := &fakeImage{
|
||||
nameFunc: func() string {
|
||||
return "imagename"
|
||||
},
|
||||
}
|
||||
container := &fakeContainer{
|
||||
imageFunc: func(context.Context) (containerd.Image, error) {
|
||||
return image, nil
|
||||
},
|
||||
}
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return []containerd.Container{container}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := client.GetCurrentEngineVersion(ctx)
|
||||
assert.Assert(t, err == ErrEngineImageMissingTag)
|
||||
}
|
||||
|
||||
func TestActivateNoEngine(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return []containerd.Container{}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
opts := EngineInitOptions{
|
||||
EngineVersion: "engineversiongoeshere",
|
||||
RegistryPrefix: "registryprefixgoeshere",
|
||||
ConfigFile: "/tmp/configfilegoeshere",
|
||||
EngineImage: EnterpriseEngineImage,
|
||||
}
|
||||
|
||||
err := client.ActivateEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
|
||||
assert.ErrorContains(t, err, "unable to find")
|
||||
}
|
||||
|
||||
func TestActivateNoChange(t *testing.T) {
func TestActivateConfigFailure(t *testing.T) {
ctx := context.Background()
registryPrefix := "registryprefixgoeshere"
image := &fakeImage{
nameFunc: func() string {
return registryPrefix + "/" + EnterpriseEngineImage + ":engineversion"
return registryPrefix + "/" + clitypes.EnterpriseEngineImage + ":engineversion"
},
configFunc: func(ctx context.Context) (ocispec.Descriptor, error) {
return ocispec.Descriptor{}, fmt.Errorf("config lookup failure")
},
}
container := &fakeContainer{
@ -182,17 +91,27 @@ func TestActivateNoChange(t *testing.T) {
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{container}, nil
},
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return image, nil
},
},
}
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: EnterpriseEngineImage,
tmpdir, err := ioutil.TempDir("", "enginedir")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
metadata := clitypes.RuntimeMetadata{EngineImage: clitypes.CommunityEngineImage}
err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
assert.NilError(t, err)
opts := clitypes.EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: clitypes.EnterpriseEngineImage,
RuntimeMetadataDir: tmpdir,
}

err := client.ActivateEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.NilError(t, err)
err = client.ActivateEngine(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{})
assert.ErrorContains(t, err, "config lookup failure")
}

func TestActivateDoUpdateFail(t *testing.T) {
@ -219,38 +138,60 @@ func TestActivateDoUpdateFail(t *testing.T) {
},
},
}
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: EnterpriseEngineImage,
tmpdir, err := ioutil.TempDir("", "enginedir")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
metadata := clitypes.RuntimeMetadata{EngineImage: clitypes.CommunityEngineImage}
err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
assert.NilError(t, err)
opts := clitypes.EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: clitypes.EnterpriseEngineImage,
RuntimeMetadataDir: tmpdir,
}

err := client.ActivateEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
err = client.ActivateEngine(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{})
assert.ErrorContains(t, err, "check for image")
assert.ErrorContains(t, err, "something went wrong")
}

func TestDoUpdateNoVersion(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "enginedir")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
metadata := clitypes.RuntimeMetadata{EngineImage: clitypes.EnterpriseEngineImage}
err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
assert.NilError(t, err)
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: EnterpriseEngineImage,
opts := clitypes.EngineInitOptions{
EngineVersion: "",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: clitypes.EnterpriseEngineImage,
RuntimeMetadataDir: tmpdir,
}

client := baseClient{}
err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.ErrorContains(t, err, "please pick the version you")
err = client.DoUpdate(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{})
assert.ErrorContains(t, err, "pick the version you")
}

func TestDoUpdateImageMiscError(t *testing.T) {
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: "testnamegoeshere",
tmpdir, err := ioutil.TempDir("", "enginedir")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
metadata := clitypes.RuntimeMetadata{EngineImage: clitypes.EnterpriseEngineImage}
err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
assert.NilError(t, err)
opts := clitypes.EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: "testnamegoeshere",
RuntimeMetadataDir: tmpdir,
}
client := baseClient{
cclient: &fakeContainerdClient{
@ -260,18 +201,26 @@ func TestDoUpdateImageMiscError(t *testing.T) {
},
},
}
err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)

err = client.DoUpdate(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{})
assert.ErrorContains(t, err, "check for image")
assert.ErrorContains(t, err, "something went wrong")
}

func TestDoUpdatePullFail(t *testing.T) {
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: "testnamegoeshere",
tmpdir, err := ioutil.TempDir("", "enginedir")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
metadata := clitypes.RuntimeMetadata{EngineImage: clitypes.EnterpriseEngineImage}
err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
assert.NilError(t, err)
opts := clitypes.EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: "testnamegoeshere",
RuntimeMetadataDir: tmpdir,
}
client := baseClient{
cclient: &fakeContainerdClient{
@ -284,35 +233,68 @@ func TestDoUpdatePullFail(t *testing.T) {
},
},
}
err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)

err = client.DoUpdate(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{})
assert.ErrorContains(t, err, "unable to pull")
assert.ErrorContains(t, err, "pull failure")
}

func TestDoUpdateEngineMissing(t *testing.T) {
func TestActivateDoUpdateVerifyImageName(t *testing.T) {
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: "testnamegoeshere",
}
registryPrefix := "registryprefixgoeshere"
image := &fakeImage{
nameFunc: func() string {
return "imagenamehere"
return registryPrefix + "/ce-engine:engineversion"
},
}
container := &fakeContainer{
imageFunc: func(context.Context) (containerd.Image, error) {
return image, nil
},
}
requestedImage := "unset"
client := baseClient{
cclient: &fakeContainerdClient{
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return image, nil

},
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{}, nil
return []containerd.Container{container}, nil
},
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
requestedImage = ref
return nil, fmt.Errorf("something went wrong")

},
},
}
err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.ErrorContains(t, err, "unable to find existing engine")
tmpdir, err := ioutil.TempDir("", "enginedir")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
metadata := clitypes.RuntimeMetadata{EngineImage: clitypes.EnterpriseEngineImage}
err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
assert.NilError(t, err)

opts := clitypes.EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
EngineImage: "testnamegoeshere",
ConfigFile: "/tmp/configfilegoeshere",
RuntimeMetadataDir: tmpdir,
}

err = client.ActivateEngine(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{})
assert.ErrorContains(t, err, "check for image")
assert.ErrorContains(t, err, "something went wrong")
expectedImage := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)
assert.Assert(t, requestedImage == expectedImage, "%s != %s", requestedImage, expectedImage)
}

func TestGetReleaseNotesURL(t *testing.T) {
imageName := "bogus image name #$%&@!"
url := getReleaseNotesURL(imageName)
assert.Equal(t, url, clitypes.ReleaseNotePrefix+"/")
imageName = "foo.bar/valid/repowithouttag"
url = getReleaseNotesURL(imageName)
assert.Equal(t, url, clitypes.ReleaseNotePrefix+"/")
imageName = "foo.bar/valid/repowithouttag:tag123"
url = getReleaseNotesURL(imageName)
assert.Equal(t, url, clitypes.ReleaseNotePrefix+"/tag123")
}

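The tests above pin down the behavior of getReleaseNotesURL without showing its body. A minimal sketch consistent with those assertions (an assumption, not taken from this diff — the actual implementation may differ) could look like:

package containerizedengine

import (
	"fmt"

	clitypes "github.com/docker/cli/types"
	"github.com/docker/distribution/reference"
)

// getReleaseNotesURL maps an engine image name to a release-notes link; a
// parse failure or a missing tag falls back to ReleaseNotePrefix + "/".
func getReleaseNotesURL(imageName string) string {
	tag := ""
	parsedRef, err := reference.ParseNormalizedNamed(imageName)
	if err == nil {
		if taggedRef, ok := parsedRef.(reference.NamedTagged); ok {
			tag = taggedRef.Tag()
		}
	}
	return fmt.Sprintf("%s/%s", clitypes.ReleaseNotePrefix, tag)
}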
@ -1,72 +0,0 @@
package containerizedengine

import (
"context"
"sort"

registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/distribution/reference"
ver "github.com/hashicorp/go-version"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

// GetEngineVersions reports the versions of the engine that are available
func (c baseClient) GetEngineVersions(ctx context.Context, registryClient registryclient.RegistryClient, currentVersion, imageName string) (AvailableVersions, error) {
imageRef, err := reference.ParseNormalizedNamed(imageName)
if err != nil {
return AvailableVersions{}, err
}

tags, err := registryClient.GetTags(ctx, imageRef)
if err != nil {
return AvailableVersions{}, err
}

return parseTags(tags, currentVersion)
}

func parseTags(tags []string, currentVersion string) (AvailableVersions, error) {
var ret AvailableVersions
currentVer, err := ver.NewVersion(currentVersion)
if err != nil {
return ret, errors.Wrapf(err, "failed to parse existing version %s", currentVersion)
}
downgrades := []DockerVersion{}
patches := []DockerVersion{}
upgrades := []DockerVersion{}
currentSegments := currentVer.Segments()
for _, tag := range tags {
tmp, err := ver.NewVersion(tag)
if err != nil {
logrus.Debugf("Unable to parse %s: %s", tag, err)
continue
}
testVersion := DockerVersion{Version: *tmp, Tag: tag}
if testVersion.LessThan(currentVer) {
downgrades = append(downgrades, testVersion)
continue
}
testSegments := testVersion.Segments()
// lib always provides min 3 segments
if testSegments[0] == currentSegments[0] &&
testSegments[1] == currentSegments[1] {
patches = append(patches, testVersion)
} else {
upgrades = append(upgrades, testVersion)
}
}
sort.Slice(downgrades, func(i, j int) bool {
return downgrades[i].Version.LessThan(&downgrades[j].Version)
})
sort.Slice(patches, func(i, j int) bool {
return patches[i].Version.LessThan(&patches[j].Version)
})
sort.Slice(upgrades, func(i, j int) bool {
return upgrades[i].Version.LessThan(&upgrades[j].Version)
})
ret.Downgrades = downgrades
ret.Patches = patches
ret.Upgrades = upgrades
return ret, nil
}
@ -20,6 +20,7 @@ type (
parseLicenseFunc func(license []byte) (parsedLicense *model.IssuedLicense, err error)
storeLicenseFunc func(ctx context.Context, dclnt licensing.WrappedDockerClient, licenses *model.IssuedLicense, localRootDir string) error
loadLocalLicenseFunc func(ctx context.Context, dclnt licensing.WrappedDockerClient) (*model.Subscription, error)
summarizeLicenseFunc func(*model.CheckResponse, string) *model.Subscription
}
)

@ -102,3 +103,10 @@ func (c *fakeLicensingClient) LoadLocalLicense(ctx context.Context, dclnt licens
}
return nil, nil
}

func (c *fakeLicensingClient) SummarizeLicense(cr *model.CheckResponse, keyid string) *model.Subscription {
if c.summarizeLicenseFunc != nil {
return c.summarizeLicenseFunc(cr, keyid)
}
return nil
}

@ -10,7 +10,10 @@ var (

// licensingPublicKey is the official public license key for store.docker.com
// nolint: lll
licensingPublicKey = "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0Ka2lkOiBKN0xEOjY3VlI6TDVIWjpVN0JBOjJPNEc6NEFMMzpPRjJOOkpIR0I6RUZUSDo1Q1ZROk1GRU86QUVJVAoKTUlJQ0lqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FnOEFNSUlDQ2dLQ0FnRUF5ZEl5K2xVN283UGNlWSs0K3MrQwpRNU9FZ0N5RjhDeEljUUlXdUs4NHBJaVpjaVk2NzMweUNZbndMU0tUbHcrVTZVQy9RUmVXUmlvTU5ORTVEczVUCllFWGJHRzZvbG0ycWRXYkJ3Y0NnKzJVVUgvT2NCOVd1UDZnUlBIcE1GTXN4RHpXd3ZheThKVXVIZ1lVTFVwbTEKSXYrbXE3bHA1blEvUnhyVDBLWlJBUVRZTEVNRWZHd20zaE1PL2dlTFBTK2hnS1B0SUhsa2c2L1djb3hUR29LUAo3OWQvd2FIWXhHTmw3V2hTbmVpQlN4YnBiUUFLazIxbGc3OThYYjd2WnlFQVRETXJSUjlNZUU2QWRqNUhKcFkzCkNveVJBUENtYUtHUkNLNHVvWlNvSXUwaEZWbEtVUHliYncwMDBHTyt3YTJLTjhVd2dJSW0waTVJMXVXOUdrcTQKempCeTV6aGdxdVVYYkc5YldQQU9ZcnE1UWE4MUR4R2NCbEp5SFlBcCtERFBFOVRHZzR6WW1YakpueFpxSEVkdQpHcWRldlo4WE1JMHVrZmtHSUkxNHdVT2lNSUlJclhsRWNCZi80Nkk4Z1FXRHp4eWNaZS9KR1grTEF1YXlYcnlyClVGZWhWTlVkWlVsOXdYTmFKQitrYUNxejVRd2FSOTNzR3crUVNmdEQwTnZMZTdDeU9IK0U2dmc2U3QvTmVUdmcKdjhZbmhDaVhJbFo4SE9mSXdOZTd0RUYvVWN6NU9iUHlrbTN0eWxyTlVqdDBWeUFtdHRhY1ZJMmlHaWhjVVBybQprNGxWSVo3VkQvTFNXK2k3eW9TdXJ0cHNQWGNlMnBLRElvMzBsSkdoTy8zS1VtbDJTVVpDcXpKMXlFbUtweXNICjVIRFc5Y3NJRkNBM2RlQWpmWlV2TjdVQ0F3RUFBUT09Ci0tLS0tRU5EIFBVQkxJQyBLRVktLS0tLQo="
licensingPublicKeys = []string{
"LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0Ka2lkOiBKN0xEOjY3VlI6TDVIWjpVN0JBOjJPNEc6NEFMMzpPRjJOOkpIR0I6RUZUSDo1Q1ZROk1GRU86QUVJVAoKTUlJQ0lqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FnOEFNSUlDQ2dLQ0FnRUF5ZEl5K2xVN283UGNlWSs0K3MrQwpRNU9FZ0N5RjhDeEljUUlXdUs4NHBJaVpjaVk2NzMweUNZbndMU0tUbHcrVTZVQy9RUmVXUmlvTU5ORTVEczVUCllFWGJHRzZvbG0ycWRXYkJ3Y0NnKzJVVUgvT2NCOVd1UDZnUlBIcE1GTXN4RHpXd3ZheThKVXVIZ1lVTFVwbTEKSXYrbXE3bHA1blEvUnhyVDBLWlJBUVRZTEVNRWZHd20zaE1PL2dlTFBTK2hnS1B0SUhsa2c2L1djb3hUR29LUAo3OWQvd2FIWXhHTmw3V2hTbmVpQlN4YnBiUUFLazIxbGc3OThYYjd2WnlFQVRETXJSUjlNZUU2QWRqNUhKcFkzCkNveVJBUENtYUtHUkNLNHVvWlNvSXUwaEZWbEtVUHliYncwMDBHTyt3YTJLTjhVd2dJSW0waTVJMXVXOUdrcTQKempCeTV6aGdxdVVYYkc5YldQQU9ZcnE1UWE4MUR4R2NCbEp5SFlBcCtERFBFOVRHZzR6WW1YakpueFpxSEVkdQpHcWRldlo4WE1JMHVrZmtHSUkxNHdVT2lNSUlJclhsRWNCZi80Nkk4Z1FXRHp4eWNaZS9KR1grTEF1YXlYcnlyClVGZWhWTlVkWlVsOXdYTmFKQitrYUNxejVRd2FSOTNzR3crUVNmdEQwTnZMZTdDeU9IK0U2dmc2U3QvTmVUdmcKdjhZbmhDaVhJbFo4SE9mSXdOZTd0RUYvVWN6NU9iUHlrbTN0eWxyTlVqdDBWeUFtdHRhY1ZJMmlHaWhjVVBybQprNGxWSVo3VkQvTFNXK2k3eW9TdXJ0cHNQWGNlMnBLRElvMzBsSkdoTy8zS1VtbDJTVVpDcXpKMXlFbUtweXNICjVIRFc5Y3NJRkNBM2RlQWpmWlV2TjdVQ0F3RUFBUT09Ci0tLS0tRU5EIFBVQkxJQyBLRVktLS0tLQo=",
"LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0Ka2V5SUQ6IFpGSjI6Q1c1Szo1M0tSOlo0NUg6NlpVQzpJNFhFOlpUS1A6TVQ1UjpQWFpMOlNTNE46RjQ0NDo0U1Q0CmtpZDogWkZKMjpDVzVLOjUzS1I6WjQ1SDo2WlVDOkk0WEU6WlRLUDpNVDVSOlBYWkw6U1M0TjpGNDQ0OjRTVDQKCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBd1FhVVRaUFhQZnloZFZVdkJkbXkKZlViYXZYL1pmdkNkMCtGREdNb0ZQazlUTlE1aVZPSkhaUVVNa2N2d2QrdVdaV3dvdWtEUGhZaWxEQTZ6Y3krQQowdERFQkF0Nmc5TGM3UFNXU1BZMTJpbWxnbC85RmJzQnZsSjFRc1RJNGlPUjQ1K0FsMHMxMWhaNG0wR1k4UXQ4CnpFN0RYU1BNUzVRTHlUcHlEemZkQURVcWFGRVcxNTVOQ3BaKzZ6N0lHZCt0V2xjalB3QzQwb3ppbWM1bXVUSWgKb2w1WG1hUFREYk45VzhDWGQ1ZWdUeEExZU43YTA3MWR0R1RialFMUEhvb0QxRURsbitvZjZ2VGFReUphWWJmQgpNRHF2NFdraG9QSzJPWWZ5OXVLR1lTNS9ieHIzUWVTUGRoWVFrQzl2YVZsRUtuTjFZaER6VXZVZGR1c3lyRUdICjd3SURBUUFCCi0tLS0tRU5EIFBVQkxJQyBLRVktLS0tLQo=",
}
)

type (

@ -1,6 +1,7 @@
package licenseutils

import (
"bytes"
"context"
"encoding/json"
"fmt"
@ -19,7 +20,7 @@ import (
// HubUser wraps a licensing client and holds key information
// for a user to avoid multiple lookups
type HubUser struct {
client licensing.Client
Client licensing.Client
token string
User model.User
Orgs []model.Org
@ -35,18 +36,22 @@ func (u HubUser) GetOrgByID(orgID string) (model.Org, error) {
return model.Org{}, fmt.Errorf("org %s not found", orgID)
}

// Login to the license server and return a client that can be used to look up and download license files or generate new trial licenses
func Login(ctx context.Context, authConfig *types.AuthConfig) (HubUser, error) {
func getClient() (licensing.Client, error) {
baseURI, err := url.Parse(licensingDefaultBaseURI)
if err != nil {
return HubUser{}, err
return nil, err
}

lclient, err := licensing.New(&licensing.Config{
return licensing.New(&licensing.Config{
BaseURI: *baseURI,
HTTPClient: &http.Client{},
PublicKey: licensingPublicKey,
PublicKeys: licensingPublicKeys,
})
}

// Login to the license server and return a client that can be used to look up and download license files or generate new trial licenses
func Login(ctx context.Context, authConfig *types.AuthConfig) (HubUser, error) {
lclient, err := getClient()
if err != nil {
return HubUser{}, err
}
@ -68,7 +73,7 @@ func Login(ctx context.Context, authConfig *types.AuthConfig) (HubUser, error) {
return HubUser{}, err
}
return HubUser{
client: lclient,
Client: lclient,
token: token,
User: *user,
Orgs: orgs,
@ -78,12 +83,12 @@ func Login(ctx context.Context, authConfig *types.AuthConfig) (HubUser, error) {

// GetAvailableLicenses finds all available licenses for a given account and their orgs
func (u HubUser) GetAvailableLicenses(ctx context.Context) ([]LicenseDisplay, error) {
subs, err := u.client.ListSubscriptions(ctx, u.token, u.User.ID)
subs, err := u.Client.ListSubscriptions(ctx, u.token, u.User.ID)
if err != nil {
return nil, err
}
for _, org := range u.Orgs {
orgSub, err := u.client.ListSubscriptions(ctx, u.token, org.ID)
orgSub, err := u.Client.ListSubscriptions(ctx, u.token, org.ID)
if err != nil {
return nil, err
}
@ -97,7 +102,7 @@ func (u HubUser) GetAvailableLicenses(ctx context.Context) ([]LicenseDisplay, er
// Filter out expired licenses
i := 0
for _, s := range subs {
if s.State != "expired" && s.Expires != nil {
if s.State == "active" && s.Expires != nil {
owner := ""
if s.DockerID == u.User.ID {
owner = u.User.Username
@ -129,42 +134,50 @@ func (u HubUser) GetAvailableLicenses(ctx context.Context) ([]LicenseDisplay, er

// GenerateTrialLicense will generate a new trial license for the specified user or org
func (u HubUser) GenerateTrialLicense(ctx context.Context, targetID string) (*model.IssuedLicense, error) {
subID, err := u.client.GenerateNewTrialSubscription(ctx, u.token, targetID, u.User.Email)
subID, err := u.Client.GenerateNewTrialSubscription(ctx, u.token, targetID, u.User.Email)
if err != nil {
return nil, err
}
return u.client.DownloadLicenseFromHub(ctx, u.token, subID)
return u.Client.DownloadLicenseFromHub(ctx, u.token, subID)
}

// GetIssuedLicense will download a license by ID
func (u HubUser) GetIssuedLicense(ctx context.Context, ID string) (*model.IssuedLicense, error) {
return u.client.DownloadLicenseFromHub(ctx, u.token, ID)
return u.Client.DownloadLicenseFromHub(ctx, u.token, ID)
}

// LoadLocalIssuedLicense will load a local license file
func LoadLocalIssuedLicense(ctx context.Context, filename string) (*model.IssuedLicense, error) {
baseURI, err := url.Parse(licensingDefaultBaseURI)
if err != nil {
return nil, err
}

lclient, err := licensing.New(&licensing.Config{
BaseURI: *baseURI,
HTTPClient: &http.Client{},
PublicKey: licensingPublicKey,
})
lclient, err := getClient()
if err != nil {
return nil, err
}
return doLoadLocalIssuedLicense(ctx, filename, lclient)
}

// GetLicenseSummary summarizes the license for the user
func GetLicenseSummary(ctx context.Context, license model.IssuedLicense) (string, error) {
lclient, err := getClient()
if err != nil {
return "", err
}

cr, err := lclient.VerifyLicense(ctx, license)
if err != nil {
return "", err
}
return lclient.SummarizeLicense(cr, license.KeyID).String(), nil
}

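For context, a hedged usage sketch of the refactored entry points (illustrative only — licenseutils is an internal package, so this would have to live inside the CLI tree, and the credentials are placeholders):

package main

import (
	"context"
	"fmt"

	"github.com/docker/cli/internal/licenseutils"
	"github.com/docker/docker/api/types"
)

// listActiveLicenses logs in and prints the account's licenses; only
// subscriptions in the "active" state survive the filter shown above.
func listActiveLicenses(ctx context.Context, username, password string) error {
	user, err := licenseutils.Login(ctx, &types.AuthConfig{Username: username, Password: password})
	if err != nil {
		return err
	}
	displays, err := user.GetAvailableLicenses(ctx)
	if err != nil {
		return err
	}
	for _, d := range displays {
		fmt.Printf("%+v\n", d)
	}
	return nil
}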
func doLoadLocalIssuedLicense(ctx context.Context, filename string, lclient licensing.Client) (*model.IssuedLicense, error) {
var license model.IssuedLicense
data, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
// The file may contain a leading BOM, which will choke the
// json deserializer.
data = bytes.TrimPrefix(data, []byte("\xef\xbb\xbf"))

err = json.Unmarshal(data, &license)
if err != nil {

@ -43,7 +43,7 @@ func TestGetOrgByID(t *testing.T) {
func TestGetAvailableLicensesListFail(t *testing.T) {
ctx := context.Background()
user := HubUser{
client: &fakeLicensingClient{
Client: &fakeLicensingClient{
listSubscriptionsFunc: func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
return nil, fmt.Errorf("list subscriptions error")
},
@ -59,7 +59,7 @@ func TestGetAvailableLicensesOrgFail(t *testing.T) {
Orgs: []model.Org{
{ID: "orgid"},
},
client: &fakeLicensingClient{
Client: &fakeLicensingClient{
listSubscriptionsFunc: func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
if dockerID == "orgid" {
return nil, fmt.Errorf("list subscriptions org error")
@ -86,7 +86,7 @@ func TestGetAvailableLicensesHappy(t *testing.T) {
Orgname: "orgname",
},
},
client: &fakeLicensingClient{
Client: &fakeLicensingClient{
listSubscriptionsFunc: func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
if dockerID == "orgid" {
return []*model.Subscription{
@ -146,7 +146,7 @@ func TestGetAvailableLicensesHappy(t *testing.T) {
func TestGenerateTrialFail(t *testing.T) {
ctx := context.Background()
user := HubUser{
client: &fakeLicensingClient{
Client: &fakeLicensingClient{
generateNewTrialSubscriptionFunc: func(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error) {
return "", fmt.Errorf("generate trial failure")
},
@ -160,7 +160,7 @@ func TestGenerateTrialFail(t *testing.T) {
func TestGenerateTrialHappy(t *testing.T) {
ctx := context.Background()
user := HubUser{
client: &fakeLicensingClient{
Client: &fakeLicensingClient{
generateNewTrialSubscriptionFunc: func(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error) {
return "subid", nil
},
@ -174,7 +174,7 @@ func TestGenerateTrialHappy(t *testing.T) {
func TestGetIssuedLicense(t *testing.T) {
ctx := context.Background()
user := HubUser{
client: &fakeLicensingClient{},
Client: &fakeLicensingClient{},
}
id := "idgoeshere"
_, err := user.GetIssuedLicense(ctx, id)

66
internal/pkg/containerized/proxy.go
Normal file
@ -0,0 +1,66 @@
package containerized

import (
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
)

var (
proxydir = "/etc/containerd-proxy"
)

type proxyConfig struct {
ID string `json:"-"`
Namespace string `json:"namespace"`
Image string `json:"image"`
ImagePath string `json:"imagePath"`
Args []string `json:"args"`
Scope string `json:"scope"`
}

func updateConfig(name, newImage string) error {
cfg, err := loadConfig(name)
if err != nil && os.IsNotExist(err) {
return nil
}
if err != nil {
return err
}
cfg.Image = newImage
cfg.ImagePath = ""
return storeConfig(name, cfg)
}

func loadConfig(name string) (*proxyConfig, error) {
configFile := filepath.Join(proxydir, name+".json")
data, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var cfg proxyConfig
err = json.Unmarshal(data, &cfg)
if err != nil {
return nil, err
}
return &cfg, nil
}

// storeConfig will write out the config only if it already exists
func storeConfig(name string, cfg *proxyConfig) error {
configFile := filepath.Join(proxydir, name+".json")
fd, err := os.OpenFile(configFile, os.O_RDWR, 0644)
if err != nil && os.IsNotExist(err) {
return nil
}
if err != nil {
return err
}
err = fd.Truncate(0)
if err != nil {
return err
}
enc := json.NewEncoder(fd)
return enc.Encode(cfg)
}
68
internal/pkg/containerized/proxy_test.go
Normal file
@ -0,0 +1,68 @@
package containerized

import (
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"testing"

"gotest.tools/assert"
)

func TestUpdateConfigNotExist(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "cfg-update")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
origProxyDir := proxydir
defer func() {
proxydir = origProxyDir
}()
proxydir = tmpdir
name := "myname"
newImage := "newimage:foo"
err = updateConfig(name, newImage)
assert.NilError(t, err)
}

func TestUpdateConfigBadJson(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "cfg-update")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
origProxyDir := proxydir
defer func() {
proxydir = origProxyDir
}()
proxydir = tmpdir
filename := filepath.Join(tmpdir, "dockerd.json")
err = ioutil.WriteFile(filename, []byte("not json"), 0644)
assert.NilError(t, err)
name := "dockerd"
newImage := "newimage:foo"
err = updateConfig(name, newImage)
assert.ErrorContains(t, err, "invalid character")
}

func TestUpdateConfigHappyPath(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "cfg-update")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
origProxyDir := proxydir
defer func() {
proxydir = origProxyDir
}()
proxydir = tmpdir
filename := filepath.Join(tmpdir, "dockerd.json")
err = ioutil.WriteFile(filename, []byte("{}"), 0644)
assert.NilError(t, err)
name := "dockerd"
newImage := "newimage:foo"
err = updateConfig(name, newImage)
assert.NilError(t, err)
data, err := ioutil.ReadFile(filename)
assert.NilError(t, err)
var cfg map[string]string
err = json.Unmarshal(data, &cfg)
assert.NilError(t, err)
assert.Assert(t, cfg["image"] == newImage)
}
@ -50,6 +50,10 @@ func WithUpgrade(i containerd.Image) containerd.UpdateContainerOpts {
return err
}
c.Image = i.Name()
err = updateConfig(c.ID, c.Image)
if err != nil {
return err
}
c.SnapshotKey = revision.Key
return nil
}
@ -74,6 +78,10 @@ func WithRollback(ctx context.Context, client *containerd.Client, c *containers.
return fmt.Errorf("snapshot %s has an empty service image label", prev.Key)
}
c.Image = snapshotImage
err = updateConfig(c.ID, c.Image)
if err != nil {
return err
}
c.SnapshotKey = prev.Key
return nil
}

@ -12,7 +12,7 @@ import (
manifeststore "github.com/docker/cli/cli/manifest/store"
registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/cli/cli/trust"
"github.com/docker/cli/internal/containerizedengine"
clitypes "github.com/docker/cli/types"
"github.com/docker/docker/client"
notaryclient "github.com/theupdateframework/notary/client"
)
@ -20,7 +20,7 @@ import (
// NotaryClientFuncType defines a function that returns a fake notary client
type NotaryClientFuncType func(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error)
type clientInfoFuncType func() command.ClientInfo
type containerizedEngineFuncType func(string) (containerizedengine.Client, error)
type containerizedEngineFuncType func(string) (clitypes.ContainerizedClient, error)

// FakeCli emulates the default DockerCli
type FakeCli struct {
@ -172,7 +172,7 @@ func EnableContentTrust(c *FakeCli) {
}

// NewContainerizedEngineClient returns a containerized engine client
func (c *FakeCli) NewContainerizedEngineClient(sockPath string) (containerizedengine.Client, error) {
func (c *FakeCli) NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error) {
if c.containerizedEngineClientFunc != nil {
return c.containerizedEngineClientFunc(sockPath)
}

127
internal/versions/versions.go
Normal file
@ -0,0 +1,127 @@
package versions

import (
"context"
"encoding/json"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"

registryclient "github.com/docker/cli/cli/registry/client"
clitypes "github.com/docker/cli/types"
"github.com/docker/distribution/reference"
ver "github.com/hashicorp/go-version"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

const (
// defaultRuntimeMetadataDir is the location where the metadata file is stored
defaultRuntimeMetadataDir = "/var/lib/docker-engine"
)

// GetEngineVersions reports the versions of the engine that are available
func GetEngineVersions(ctx context.Context, registryClient registryclient.RegistryClient, registryPrefix, imageName, versionString string) (clitypes.AvailableVersions, error) {

if imageName == "" {
var err error
localMetadata, err := GetCurrentRuntimeMetadata("")
if err != nil {
return clitypes.AvailableVersions{}, err
}
imageName = localMetadata.EngineImage
}
imageRef, err := reference.ParseNormalizedNamed(path.Join(registryPrefix, imageName))
if err != nil {
return clitypes.AvailableVersions{}, err
}

tags, err := registryClient.GetTags(ctx, imageRef)
if err != nil {
return clitypes.AvailableVersions{}, err
}

return parseTags(tags, versionString)
}

func parseTags(tags []string, currentVersion string) (clitypes.AvailableVersions, error) {
var ret clitypes.AvailableVersions
currentVer, err := ver.NewVersion(currentVersion)
if err != nil {
return ret, errors.Wrapf(err, "failed to parse existing version %s", currentVersion)
}
downgrades := []clitypes.DockerVersion{}
patches := []clitypes.DockerVersion{}
upgrades := []clitypes.DockerVersion{}
currentSegments := currentVer.Segments()
for _, tag := range tags {
tmp, err := ver.NewVersion(tag)
if err != nil {
logrus.Debugf("Unable to parse %s: %s", tag, err)
continue
}
testVersion := clitypes.DockerVersion{Version: *tmp, Tag: tag}
if testVersion.LessThan(currentVer) {
downgrades = append(downgrades, testVersion)
continue
}
testSegments := testVersion.Segments()
// lib always provides min 3 segments
if testSegments[0] == currentSegments[0] &&
testSegments[1] == currentSegments[1] {
patches = append(patches, testVersion)
} else {
upgrades = append(upgrades, testVersion)
}
}
sort.Slice(downgrades, func(i, j int) bool {
return downgrades[i].Version.LessThan(&downgrades[j].Version)
})
sort.Slice(patches, func(i, j int) bool {
return patches[i].Version.LessThan(&patches[j].Version)
})
sort.Slice(upgrades, func(i, j int) bool {
return upgrades[i].Version.LessThan(&upgrades[j].Version)
})
ret.Downgrades = downgrades
ret.Patches = patches
ret.Upgrades = upgrades
return ret, nil
}

// GetCurrentRuntimeMetadata loads the current daemon runtime metadata information from the local host
func GetCurrentRuntimeMetadata(metadataDir string) (*clitypes.RuntimeMetadata, error) {
if metadataDir == "" {
metadataDir = defaultRuntimeMetadataDir
}
filename := filepath.Join(metadataDir, clitypes.RuntimeMetadataName+".json")

data, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
var res clitypes.RuntimeMetadata
err = json.Unmarshal(data, &res)
if err != nil {
return nil, errors.Wrapf(err, "malformed runtime metadata file %s", filename)
}
return &res, nil
}

// WriteRuntimeMetadata stores the metadata on the local system
func WriteRuntimeMetadata(metadataDir string, metadata *clitypes.RuntimeMetadata) error {
if metadataDir == "" {
metadataDir = defaultRuntimeMetadataDir
}
filename := filepath.Join(metadataDir, clitypes.RuntimeMetadataName+".json")

data, err := json.Marshal(metadata)
if err != nil {
return err
}

os.Remove(filename)
return ioutil.WriteFile(filename, data, 0644)
}
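A small companion test (an illustration written for this edit, not part of the diff) showing how parseTags buckets tags relative to the current version:

package versions

import (
	"testing"

	"gotest.tools/assert"
)

func TestParseTagsBucketsExample(t *testing.T) {
	tags := []string{"18.03.1", "18.09.0", "18.09.1", "19.03.0"}
	avail, err := parseTags(tags, "18.09.0")
	assert.NilError(t, err)
	assert.Equal(t, len(avail.Downgrades), 1) // 18.03.1 is older than current
	assert.Equal(t, len(avail.Patches), 2)    // 18.09.0 and 18.09.1 share major.minor
	assert.Equal(t, len(avail.Upgrades), 1)   // 19.03.0 is a newer major.minor
}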
@ -1,22 +1,15 @@
package containerizedengine
package versions

import (
"context"
"io/ioutil"
"os"
"path/filepath"
"testing"

clitypes "github.com/docker/cli/types"
"gotest.tools/assert"
)

func TestGetEngineVersionsBadImage(t *testing.T) {
ctx := context.Background()
client := baseClient{}

currentVersion := "currentversiongoeshere"
imageName := "this is an illegal image $%^&"
_, err := client.GetEngineVersions(ctx, nil, currentVersion, imageName)
assert.ErrorContains(t, err, "invalid reference format")
}

func TestParseTagsSimple(t *testing.T) {
tags := []string{"1.0.0", "1.1.2", "1.1.1", "1.2.2"}
currentVersion := "1.1.0"
@ -78,3 +71,35 @@ func TestParseBadCurrent2(t *testing.T) {
_, err := parseTags(tags, currentVersion)
assert.ErrorContains(t, err, "failed to parse existing")
}

func TestGetCurrentRuntimeMetadataNotPresent(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "docker-root")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
_, err = GetCurrentRuntimeMetadata(tmpdir)
assert.ErrorType(t, err, os.IsNotExist)
}

func TestGetCurrentRuntimeMetadataBadJson(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "docker-root")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
filename := filepath.Join(tmpdir, clitypes.RuntimeMetadataName+".json")
err = ioutil.WriteFile(filename, []byte("not json"), 0644)
assert.NilError(t, err)
_, err = GetCurrentRuntimeMetadata(tmpdir)
assert.ErrorContains(t, err, "malformed runtime metadata file")
}

func TestGetCurrentRuntimeMetadataHappyPath(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "docker-root")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
metadata := clitypes.RuntimeMetadata{Platform: "platformgoeshere"}
err = WriteRuntimeMetadata(tmpdir, &metadata)
assert.NilError(t, err)

res, err := GetCurrentRuntimeMetadata(tmpdir)
assert.NilError(t, err)
assert.Equal(t, res.Platform, "platformgoeshere")
}
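One more illustrative test (assumed, not in the diff): the on-disk shape of the file written by WriteRuntimeMetadata, whose keys come from the RuntimeMetadata struct tags shown in types/types.go below:

package versions

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"testing"

	clitypes "github.com/docker/cli/types"
	"gotest.tools/assert"
)

func TestRuntimeMetadataOnDisk(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "docker-root")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpdir)
	metadata := clitypes.RuntimeMetadata{EngineImage: clitypes.CommunityEngineImage}
	assert.NilError(t, WriteRuntimeMetadata(tmpdir, &metadata))
	// The file lands at <dir>/distribution_based_engine.json.
	data, err := ioutil.ReadFile(filepath.Join(tmpdir, clitypes.RuntimeMetadataName+".json"))
	assert.NilError(t, err)
	assert.Assert(t, strings.Contains(string(data), `"engine_image":"engine-community"`))
}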
@ -25,7 +25,7 @@ func generateManPages(opts *options) error {
}

stdin, stdout, stderr := term.StdStreams()
dockerCli := command.NewDockerCli(stdin, stdout, stderr, false)
dockerCli := command.NewDockerCli(stdin, stdout, stderr, false, nil)
cmd := &cobra.Command{Use: "docker"}
commands.AddCommands(cmd, dockerCli)
source := filepath.Join(opts.source, descriptionSourcePath)

@ -38,5 +38,13 @@ This example sets the docker image ENV variable DEBUG to true by default.

# tar -c . | docker image import -c="ENV DEBUG true" - exampleimagedir

## When the daemon supports multiple operating systems
If the daemon supports multiple operating systems, and the image being imported
does not match the daemon's default operating system, it may be necessary to add
`--platform`, for example when importing a Linux image into a Windows daemon.

# docker image import --platform=linux .\linuximage.tar

# See also
**docker-export(1)** to export the contents of a filesystem as a tar archive to STDOUT.

@ -1,5 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -eu -o pipefail
|
||||
|
||||
# TODO fetch images?
|
||||
./scripts/test/engine/wrapper
|
||||
@ -1,107 +0,0 @@
#!/usr/bin/env bash
# Run engine specific integration tests against the latest containerd-in-docker
set -eu -o pipefail

function container_ip {
    local cid=$1
    local network=$2
    docker inspect \
        -f "{{.NetworkSettings.Networks.${network}.IPAddress}}" "$cid"
}

function fetch_images {
    ## TODO - not yet implemented
    ./scripts/test/engine/load-image fetch-only
}

function setup {
    ### start containerd and log to a file
    echo "Starting containerd in the background"
    containerd &> /tmp/containerd.err &
    echo "Waiting for containerd to be responsive"
    # shellcheck disable=SC2034
    for i in $(seq 1 60); do
        if ctr namespace ls > /dev/null; then
            break
        fi
        sleep 1
    done
    ctr namespace ls > /dev/null
    echo "containerd is ready"

    # TODO Once https://github.com/moby/moby/pull/33355 or equivalent
    # is merged, then this can be optimized to preload the image
    # saved during the build phase
}

function cleanup {
    #### if testexit is non-zero dump the containerd logs with a banner
    if [ "${testexit}" -ne 0 ] ; then
        echo "FAIL: dumping containerd logs"
        echo ""
        cat /tmp/containerd.err
        if [ -f /var/log/engine.log ] ; then
            echo ""
            echo "FAIL: dumping engine log"
            echo ""
        else
            echo ""
            echo "FAIL: engine log missing"
            echo ""
        fi
        echo "FAIL: remaining namespaces"
        ctr namespace ls || /bin/true
        echo "FAIL: remaining containers"
        ctr --namespace docker container ls || /bin/true
        echo "FAIL: remaining tasks"
        ctr --namespace docker task ls || /bin/true
        echo "FAIL: remaining snapshots"
        ctr --namespace docker snapshots ls || /bin/true
        echo "FAIL: remaining images"
        ctr --namespace docker image ls || /bin/true
    fi
}

function runtests {
    # shellcheck disable=SC2086
    env -i \
        GOPATH="$GOPATH" \
        PATH="$PWD/build/:${PATH}" \
        VERSION=${VERSION} \
        "$(which go)" test -p 1 -parallel 1 -v ./e2eengine/... ${TESTFLAGS-}
}

cmd=${1-}

case "$cmd" in
    setup)
        setup
        exit
        ;;
    cleanup)
        cleanup
        exit
        ;;
    fetch-images)
        fetch_images
        exit
        ;;
    test)
        runtests
        ;;
    run|"")
        testexit=0
        runtests || testexit=$?
        cleanup
        exit $testexit
        ;;
    shell)
        $SHELL
        ;;
    *)
        echo "Unknown command: $cmd"
        echo "Usage: "
        echo "    $0 [setup | cleanup | test | run]"
        exit 1
        ;;
esac
@ -1,18 +0,0 @@
#!/usr/bin/env bash
# Setup, run and teardown engine test suite in containers.
set -eu -o pipefail

./scripts/test/engine/run setup

testexit=0

test_cmd="test"
if [[ -n "${TEST_DEBUG-}" ]]; then
    test_cmd="shell"
fi

./scripts/test/engine/run "$test_cmd" || testexit="$?"

export testexit
./scripts/test/engine/run cleanup
exit "$testexit"
88
types/types.go
Normal file
@ -0,0 +1,88 @@
package types

import (
"context"
"io"

"github.com/docker/docker/api/types"
ver "github.com/hashicorp/go-version"
)

const (
// CommunityEngineImage is the repo name for the community engine
CommunityEngineImage = "engine-community"

// EnterpriseEngineImage is the repo name for the enterprise engine
EnterpriseEngineImage = "engine-enterprise"

// RegistryPrefix is the default prefix used to pull engine images
RegistryPrefix = "docker.io/store/docker"

// ReleaseNotePrefix is where to point users to for release notes
ReleaseNotePrefix = "https://docs.docker.com/releasenotes"

// RuntimeMetadataName is the name of the runtime metadata file
// When stored as a label on the container it is prefixed by "com.docker."
RuntimeMetadataName = "distribution_based_engine"
)

// ContainerizedClient can be used to manage the lifecycle of
// dockerd running as a container on containerd.
type ContainerizedClient interface {
Close() error
ActivateEngine(ctx context.Context,
opts EngineInitOptions,
out OutStream,
authConfig *types.AuthConfig) error
DoUpdate(ctx context.Context,
opts EngineInitOptions,
out OutStream,
authConfig *types.AuthConfig) error
}

// EngineInitOptions contains the configuration settings
// used during initialization of a containerized docker engine
type EngineInitOptions struct {
RegistryPrefix string
EngineImage string
EngineVersion string
ConfigFile string
RuntimeMetadataDir string
}

// AvailableVersions groups the available versions which were discovered
type AvailableVersions struct {
Downgrades []DockerVersion
Patches []DockerVersion
Upgrades []DockerVersion
}

// DockerVersion wraps a semantic version to retain the original tag
// since the docker date based versions don't strictly follow semantic
// versioning (leading zeros, etc.)
type DockerVersion struct {
ver.Version
Tag string
}

// Update stores available updates for rendering in a table
type Update struct {
Type string
Version string
Notes string
}

// OutStream is an output stream used to write normal program output.
type OutStream interface {
io.Writer
FD() uintptr
IsTerminal() bool
}

// RuntimeMetadata holds platform information about the daemon
type RuntimeMetadata struct {
Platform string `json:"platform"`
ContainerdMinVersion string `json:"containerd_min_version"`
Runtime string `json:"runtime"`
EngineImage string `json:"engine_image"`
}
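To see why DockerVersion carries the raw Tag alongside the parsed version, a hedged illustration (assumed, not from the diff) of go-version's normalization of date-based tags:

package main

import (
	"fmt"

	clitypes "github.com/docker/cli/types"
	ver "github.com/hashicorp/go-version"
)

func main() {
	v, err := ver.NewVersion("18.09.0")
	if err != nil {
		panic(err)
	}
	dv := clitypes.DockerVersion{Version: *v, Tag: "18.09.0"}
	fmt.Println(dv.Version.String()) // "18.9.0": the leading zero is normalized away
	fmt.Println(dv.Tag)              // "18.09.0": still usable as the published image tag
}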
26
vendor.conf
@ -2,8 +2,8 @@ github.com/agl/ed25519 5312a61534124124185d41f09206b9fef1d88403
github.com/asaskevich/govalidator f9ffefc3facfbe0caee3fea233cbb6e8208f4541
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/beorn7/perks 3a771d992973f24aa725d07868b467d1ddfceafb
github.com/containerd/console 4d8a41f4ce5b9bae77c41786ea2458330f43f081
github.com/containerd/containerd v1.2.0-beta.0
github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
github.com/containerd/containerd bb0f83ab6eec47c3316bb763d5c20a82c7750c31
github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371
github.com/containerd/fifo 3d5202a
github.com/containerd/typeurl f694355
@ -12,7 +12,7 @@ github.com/cpuguy83/go-md2man v1.0.8
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 # v1.1.0
github.com/dgrijalva/jwt-go a2c85815a77d0f951e33ba4db5ae93629a1530af
github.com/docker/distribution 83389a148052d74ac602f5f1d62f86ff2f3c4aa5
github.com/docker/docker 2629fe93266e82751af4f1c7568e21060f065b73
github.com/docker/docker d2ecc7bad104139c118249ad159b45315a022754 https://github.com/docker/engine # 18.09 branch
github.com/docker/docker-credential-helpers 5241b46610f2491efdf9d1c85f1ddf5b02f6d962
# the docker/go package contains a customized version of canonical/json
# and is used by Notary. The package is periodically rebased on current Go versions.
@ -22,11 +22,11 @@ github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
github.com/docker/go-units 47565b4f722fb6ceae66b95f853feed578a4a51c # v0.3.3
github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
github.com/docker/licensing 369e530
github.com/docker/licensing f2eae57157a06681b024f1690923d03e414179a0
github.com/docker/swarmkit cfa742c8abe6f8e922f6e4e920153c408e7d9c3b
github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff
github.com/ghodss/yaml 0ca9ea5df5451ffdf184b4428c902747c2c11cd7 # v1.0.0
github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
github.com/gogo/googleapis b23578765ee54ff6bceff57f397d833bf4ca6869
github.com/gogo/protobuf v1.1.1
github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998
github.com/golang/protobuf v1.1.0
@ -47,18 +47,18 @@ github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 #
github.com/json-iterator/go ab8a2e0c74be9d3be70b3184d9acc634935ded82 # 1.1.4
github.com/mattn/go-shellwords v1.0.3
github.com/matttproud/golang_protobuf_extensions v1.0.1
github.com/Microsoft/hcsshim v0.6.11
github.com/Microsoft/go-winio v0.4.9
github.com/Microsoft/hcsshim 44c060121b68e8bdc40b411beba551f3b4ee9e55
github.com/Microsoft/go-winio v0.4.10
github.com/miekg/pkcs11 287d9350987cc9334667882061e202e96cdfb4d0
github.com/mitchellh/mapstructure f15292f7a699fcc1a38a80977f80a046874ba8ac
github.com/moby/buildkit e8c7acc99c33f5e73b8c38f618406392dee59675
github.com/moby/buildkit 6812dac65e0440bb75affce1fb2175e640edc15d
github.com/modern-go/concurrent bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 # 1.0.3
github.com/modern-go/reflect2 4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd # 1.0.1
github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b
github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc ad0f5255060d36872be04de22f8731f38ef2d7b1
github.com/opencontainers/runc 20aff4f0488c6d4b8df4d85b4f63f1f704c11abd
github.com/opencontainers/runtime-spec v1.0.1
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
github.com/peterbourgon/diskv 5f041e8faa004a95c88a202771f4cc3e991971e6 # v2.0.1
@ -92,9 +92,9 @@ google.golang.org/grpc v1.12.0
gopkg.in/inf.v0 d2d2541c53f18d2a059457998ce2876cc8e67cbf # v0.9.1
gopkg.in/yaml.v2 5420a8b6744d3b0345ab293f6fcba19c978f1183 # v2.2.1
gotest.tools v2.1.0
k8s.io/api kubernetes-1.11.0
k8s.io/apimachinery kubernetes-1.11.0
k8s.io/client-go kubernetes-1.11.0
k8s.io/api kubernetes-1.11.2
k8s.io/apimachinery kubernetes-1.11.2
k8s.io/client-go kubernetes-1.11.2
k8s.io/kube-openapi d8ea2fe547a448256204cfc68dfee7b26c720acb
k8s.io/kubernetes v1.11.0
k8s.io/kubernetes v1.11.2
vbom.ml/util 256737ac55c46798123f754ab7d2c784e2c71783

3
vendor/github.com/Microsoft/go-winio/fileinfo.go
generated
vendored
@ -20,7 +20,8 @@ const (
// FileBasicInfo contains file access time and file attributes information.
type FileBasicInfo struct {
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
FileAttributes uintptr // includes padding
FileAttributes uint32
pad uint32 // padding
}

// GetFileBasicInfo retrieves times and attributes for a file.

18
vendor/github.com/Microsoft/hcsshim/README.md
generated
vendored
@ -1,12 +1,13 @@
# hcsshim

This package supports launching Windows Server containers from Go. It is
primarily used in the [Docker Engine](https://github.com/docker/docker) project,
but it can be freely used by other projects as well.
[Build status](https://ci.appveyor.com/project/WindowsVirtualization/hcsshim/branch/master)

This package contains the Golang interface for using the Windows [Host Compute Service](https://blogs.technet.microsoft.com/virtualization/2017/01/27/introducing-the-host-compute-service-hcs/) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS).

It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well.

## Contributing
---------------

This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.microsoft.com.
@ -19,6 +20,11 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

## Dependencies

This project requires Golang 1.9 or newer to build.

For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements).

## Reporting Security Issues

@ -29,5 +35,7 @@ email to ensure we received your original message. Further information, includin
[MSRC PGP](https://technet.microsoft.com/en-us/security/dn606155) key, can be found in
the [Security TechCenter](https://technet.microsoft.com/en-us/security/default).

-------------------------------------------
For additional details, see [Report a Computer Security Vulnerability](https://technet.microsoft.com/en-us/security/ff852094.aspx) on Technet

---------------
Copyright (c) 2018 Microsoft Corp. All rights reserved.

28
vendor/github.com/Microsoft/hcsshim/activatelayer.go
generated
vendored
@ -1,28 +0,0 @@
package hcsshim

import "github.com/sirupsen/logrus"

// ActivateLayer will find the layer with the given id and mount its filesystem.
// For a read/write layer, the mounted filesystem will appear as a volume on the
// host, while a read-only layer is generally expected to be a no-op.
// An activated layer must later be deactivated via DeactivateLayer.
func ActivateLayer(info DriverInfo, id string) error {
title := "hcsshim::ActivateLayer "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)

infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}

err = activateLayer(&infop, id)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return err
}

logrus.Debugf(title+" - succeeded id=%s flavour=%d", id, info.Flavour)
return nil
}
748
vendor/github.com/Microsoft/hcsshim/container.go
generated
vendored
@ -1,800 +1,192 @@
package hcsshim

import (
"encoding/json"
"fmt"
"os"
"sync"
"syscall"
"time"

"github.com/sirupsen/logrus"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/mergemaps"
"github.com/Microsoft/hcsshim/internal/schema1"
)

var (
defaultTimeout = time.Minute * 4
)

const (
pendingUpdatesQuery = `{ "PropertyTypes" : ["PendingUpdates"]}`
statisticsQuery = `{ "PropertyTypes" : ["Statistics"]}`
processListQuery = `{ "PropertyTypes" : ["ProcessList"]}`
mappedVirtualDiskQuery = `{ "PropertyTypes" : ["MappedVirtualDisk"]}`
)

type container struct {
handleLock sync.RWMutex
handle hcsSystem
id string
callbackNumber uintptr
}

// ContainerProperties holds the properties for a container and the processes running in that container
type ContainerProperties struct {
ID string `json:"Id"`
Name string
SystemType string
Owner string
SiloGUID string `json:"SiloGuid,omitempty"`
RuntimeID string `json:"RuntimeId,omitempty"`
IsRuntimeTemplate bool `json:",omitempty"`
RuntimeImagePath string `json:",omitempty"`
Stopped bool `json:",omitempty"`
ExitType string `json:",omitempty"`
AreUpdatesPending bool `json:",omitempty"`
ObRoot string `json:",omitempty"`
Statistics Statistics `json:",omitempty"`
ProcessList []ProcessListItem `json:",omitempty"`
MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"`
}
type ContainerProperties = schema1.ContainerProperties

// MemoryStats holds the memory statistics for a container
type MemoryStats struct {
UsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"`
UsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"`
UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"`
}
type MemoryStats = schema1.MemoryStats

// ProcessorStats holds the processor statistics for a container
type ProcessorStats struct {
TotalRuntime100ns uint64 `json:",omitempty"`
RuntimeUser100ns uint64 `json:",omitempty"`
RuntimeKernel100ns uint64 `json:",omitempty"`
}
type ProcessorStats = schema1.ProcessorStats

// StorageStats holds the storage statistics for a container
type StorageStats struct {
ReadCountNormalized uint64 `json:",omitempty"`
ReadSizeBytes uint64 `json:",omitempty"`
WriteCountNormalized uint64 `json:",omitempty"`
WriteSizeBytes uint64 `json:",omitempty"`
}
type StorageStats = schema1.StorageStats

// NetworkStats holds the network statistics for a container
type NetworkStats struct {
BytesReceived uint64 `json:",omitempty"`
BytesSent uint64 `json:",omitempty"`
PacketsReceived uint64 `json:",omitempty"`
PacketsSent uint64 `json:",omitempty"`
DroppedPacketsIncoming uint64 `json:",omitempty"`
DroppedPacketsOutgoing uint64 `json:",omitempty"`
EndpointId string `json:",omitempty"`
InstanceId string `json:",omitempty"`
}
type NetworkStats = schema1.NetworkStats

// Statistics is the structure returned by a statistics call on a container
type Statistics struct {
Timestamp time.Time `json:",omitempty"`
ContainerStartTime time.Time `json:",omitempty"`
Uptime100ns uint64 `json:",omitempty"`
Memory MemoryStats `json:",omitempty"`
Processor ProcessorStats `json:",omitempty"`
Storage StorageStats `json:",omitempty"`
Network []NetworkStats `json:",omitempty"`
}
type Statistics = schema1.Statistics

// ProcessList is the structure of an item returned by a ProcessList call on a container
type ProcessListItem struct {
CreateTimestamp time.Time `json:",omitempty"`
ImageName string `json:",omitempty"`
KernelTime100ns uint64 `json:",omitempty"`
MemoryCommitBytes uint64 `json:",omitempty"`
MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"`
MemoryWorkingSetSharedBytes uint64 `json:",omitempty"`
ProcessId uint32 `json:",omitempty"`
UserTime100ns uint64 `json:",omitempty"`
}
type ProcessListItem = schema1.ProcessListItem

// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container
type MappedVirtualDiskController struct {
MappedVirtualDisks map[int]MappedVirtualDisk `json:",omitempty"`
}
type MappedVirtualDiskController = schema1.MappedVirtualDiskController

// Type of Request Support in ModifySystem
type RequestType string
type RequestType = schema1.RequestType

// Type of Resource Support in ModifySystem
type ResourceType string
type ResourceType = schema1.ResourceType
|
||||
|
||||
// RequestType const
|
||||
const (
|
||||
Add RequestType = "Add"
|
||||
Remove RequestType = "Remove"
|
||||
Network ResourceType = "Network"
|
||||
Add = schema1.Add
|
||||
Remove = schema1.Remove
|
||||
Network = schema1.Network
|
||||
)
|
||||
|
||||
// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system
|
||||
// Supported resource types are Network and Request Types are Add/Remove
|
||||
type ResourceModificationRequestResponse struct {
|
||||
Resource ResourceType `json:"ResourceType"`
|
||||
Data interface{} `json:"Settings"`
|
||||
Request RequestType `json:"RequestType,omitempty"`
|
||||
type ResourceModificationRequestResponse = schema1.ResourceModificationRequestResponse
|
||||
|
||||
type container struct {
|
||||
system *hcs.System
|
||||
}
|
||||
|
||||
// createContainerAdditionalJSON is read from the environment at initialisation
|
||||
// createComputeSystemAdditionalJSON is read from the environment at initialisation
|
||||
// time. It allows an environment variable to define additional JSON which
|
||||
// is merged in the CreateContainer call to HCS.
|
||||
var createContainerAdditionalJSON string
|
||||
// is merged in the CreateComputeSystem call to HCS.
|
||||
var createContainerAdditionalJSON []byte
|
||||
|
||||
func init() {
|
||||
createContainerAdditionalJSON = os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON")
|
||||
createContainerAdditionalJSON = ([]byte)(os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON"))
|
||||
}
|
||||
|
||||
// CreateContainer creates a new container with the given configuration but does not start it.
|
||||
func CreateContainer(id string, c *ContainerConfig) (Container, error) {
|
||||
return createContainerWithJSON(id, c, "")
|
||||
}
|
||||
|
||||
// CreateContainerWithJSON creates a new container with the given configuration but does not start it.
|
||||
// It is identical to CreateContainer except that optional additional JSON can be merged before passing to HCS.
|
||||
func CreateContainerWithJSON(id string, c *ContainerConfig, additionalJSON string) (Container, error) {
|
||||
return createContainerWithJSON(id, c, additionalJSON)
|
||||
}
|
||||
|
||||
func createContainerWithJSON(id string, c *ContainerConfig, additionalJSON string) (Container, error) {
|
||||
operation := "CreateContainer"
|
||||
title := "HCSShim::" + operation
|
||||
|
||||
container := &container{
|
||||
id: id,
|
||||
fullConfig, err := mergemaps.MergeJSON(c, createContainerAdditionalJSON)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", createContainerAdditionalJSON, err)
|
||||
}
|
||||
|
||||
configurationb, err := json.Marshal(c)
|
||||
system, err := hcs.CreateComputeSystem(id, fullConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
configuration := string(configurationb)
|
||||
logrus.Debugf(title+" id=%s config=%s", id, configuration)
|
||||
|
||||
// Merge any additional JSON. Priority is given to what is passed in explicitly,
|
||||
// falling back to what's set in the environment.
|
||||
if additionalJSON == "" && createContainerAdditionalJSON != "" {
|
||||
additionalJSON = createContainerAdditionalJSON
|
||||
}
|
||||
if additionalJSON != "" {
|
||||
configurationMap := map[string]interface{}{}
|
||||
if err := json.Unmarshal([]byte(configuration), &configurationMap); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal %s: %s", configuration, err)
|
||||
}
|
||||
|
||||
additionalMap := map[string]interface{}{}
|
||||
if err := json.Unmarshal([]byte(additionalJSON), &additionalMap); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal %s: %s", additionalJSON, err)
|
||||
}
|
||||
|
||||
mergedMap := mergeMaps(additionalMap, configurationMap)
|
||||
mergedJSON, err := json.Marshal(mergedMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal merged configuration map %+v: %s", mergedMap, err)
|
||||
}
|
||||
|
||||
configuration = string(mergedJSON)
|
||||
logrus.Debugf(title+" id=%s merged config=%s", id, configuration)
|
||||
}
|
||||
|
||||
var (
|
||||
resultp *uint16
|
||||
identity syscall.Handle
|
||||
)
|
||||
createError := hcsCreateComputeSystem(id, configuration, identity, &container.handle, &resultp)
|
||||
|
||||
if createError == nil || IsPending(createError) {
|
||||
if err := container.registerCallback(); err != nil {
|
||||
// Terminate the container if it still exists. We're okay to ignore a failure here.
|
||||
container.Terminate()
|
||||
return nil, makeContainerError(container, operation, "", err)
|
||||
}
|
||||
}
|
||||
|
||||
err = processAsyncHcsResult(createError, resultp, container.callbackNumber, hcsNotificationSystemCreateCompleted, &defaultTimeout)
|
||||
if err != nil {
|
||||
if err == ErrTimeout {
|
||||
// Terminate the container if it still exists. We're okay to ignore a failure here.
|
||||
container.Terminate()
|
||||
}
|
||||
return nil, makeContainerError(container, operation, configuration, err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s handle=%d", id, container.handle)
|
||||
return container, nil
|
||||
}
|
||||
|
||||
// mergeMaps recursively merges map `fromMap` into map `ToMap`. Any pre-existing values
|
||||
// in ToMap are overwritten. Values in fromMap are added to ToMap.
|
||||
// From http://stackoverflow.com/questions/40491438/merging-two-json-strings-in-golang
|
||||
func mergeMaps(fromMap, ToMap interface{}) interface{} {
|
||||
switch fromMap := fromMap.(type) {
|
||||
case map[string]interface{}:
|
||||
ToMap, ok := ToMap.(map[string]interface{})
|
||||
if !ok {
|
||||
return fromMap
|
||||
}
|
||||
for keyToMap, valueToMap := range ToMap {
|
||||
if valueFromMap, ok := fromMap[keyToMap]; ok {
|
||||
fromMap[keyToMap] = mergeMaps(valueFromMap, valueToMap)
|
||||
} else {
|
||||
fromMap[keyToMap] = valueToMap
|
||||
}
|
||||
}
|
||||
case nil:
|
||||
// merge(nil, map[string]interface{...}) -> map[string]interface{...}
|
||||
ToMap, ok := ToMap.(map[string]interface{})
|
||||
if ok {
|
||||
return ToMap
|
||||
}
|
||||
}
|
||||
return fromMap
|
||||
return &container{system}, err
|
||||
}
|
||||
|
||||
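The deleted mergeMaps helper above is what gave explicitly passed JSON priority over the base configuration. A self-contained sketch of the same recursion, with hypothetical names and a small runnable demo:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// merge recursively merges `from` into `to`; values in `from` win,
// mirroring the deleted mergeMaps above (including its nil case).
func merge(from, to interface{}) interface{} {
	switch from := from.(type) {
	case map[string]interface{}:
		toMap, ok := to.(map[string]interface{})
		if !ok {
			return from
		}
		for k, toVal := range toMap {
			if fromVal, ok := from[k]; ok {
				from[k] = merge(fromVal, toVal) // both sides set: recurse
			} else {
				from[k] = toVal // only the base map had it: keep it
			}
		}
	case nil:
		// merge(nil, map) -> map, as in the original
		if toMap, ok := to.(map[string]interface{}); ok {
			return toMap
		}
	}
	return from
}

func main() {
	var extra, base map[string]interface{}
	json.Unmarshal([]byte(`{"HvPartition":true,"Layers":{"X":1}}`), &extra)
	json.Unmarshal([]byte(`{"Name":"demo","Layers":{"Y":2}}`), &base)
	out, _ := json.Marshal(merge(extra, base))
	// {"HvPartition":true,"Layers":{"X":1,"Y":2},"Name":"demo"}
	fmt.Println(string(out))
}
```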
// OpenContainer opens an existing container by ID.
func OpenContainer(id string) (Container, error) {
	operation := "OpenContainer"
	title := "HCSShim::" + operation
	logrus.Debugf(title+" id=%s", id)

	container := &container{
		id: id,
	}

	var (
		handle  hcsSystem
		resultp *uint16
	)
	err := hcsOpenComputeSystem(id, &handle, &resultp)
	err = processHcsResult(err, resultp)
	system, err := hcs.OpenComputeSystem(id)
	if err != nil {
		return nil, makeContainerError(container, operation, "", err)
		return nil, err
	}

	container.handle = handle

	if err := container.registerCallback(); err != nil {
		return nil, makeContainerError(container, operation, "", err)
	}

	logrus.Debugf(title+" succeeded id=%s handle=%d", id, handle)
	return container, nil
	return &container{system}, err
}

// GetContainers gets a list of the containers on the system that match the query
func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) {
	operation := "GetContainers"
	title := "HCSShim::" + operation

	queryb, err := json.Marshal(q)
	if err != nil {
		return nil, err
	}

	query := string(queryb)
	logrus.Debugf(title+" query=%s", query)

	var (
		resultp         *uint16
		computeSystemsp *uint16
	)
	err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp)
	err = processHcsResult(err, resultp)
	if err != nil {
		return nil, err
	}

	if computeSystemsp == nil {
		return nil, ErrUnexpectedValue
	}
	computeSystemsRaw := convertAndFreeCoTaskMemBytes(computeSystemsp)
	computeSystems := []ContainerProperties{}
	if err := json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil {
		return nil, err
	}

	logrus.Debugf(title + " succeeded")
	return computeSystems, nil
	return hcs.GetComputeSystems(q)
}

// Start synchronously starts the container.
func (container *container) Start() error {
	container.handleLock.RLock()
	defer container.handleLock.RUnlock()
	operation := "Start"
	title := "HCSShim::Container::" + operation
	logrus.Debugf(title+" id=%s", container.id)

	if container.handle == 0 {
		return makeContainerError(container, operation, "", ErrAlreadyClosed)
	}

	var resultp *uint16
	err := hcsStartComputeSystem(container.handle, "", &resultp)
	err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemStartCompleted, &defaultTimeout)
	if err != nil {
		return makeContainerError(container, operation, "", err)
	}

	logrus.Debugf(title+" succeeded id=%s", container.id)
	return nil
	return convertSystemError(container.system.Start(), container)
}

// Shutdown requests a container shutdown, if IsPending() on the error returned is true,
// it may not actually be shut down until Wait() succeeds.
// Shutdown requests a container shutdown, but it may not actually be shut down until Wait() succeeds.
func (container *container) Shutdown() error {
	container.handleLock.RLock()
	defer container.handleLock.RUnlock()
	operation := "Shutdown"
	title := "HCSShim::Container::" + operation
	logrus.Debugf(title+" id=%s", container.id)

	if container.handle == 0 {
		return makeContainerError(container, operation, "", ErrAlreadyClosed)
	}

	var resultp *uint16
	err := hcsShutdownComputeSystem(container.handle, "", &resultp)
	err = processHcsResult(err, resultp)
	if err != nil {
		return makeContainerError(container, operation, "", err)
	}

	logrus.Debugf(title+" succeeded id=%s", container.id)
	return nil
	return convertSystemError(container.system.Shutdown(), container)
}

// Terminate requests a container terminate, if IsPending() on the error returned is true,
// it may not actually be shut down until Wait() succeeds.
// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds.
func (container *container) Terminate() error {
	container.handleLock.RLock()
	defer container.handleLock.RUnlock()
	operation := "Terminate"
	title := "HCSShim::Container::" + operation
	logrus.Debugf(title+" id=%s", container.id)

	if container.handle == 0 {
		return makeContainerError(container, operation, "", ErrAlreadyClosed)
	}

	var resultp *uint16
	err := hcsTerminateComputeSystem(container.handle, "", &resultp)
	err = processHcsResult(err, resultp)
	if err != nil {
		return makeContainerError(container, operation, "", err)
	}

	logrus.Debugf(title+" succeeded id=%s", container.id)
	return nil
	return convertSystemError(container.system.Terminate(), container)
}

// Wait synchronously waits for the container to shutdown or terminate.
// Waits synchronously waits for the container to shutdown or terminate.
func (container *container) Wait() error {
	operation := "Wait"
	title := "HCSShim::Container::" + operation
	logrus.Debugf(title+" id=%s", container.id)

	err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, nil)
	if err != nil {
		return makeContainerError(container, operation, "", err)
	}

	logrus.Debugf(title+" succeeded id=%s", container.id)
	return nil
	return convertSystemError(container.system.Wait(), container)
}

// WaitTimeout synchronously waits for the container to terminate or the duration to elapse.
// If the timeout expires, IsTimeout(err) == true
func (container *container) WaitTimeout(timeout time.Duration) error {
	operation := "WaitTimeout"
	title := "HCSShim::Container::" + operation
	logrus.Debugf(title+" id=%s", container.id)

	err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, &timeout)
	if err != nil {
		return makeContainerError(container, operation, "", err)
	}

	logrus.Debugf(title+" succeeded id=%s", container.id)
	return nil
// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It
// returns false if timeout occurs.
func (container *container) WaitTimeout(t time.Duration) error {
	return convertSystemError(container.system.WaitTimeout(t), container)
}

func (container *container) properties(query string) (*ContainerProperties, error) {
	var (
		resultp     *uint16
		propertiesp *uint16
	)
	err := hcsGetComputeSystemProperties(container.handle, query, &propertiesp, &resultp)
	err = processHcsResult(err, resultp)
	if err != nil {
		return nil, err
	}
// Pause pauses the execution of a container.
func (container *container) Pause() error {
	return convertSystemError(container.system.Pause(), container)
}

	if propertiesp == nil {
		return nil, ErrUnexpectedValue
	}
	propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp)
	properties := &ContainerProperties{}
	if err := json.Unmarshal(propertiesRaw, properties); err != nil {
		return nil, err
	}
	return properties, nil
// Resume resumes the execution of a container.
func (container *container) Resume() error {
	return convertSystemError(container.system.Resume(), container)
}

// HasPendingUpdates returns true if the container has updates pending to install
func (container *container) HasPendingUpdates() (bool, error) {
	container.handleLock.RLock()
	defer container.handleLock.RUnlock()
	operation := "HasPendingUpdates"
	title := "HCSShim::Container::" + operation
	logrus.Debugf(title+" id=%s", container.id)

	if container.handle == 0 {
		return false, makeContainerError(container, operation, "", ErrAlreadyClosed)
	}

	properties, err := container.properties(pendingUpdatesQuery)
	if err != nil {
		return false, makeContainerError(container, operation, "", err)
	}

	logrus.Debugf(title+" succeeded id=%s", container.id)
	return properties.AreUpdatesPending, nil
	return false, nil
}

// Statistics returns statistics for the container
// Statistics returns statistics for the container. This is a legacy v1 call
func (container *container) Statistics() (Statistics, error) {
	container.handleLock.RLock()
	defer container.handleLock.RUnlock()
	operation := "Statistics"
	title := "HCSShim::Container::" + operation
	logrus.Debugf(title+" id=%s", container.id)

	if container.handle == 0 {
		return Statistics{}, makeContainerError(container, operation, "", ErrAlreadyClosed)
	}

	properties, err := container.properties(statisticsQuery)
	properties, err := container.system.Properties(schema1.PropertyTypeStatistics)
	if err != nil {
		return Statistics{}, makeContainerError(container, operation, "", err)
		return Statistics{}, convertSystemError(err, container)
	}

	logrus.Debugf(title+" succeeded id=%s", container.id)
	return properties.Statistics, nil
}

// ProcessList returns an array of ProcessListItems for the container
// ProcessList returns an array of ProcessListItems for the container. This is a legacy v1 call
func (container *container) ProcessList() ([]ProcessListItem, error) {
	container.handleLock.RLock()
	defer container.handleLock.RUnlock()
	operation := "ProcessList"
	title := "HCSShim::Container::" + operation
	logrus.Debugf(title+" id=%s", container.id)

	if container.handle == 0 {
		return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
	}

	properties, err := container.properties(processListQuery)
	properties, err := container.system.Properties(schema1.PropertyTypeProcessList)
	if err != nil {
		return nil, makeContainerError(container, operation, "", err)
		return nil, convertSystemError(err, container)
	}

	logrus.Debugf(title+" succeeded id=%s", container.id)
	return properties.ProcessList, nil
}

// MappedVirtualDisks returns a map of the controllers and the disks mapped
// to a container.
//
// Example of JSON returned by the query.
//{
//	"Id":"1126e8d7d279c707a666972a15976371d365eaf622c02cea2c442b84f6f550a3_svm",
//	"SystemType":"Container",
//	"RuntimeOsType":"Linux",
//	"RuntimeId":"00000000-0000-0000-0000-000000000000",
//	"State":"Running",
//	"MappedVirtualDiskControllers":{
//		"0":{
//			"MappedVirtualDisks":{
//				"2":{
//					"HostPath":"C:\\lcow\\lcow\\scratch\\1126e8d7d279c707a666972a15976371d365eaf622c02cea2c442b84f6f550a3.vhdx",
//					"ContainerPath":"/mnt/gcs/LinuxServiceVM/scratch",
//					"Lun":2,
//					"CreateInUtilityVM":true
//				},
//				"3":{
//					"HostPath":"C:\\lcow\\lcow\\1126e8d7d279c707a666972a15976371d365eaf622c02cea2c442b84f6f550a3\\sandbox.vhdx",
//					"Lun":3,
//					"CreateInUtilityVM":true,
//					"AttachOnly":true
//				}
//			}
//		}
//	}
//}
// This is a legacy v1 call
func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) {
	container.handleLock.RLock()
	defer container.handleLock.RUnlock()
	operation := "MappedVirtualDiskList"
	title := "HCSShim::Container::" + operation
	logrus.Debugf(title+" id=%s", container.id)

	if container.handle == 0 {
		return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
	}

	properties, err := container.properties(mappedVirtualDiskQuery)
	properties, err := container.system.Properties(schema1.PropertyTypeMappedVirtualDisk)
	if err != nil {
		return nil, makeContainerError(container, operation, "", err)
		return nil, convertSystemError(err, container)
	}

	logrus.Debugf(title+" succeeded id=%s", container.id)
	return properties.MappedVirtualDiskControllers, nil
}

// Pause pauses the execution of the container. This feature is not enabled in TP5.
func (container *container) Pause() error {
	container.handleLock.RLock()
	defer container.handleLock.RUnlock()
	operation := "Pause"
	title := "HCSShim::Container::" + operation
	logrus.Debugf(title+" id=%s", container.id)

	if container.handle == 0 {
		return makeContainerError(container, operation, "", ErrAlreadyClosed)
	}

	var resultp *uint16
	err := hcsPauseComputeSystem(container.handle, "", &resultp)
	err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemPauseCompleted, &defaultTimeout)
	if err != nil {
		return makeContainerError(container, operation, "", err)
	}

	logrus.Debugf(title+" succeeded id=%s", container.id)
	return nil
}

// Resume resumes the execution of the container. This feature is not enabled in TP5.
func (container *container) Resume() error {
	container.handleLock.RLock()
	defer container.handleLock.RUnlock()
	operation := "Resume"
	title := "HCSShim::Container::" + operation
	logrus.Debugf(title+" id=%s", container.id)

	if container.handle == 0 {
		return makeContainerError(container, operation, "", ErrAlreadyClosed)
	}

	var resultp *uint16
	err := hcsResumeComputeSystem(container.handle, "", &resultp)
	err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemResumeCompleted, &defaultTimeout)
	if err != nil {
		return makeContainerError(container, operation, "", err)
	}

	logrus.Debugf(title+" succeeded id=%s", container.id)
	return nil
}

// CreateProcess launches a new process within the container.
func (container *container) CreateProcess(c *ProcessConfig) (Process, error) {
	container.handleLock.RLock()
	defer container.handleLock.RUnlock()
	operation := "CreateProcess"
	title := "HCSShim::Container::" + operation
	var (
		processInfo   hcsProcessInformation
		processHandle hcsProcess
		resultp       *uint16
	)

	if container.handle == 0 {
		return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
	}

	// If we are not emulating a console, ignore any console size passed to us
	if !c.EmulateConsole {
		c.ConsoleSize[0] = 0
		c.ConsoleSize[1] = 0
	}

	configurationb, err := json.Marshal(c)
	p, err := container.system.CreateProcess(c)
	if err != nil {
		return nil, makeContainerError(container, operation, "", err)
		return nil, convertSystemError(err, container)
	}

	configuration := string(configurationb)
	logrus.Debugf(title+" id=%s config=%s", container.id, configuration)

	err = hcsCreateProcess(container.handle, configuration, &processInfo, &processHandle, &resultp)
	err = processHcsResult(err, resultp)
	if err != nil {
		return nil, makeContainerError(container, operation, configuration, err)
	}

	process := &process{
		handle:    processHandle,
		processID: int(processInfo.ProcessId),
		container: container,
		cachedPipes: &cachedPipes{
			stdIn:  processInfo.StdInput,
			stdOut: processInfo.StdOutput,
			stdErr: processInfo.StdError,
		},
	}

	if err := process.registerCallback(); err != nil {
		return nil, makeContainerError(container, operation, "", err)
	}

	logrus.Debugf(title+" succeeded id=%s processid=%d", container.id, process.processID)
	return process, nil
	return &process{p}, nil
}

// OpenProcess gets an interface to an existing process within the container.
func (container *container) OpenProcess(pid int) (Process, error) {
	container.handleLock.RLock()
	defer container.handleLock.RUnlock()
	operation := "OpenProcess"
	title := "HCSShim::Container::" + operation
	logrus.Debugf(title+" id=%s, processid=%d", container.id, pid)
	var (
		processHandle hcsProcess
		resultp       *uint16
	)

	if container.handle == 0 {
		return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
	}

	err := hcsOpenProcess(container.handle, uint32(pid), &processHandle, &resultp)
	err = processHcsResult(err, resultp)
	p, err := container.system.OpenProcess(pid)
	if err != nil {
		return nil, makeContainerError(container, operation, "", err)
		return nil, convertSystemError(err, container)
	}

	process := &process{
		handle:    processHandle,
		processID: pid,
		container: container,
	}

	if err := process.registerCallback(); err != nil {
		return nil, makeContainerError(container, operation, "", err)
	}

	logrus.Debugf(title+" succeeded id=%s processid=%s", container.id, process.processID)
	return process, nil
	return &process{p}, nil
}

// Close cleans up any state associated with the container but does not terminate or wait for it.
func (container *container) Close() error {
	container.handleLock.Lock()
	defer container.handleLock.Unlock()
	operation := "Close"
	title := "HCSShim::Container::" + operation
	logrus.Debugf(title+" id=%s", container.id)

	// Don't double free this
	if container.handle == 0 {
		return nil
	}

	if err := container.unregisterCallback(); err != nil {
		return makeContainerError(container, operation, "", err)
	}

	if err := hcsCloseComputeSystem(container.handle); err != nil {
		return makeContainerError(container, operation, "", err)
	}

	container.handle = 0

	logrus.Debugf(title+" succeeded id=%s", container.id)
	return nil
	return convertSystemError(container.system.Close(), container)
}

func (container *container) registerCallback() error {
	context := &notifcationWatcherContext{
		channels: newChannels(),
	}

	callbackMapLock.Lock()
	callbackNumber := nextCallback
	nextCallback++
	callbackMap[callbackNumber] = context
	callbackMapLock.Unlock()

	var callbackHandle hcsCallback
	err := hcsRegisterComputeSystemCallback(container.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
	if err != nil {
		return err
	}
	context.handle = callbackHandle
	container.callbackNumber = callbackNumber

	return nil
}

func (container *container) unregisterCallback() error {
	callbackNumber := container.callbackNumber

	callbackMapLock.RLock()
	context := callbackMap[callbackNumber]
	callbackMapLock.RUnlock()

	if context == nil {
		return nil
	}

	handle := context.handle

	if handle == 0 {
		return nil
	}

	// hcsUnregisterComputeSystemCallback has its own synchronization
	// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
	err := hcsUnregisterComputeSystemCallback(handle)
	if err != nil {
		return err
	}

	closeChannels(context.channels)

	callbackMapLock.Lock()
	callbackMap[callbackNumber] = nil
	callbackMapLock.Unlock()

	handle = 0

	return nil
}

// Modifies the System by sending a request to HCS
// Modify the System
func (container *container) Modify(config *ResourceModificationRequestResponse) error {
	container.handleLock.RLock()
	defer container.handleLock.RUnlock()
	operation := "Modify"
	title := "HCSShim::Container::" + operation

	if container.handle == 0 {
		return makeContainerError(container, operation, "", ErrAlreadyClosed)
	}

	requestJSON, err := json.Marshal(config)
	if err != nil {
		return err
	}

	requestString := string(requestJSON)
	logrus.Debugf(title+" id=%s request=%s", container.id, requestString)

	var resultp *uint16
	err = hcsModifyComputeSystem(container.handle, requestString, &resultp)
	err = processHcsResult(err, resultp)
	if err != nil {
		return makeContainerError(container, operation, "", err)
	}
	logrus.Debugf(title+" succeeded id=%s", container.id)
	return nil
	return convertSystemError(container.system.Modify(config), container)
}
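Note that the public v1 surface (Statistics, ProcessList, and so on) is unchanged by this refactor; only the plumbing moved to internal/hcs. A Windows-only, caller-side sketch assuming that interface (names in the helper are hypothetical):

```go
// Package containerstats is a hypothetical sketch against the v1 API above.
package containerstats

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

// printUsage shows that existing callers keep compiling: Statistics is now
// served via system.Properties(schema1.PropertyTypeStatistics) internally.
func printUsage(c hcsshim.Container) error {
	stats, err := c.Statistics()
	if err != nil {
		return err
	}
	fmt.Printf("uptime(100ns)=%d commit=%dB\n",
		stats.Uptime100ns, stats.Memory.UsageCommitBytes)

	procs, err := c.ProcessList()
	if err != nil {
		return err
	}
	for _, p := range procs {
		fmt.Printf("pid=%d image=%s\n", p.ProcessId, p.ImageName)
	}
	return nil
}
```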
27 vendor/github.com/Microsoft/hcsshim/createlayer.go generated vendored
@@ -1,27 +0,0 @@
package hcsshim

import "github.com/sirupsen/logrus"

// CreateLayer creates a new, empty, read-only layer on the filesystem based on
// the parent layer provided.
func CreateLayer(info DriverInfo, id, parent string) error {
	title := "hcsshim::CreateLayer "
	logrus.Debugf(title+"Flavour %d ID %s parent %s", info.Flavour, id, parent)

	// Convert info to API calling convention
	infop, err := convertDriverInfo(info)
	if err != nil {
		logrus.Error(err)
		return err
	}

	err = createLayer(&infop, id, parent)
	if err != nil {
		err = makeErrorf(err, title, "id=%s parent=%s flavour=%d", id, parent, info.Flavour)
		logrus.Error(err)
		return err
	}

	logrus.Debugf(title+" - succeeded id=%s parent=%s flavour=%d", id, parent, info.Flavour)
	return nil
}
35 vendor/github.com/Microsoft/hcsshim/createsandboxlayer.go generated vendored
@@ -1,35 +0,0 @@
package hcsshim

import "github.com/sirupsen/logrus"

// CreateSandboxLayer creates and populates a new read-write layer for use by a container.
// This requires both the id of the direct parent layer, as well as the full list
// of paths to all parent layers up to the base (and including the direct parent
// whose id was provided).
func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error {
	title := "hcsshim::CreateSandboxLayer "
	logrus.Debugf(title+"layerId %s parentId %s", layerId, parentId)

	// Generate layer descriptors
	layers, err := layerPathsToDescriptors(parentLayerPaths)
	if err != nil {
		return err
	}

	// Convert info to API calling convention
	infop, err := convertDriverInfo(info)
	if err != nil {
		logrus.Error(err)
		return err
	}

	err = createSandboxLayer(&infop, layerId, parentId, layers)
	if err != nil {
		err = makeErrorf(err, title, "layerId=%s parentId=%s", layerId, parentId)
		logrus.Error(err)
		return err
	}

	logrus.Debugf(title+"- succeeded layerId=%s parentId=%s", layerId, parentId)
	return nil
}
26 vendor/github.com/Microsoft/hcsshim/deactivatelayer.go generated vendored
@@ -1,26 +0,0 @@
package hcsshim

import "github.com/sirupsen/logrus"

// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
func DeactivateLayer(info DriverInfo, id string) error {
	title := "hcsshim::DeactivateLayer "
	logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)

	// Convert info to API calling convention
	infop, err := convertDriverInfo(info)
	if err != nil {
		logrus.Error(err)
		return err
	}

	err = deactivateLayer(&infop, id)
	if err != nil {
		err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
		logrus.Error(err)
		return err
	}

	logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
	return nil
}
27 vendor/github.com/Microsoft/hcsshim/destroylayer.go generated vendored
@@ -1,27 +0,0 @@
package hcsshim

import "github.com/sirupsen/logrus"

// DestroyLayer will remove the on-disk files representing the layer with the given
// id, including that layer's containing folder, if any.
func DestroyLayer(info DriverInfo, id string) error {
	title := "hcsshim::DestroyLayer "
	logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)

	// Convert info to API calling convention
	infop, err := convertDriverInfo(info)
	if err != nil {
		logrus.Error(err)
		return err
	}

	err = destroyLayer(&infop, id)
	if err != nil {
		err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
		logrus.Error(err)
		return err
	}

	logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
	return nil
}
128 vendor/github.com/Microsoft/hcsshim/errors.go generated vendored
@@ -1,92 +1,83 @@
package hcsshim

import (
	"errors"
	"fmt"
	"syscall"

	"github.com/Microsoft/hcsshim/internal/hns"

	"github.com/Microsoft/hcsshim/internal/hcs"
	"github.com/Microsoft/hcsshim/internal/hcserror"
)

var (
	// ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists
	ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e)
	// ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists
	ErrComputeSystemDoesNotExist = hcs.ErrComputeSystemDoesNotExist

	// ErrElementNotFound is an error encountered when the object being referenced does not exist
	ErrElementNotFound = syscall.Errno(0x490)
	ErrElementNotFound = hcs.ErrElementNotFound

	// ErrElementNotFound is an error encountered when the object being referenced does not exist
	ErrNotSupported = syscall.Errno(0x32)
	ErrNotSupported = hcs.ErrNotSupported

	// ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported
	// decimal -2147024883 / hex 0x8007000d
	ErrInvalidData = syscall.Errno(0xd)
	ErrInvalidData = hcs.ErrInvalidData

	// ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed
	ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed")
	ErrHandleClose = hcs.ErrHandleClose

	// ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method
	ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed")
	ErrAlreadyClosed = hcs.ErrAlreadyClosed

	// ErrInvalidNotificationType is an error encountered when an invalid notification type is used
	ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type")
	ErrInvalidNotificationType = hcs.ErrInvalidNotificationType

	// ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation
	ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation")
	ErrInvalidProcessState = hcs.ErrInvalidProcessState

	// ErrTimeout is an error encountered when waiting on a notification times out
	ErrTimeout = errors.New("hcsshim: timeout waiting for notification")
	ErrTimeout = hcs.ErrTimeout

	// ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for
	// a different expected notification
	ErrUnexpectedContainerExit = errors.New("unexpected container exit")
	ErrUnexpectedContainerExit = hcs.ErrUnexpectedContainerExit

	// ErrUnexpectedProcessAbort is the error encountered when communication with the compute service
	// is lost while waiting for a notification
	ErrUnexpectedProcessAbort = errors.New("lost communication with compute service")
	ErrUnexpectedProcessAbort = hcs.ErrUnexpectedProcessAbort

	// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
	ErrUnexpectedValue = errors.New("unexpected value returned from hcs")
	ErrUnexpectedValue = hcs.ErrUnexpectedValue

	// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
	ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110)
	ErrVmcomputeAlreadyStopped = hcs.ErrVmcomputeAlreadyStopped

	// ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously
	ErrVmcomputeOperationPending = syscall.Errno(0xC0370103)
	ErrVmcomputeOperationPending = hcs.ErrVmcomputeOperationPending

	// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
	ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105)
	ErrVmcomputeOperationInvalidState = hcs.ErrVmcomputeOperationInvalidState

	// ErrProcNotFound is an error encountered when the process cannot be found
	ErrProcNotFound = syscall.Errno(0x7f)
	ErrProcNotFound = hcs.ErrProcNotFound

	// ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
	// builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3.
	ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5)
	ErrVmcomputeOperationAccessIsDenied = hcs.ErrVmcomputeOperationAccessIsDenied

	// ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management
	ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d)
	ErrVmcomputeInvalidJSON = hcs.ErrVmcomputeInvalidJSON

	// ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message
	ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b)
	ErrVmcomputeUnknownMessage = hcs.ErrVmcomputeUnknownMessage

	// ErrNotSupported is an error encountered when hcs doesn't support the request
	ErrPlatformNotSupported = errors.New("unsupported platform request")
	ErrPlatformNotSupported = hcs.ErrPlatformNotSupported
)

type EndpointNotFoundError struct {
	EndpointName string
}

func (e EndpointNotFoundError) Error() string {
	return fmt.Sprintf("Endpoint %s not found", e.EndpointName)
}

type NetworkNotFoundError struct {
	NetworkName string
}

func (e NetworkNotFoundError) Error() string {
	return fmt.Sprintf("Network %s not found", e.NetworkName)
}
type EndpointNotFoundError = hns.EndpointNotFoundError
type NetworkNotFoundError = hns.NetworkNotFoundError

// ProcessError is an error encountered in HCS during an operation on a Process object
type ProcessError struct {
@@ -94,6 +85,7 @@ type ProcessError struct {
	Operation string
	ExtraInfo string
	Err error
	Events []hcs.ErrorEvent
}

// ContainerError is an error encountered in HCS during an operation on a Container object
@@ -102,6 +94,7 @@ type ContainerError struct {
	Operation string
	ExtraInfo string
	Err error
	Events []hcs.ErrorEvent
}

func (e *ContainerError) Error() string {
@@ -113,7 +106,7 @@ func (e *ContainerError) Error() string {
	return "unexpected nil container for error: " + e.Err.Error()
}

	s := "container " + e.Container.id
	s := "container " + e.Container.system.ID()

	if e.Operation != "" {
		s += " encountered an error during " + e.Operation
@@ -123,11 +116,15 @@ func (e *ContainerError) Error() string {
	case nil:
		break
	case syscall.Errno:
		s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, win32FromError(e.Err))
		s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err))
	default:
		s += fmt.Sprintf(": %s", e.Err.Error())
	}

	for _, ev := range e.Events {
		s += "\n" + ev.String()
	}

	if e.ExtraInfo != "" {
		s += " extra info: " + e.ExtraInfo
	}
@@ -153,12 +150,7 @@ func (e *ProcessError) Error() string {
	return "Unexpected nil process for error: " + e.Err.Error()
}

	s := fmt.Sprintf("process %d", e.Process.processID)

	if e.Process.container != nil {
		s += " in container " + e.Process.container.id
	}

	s := fmt.Sprintf("process %d in container %s", e.Process.p.Pid(), e.Process.p.SystemID())
	if e.Operation != "" {
		s += " encountered an error during " + e.Operation
	}
@@ -167,11 +159,15 @@ func (e *ProcessError) Error() string {
	case nil:
		break
	case syscall.Errno:
		s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, win32FromError(e.Err))
		s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err))
	default:
		s += fmt.Sprintf(": %s", e.Err.Error())
	}

	for _, ev := range e.Events {
		s += "\n" + ev.String()
	}

	return s
}

@@ -189,37 +185,31 @@ func makeProcessError(process *process, operation string, extraInfo string, err
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
func IsNotExist(err error) bool {
	err = getInnerError(err)
	if _, ok := err.(EndpointNotFoundError); ok {
		return true
	}
	if _, ok := err.(NetworkNotFoundError); ok {
		return true
	}
	return err == ErrComputeSystemDoesNotExist ||
		err == ErrElementNotFound ||
		err == ErrProcNotFound
	return hcs.IsNotExist(getInnerError(err))
}

// IsAlreadyClosed checks if an error is caused by the Container or Process having been
// already closed by a call to the Close() method.
func IsAlreadyClosed(err error) bool {
	err = getInnerError(err)
	return err == ErrAlreadyClosed
	return hcs.IsAlreadyClosed(getInnerError(err))
}

// IsPending returns a boolean indicating whether the error is that
// the requested operation is being completed in the background.
func IsPending(err error) bool {
	err = getInnerError(err)
	return err == ErrVmcomputeOperationPending
	return hcs.IsPending(getInnerError(err))
}

// IsTimeout returns a boolean indicating whether the error is caused by
// a timeout waiting for the operation to complete.
func IsTimeout(err error) bool {
	err = getInnerError(err)
	return err == ErrTimeout
	return hcs.IsTimeout(getInnerError(err))
}

// IsAlreadyStopped returns a boolean indicating whether the error is caused by
@@ -228,10 +218,7 @@ func IsTimeout(err error) bool {
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
func IsAlreadyStopped(err error) bool {
	err = getInnerError(err)
	return err == ErrVmcomputeAlreadyStopped ||
		err == ErrElementNotFound ||
		err == ErrProcNotFound
	return hcs.IsAlreadyStopped(getInnerError(err))
}

// IsNotSupported returns a boolean indicating whether the error is caused by
@@ -240,12 +227,7 @@ func IsAlreadyStopped(err error) bool {
// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage
// is thrown from the Platform
func IsNotSupported(err error) bool {
	err = getInnerError(err)
	// If Platform doesn't recognize or support the request sent, below errors are seen
	return err == ErrVmcomputeInvalidJSON ||
		err == ErrInvalidData ||
		err == ErrNotSupported ||
		err == ErrVmcomputeUnknownMessage
	return hcs.IsNotSupported(getInnerError(err))
}

func getInnerError(err error) error {
@@ -259,3 +241,17 @@ func getInnerError(err error) error {
}
	return err
}

func convertSystemError(err error, c *container) error {
	if serr, ok := err.(*hcs.SystemError); ok {
		return &ContainerError{Container: c, Operation: serr.Op, ExtraInfo: serr.Extra, Err: serr.Err, Events: serr.Events}
	}
	return err
}

func convertProcessError(err error, p *process) error {
	if perr, ok := err.(*hcs.ProcessError); ok {
		return &ProcessError{Process: p, Operation: perr.Op, Err: perr.Err, Events: perr.Events}
	}
	return err
}
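The classification helpers above keep their v1 signatures and now simply forward to internal/hcs, so call sites are unaffected. A hedged, Windows-only sketch of a typical call site (the stop helper is hypothetical; the hcsshim identifiers are the ones defined above):

```go
// Package stopper is a hypothetical sketch against the v1 error helpers above.
package stopper

import "github.com/Microsoft/hcsshim"

// stop shuts a container down, tolerating the usual benign outcomes.
func stop(c hcsshim.Container) error {
	err := c.Shutdown()
	switch {
	case err == nil:
		return nil
	case hcsshim.IsPending(err):
		// Shutdown continues in the background; block until it finishes.
		return c.Wait()
	case hcsshim.IsAlreadyStopped(err) || hcsshim.IsNotExist(err):
		return nil // nothing left to do
	default:
		return err
	}
}
```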
26 vendor/github.com/Microsoft/hcsshim/expandsandboxsize.go generated vendored
@@ -1,26 +0,0 @@
package hcsshim

import "github.com/sirupsen/logrus"

// ExpandSandboxSize expands the size of a layer to at least size bytes.
func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error {
	title := "hcsshim::ExpandSandboxSize "
	logrus.Debugf(title+"layerId=%s size=%d", layerId, size)

	// Convert info to API calling convention
	infop, err := convertDriverInfo(info)
	if err != nil {
		logrus.Error(err)
		return err
	}

	err = expandSandboxSize(&infop, layerId, size)
	if err != nil {
		err = makeErrorf(err, title, "layerId=%s size=%d", layerId, size)
		logrus.Error(err)
		return err
	}

	logrus.Debugf(title+"- succeeded layerId=%s size=%d", layerId, size)
	return nil
}
19 vendor/github.com/Microsoft/hcsshim/guid.go generated vendored
@@ -1,19 +0,0 @@
package hcsshim

import (
	"crypto/sha1"
	"fmt"
)

type GUID [16]byte

func NewGUID(source string) *GUID {
	h := sha1.Sum([]byte(source))
	var g GUID
	copy(g[0:], h[0:16])
	return &g
}

func (g *GUID) ToString() string {
	return fmt.Sprintf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x-%02x", g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8:10], g[10:])
}
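For reference, the deleted helpers derive a deterministic GUID: it is simply the first 16 bytes of SHA-1(source), rendered with the little-endian field ordering shown above. A self-contained sketch of that behavior:

```go
package main

import (
	"crypto/sha1"
	"fmt"
)

func main() {
	// Equal inputs always map to equal GUIDs, since the GUID is just
	// the first 16 bytes of the SHA-1 digest of the source string.
	h := sha1.Sum([]byte("container-name"))
	var g [16]byte
	copy(g[:], h[:16])

	// Same field ordering as the deleted ToString above.
	fmt.Printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x-%02x\n",
		g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8:10], g[10:])
}
```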
146
vendor/github.com/Microsoft/hcsshim/hcsshim.go
generated
vendored
146
vendor/github.com/Microsoft/hcsshim/hcsshim.go
generated
vendored
@ -4,80 +4,20 @@
|
||||
package hcsshim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/Microsoft/hcsshim/internal/hcserror"
|
||||
)
|
||||
|
||||
//go:generate go run mksyscall_windows.go -output zhcsshim.go hcsshim.go safeopen.go
|
||||
//go:generate go run mksyscall_windows.go -output zsyscall_windows.go hcsshim.go
|
||||
|
||||
//sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree
|
||||
//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId
|
||||
|
||||
//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer?
|
||||
//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer?
|
||||
//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer?
|
||||
//sys createSandboxLayer(info *driverInfo, id string, parent string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer?
|
||||
//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize?
|
||||
//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer?
|
||||
//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer?
|
||||
//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer?
|
||||
//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath?
|
||||
//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages?
|
||||
//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer?
|
||||
//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists?
|
||||
//sys nameToGuid(name string, guid *GUID) (hr error) = vmcompute.NameToGuid?
|
||||
//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer?
|
||||
//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer?
|
||||
//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage?
|
||||
//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage?
|
||||
|
||||
//sys importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ImportLayerBegin?
|
||||
//sys importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) = vmcompute.ImportLayerNext?
//sys importLayerWrite(context uintptr, buffer []byte) (hr error) = vmcompute.ImportLayerWrite?
//sys importLayerEnd(context uintptr) (hr error) = vmcompute.ImportLayerEnd?

//sys exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ExportLayerBegin?
//sys exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) = vmcompute.ExportLayerNext?
//sys exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) = vmcompute.ExportLayerRead?
//sys exportLayerEnd(context uintptr) (hr error) = vmcompute.ExportLayerEnd?

//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems?
//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem?
//sys hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem?
//sys hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem?
//sys hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem?
//sys hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem?
//sys hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem?
//sys hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem?
//sys hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem?
//sys hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties?
//sys hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem?
//sys hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback?
//sys hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback?

//sys hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess?
//sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess?
//sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess?
//sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess?
//sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo?
//sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties?
//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess?
//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties?
//sys hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback?
//sys hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback?

//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings?

//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall?
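
The trailing "?" on these //sys declarations tells the mksyscall_windows generator to resolve each vmcompute.dll export lazily and return an error, rather than panic, when the export is missing on older Windows builds. A minimal sketch of the kind of stub one such line expands to; this is illustrative only and the real generated file differs in detail:

package hcsshim

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

var (
	modvmcompute             = windows.NewLazySystemDLL("vmcompute.dll")
	procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem")
)

// Hand-written approximation of the generated stub for hcsOpenComputeSystem.
func hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) {
	idp, err := syscall.UTF16PtrFromString(id)
	if err != nil {
		return err
	}
	// Find fails cleanly when the export is absent, which is what the
	// trailing "?" asks the generator for.
	if hr = procHcsOpenComputeSystem.Find(); hr != nil {
		return
	}
	r0, _, _ := procHcsOpenComputeSystem.Call(
		uintptr(unsafe.Pointer(idp)),
		uintptr(unsafe.Pointer(computeSystem)),
		uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		// Negative HRESULTs are failures; fold them back to Win32 codes.
		hr = syscall.Errno(win32FromHresult(uintptr(r0)))
	}
	return
}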

const (
	// Specific user-visible exit codes
	WaitErrExecFailed = 32767

	ERROR_GEN_FAILURE          = syscall.Errno(31)
	ERROR_GEN_FAILURE          = hcserror.ERROR_GEN_FAILURE
	ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115)
	WSAEINVAL                  = syscall.Errno(10022)

@@ -85,82 +25,4 @@ const (
	TimeoutInfinite = 0xFFFFFFFF
)

type HcsError struct {
	title string
	rest  string
	Err   error
}

type hcsSystem syscall.Handle
type hcsProcess syscall.Handle
type hcsCallback syscall.Handle

type hcsProcessInformation struct {
	ProcessId uint32
	Reserved  uint32
	StdInput  syscall.Handle
	StdOutput syscall.Handle
	StdError  syscall.Handle
}

func makeError(err error, title, rest string) error {
	// Pass through DLL errors directly since they do not originate from HCS.
	if _, ok := err.(*syscall.DLLError); ok {
		return err
	}
	return &HcsError{title, rest, err}
}

func makeErrorf(err error, title, format string, a ...interface{}) error {
	return makeError(err, title, fmt.Sprintf(format, a...))
}

func win32FromError(err error) uint32 {
	if herr, ok := err.(*HcsError); ok {
		return win32FromError(herr.Err)
	}
	if code, ok := err.(syscall.Errno); ok {
		return uint32(code)
	}
	return uint32(ERROR_GEN_FAILURE)
}

func win32FromHresult(hr uintptr) uintptr {
	if hr&0x1fff0000 == 0x00070000 {
		return hr & 0xffff
	}
	return hr
}
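
A worked example of the facility check above, using standard Windows constants (the snippet is a hypothetical caller inside this package): 0x80070005 is E_ACCESSDENIED, i.e. severity bit set, facility 0x7 (FACILITY_WIN32), code 0x5.

hr := uintptr(0x80070005)
fmt.Println(hr&0x1fff0000 == 0x00070000)   // true: the facility bits say Win32
fmt.Printf("0x%x\n", win32FromHresult(hr)) // 0x5, ERROR_ACCESS_DENIED

HRESULTs that do not carry FACILITY_WIN32 are passed through unchanged.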

func (e *HcsError) Error() string {
	s := e.title
	if len(s) > 0 && s[len(s)-1] != ' ' {
		s += " "
	}
	s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, win32FromError(e.Err))
	if e.rest != "" {
		if e.rest[0] != ' ' {
			s += " "
		}
		s += e.rest
	}
	return s
}

func convertAndFreeCoTaskMemString(buffer *uint16) string {
	str := syscall.UTF16ToString((*[1 << 30]uint16)(unsafe.Pointer(buffer))[:])
	coTaskMemFree(unsafe.Pointer(buffer))
	return str
}

func convertAndFreeCoTaskMemBytes(buffer *uint16) []byte {
	return []byte(convertAndFreeCoTaskMemString(buffer))
}
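
The (*[1 << 30]uint16) conversion above is the usual cgo-free idiom for reading a NUL-terminated UTF-16 buffer owned by C code: the pointer is reinterpreted as a pointer to an enormous Go array, and syscall.UTF16ToString scans the resulting slice up to the first NUL. Judging by the coTaskMemFree call, HCS allocates these result strings on the COM task allocator, so the Go copy must be taken before the original buffer is released.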

func processHcsResult(err error, resultp *uint16) error {
	if resultp != nil {
		result := convertAndFreeCoTaskMemString(resultp)
		logrus.Debugf("Result: %s", result)
	}
	return err
}

type HcsError = hcserror.HcsError
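
That last line is the point of this hunk: HcsError is now a Go 1.9 type alias for the type that moved to the internal hcserror package, not a new named type, so type assertions in existing callers keep compiling and matching. A sketch of caller code that survives the refactor unchanged (someHcsshimOperation is hypothetical, and this assumes the moved type still exports Err as the old definition above did):

err := someHcsshimOperation() // hypothetical call that can fail with an HCS error
if herr, ok := err.(*hcsshim.HcsError); ok {
	// Succeeds even for errors constructed inside internal/hcserror,
	// because a type alias introduces no distinct type identity.
	fmt.Println("HCS failure:", herr.Err)
}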

248
vendor/github.com/Microsoft/hcsshim/hnsendpoint.go
generated
vendored
@@ -1,29 +1,11 @@
package hcsshim

import (
	"encoding/json"
	"net"

	"github.com/sirupsen/logrus"
	"github.com/Microsoft/hcsshim/internal/hns"
)

// HNSEndpoint represents a network endpoint in HNS
type HNSEndpoint struct {
	Id                 string            `json:"ID,omitempty"`
	Name               string            `json:",omitempty"`
	VirtualNetwork     string            `json:",omitempty"`
	VirtualNetworkName string            `json:",omitempty"`
	Policies           []json.RawMessage `json:",omitempty"`
	MacAddress         string            `json:",omitempty"`
	IPAddress          net.IP            `json:",omitempty"`
	DNSSuffix          string            `json:",omitempty"`
	DNSServerList      string            `json:",omitempty"`
	GatewayAddress     string            `json:",omitempty"`
	EnableInternalDNS  bool              `json:",omitempty"`
	DisableICC         bool              `json:",omitempty"`
	PrefixLength       uint8             `json:",omitempty"`
	IsRemoteEndpoint   bool              `json:",omitempty"`
}
type HNSEndpoint = hns.HNSEndpoint

// SystemType represents the type of the system on which actions are done
type SystemType string
@@ -37,39 +19,19 @@ const (

// EndpointAttachDetachRequest is the structure used to send a request to the container to modify the system.
// Supported resource types are Network, and request types are Add/Remove.
type EndpointAttachDetachRequest struct {
	ContainerID    string     `json:"ContainerId,omitempty"`
	SystemType     SystemType `json:"SystemType"`
	CompartmentID  uint16     `json:"CompartmentId,omitempty"`
	VirtualNICName string     `json:"VirtualNicName,omitempty"`
}
type EndpointAttachDetachRequest = hns.EndpointAttachDetachRequest

// EndpointResquestResponse is the object used to get the endpoint request response.
type EndpointResquestResponse struct {
	Success bool
	Error   string
}
type EndpointResquestResponse = hns.EndpointResquestResponse
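
The aliased request/response types keep their JSON tags, so the wire payload is unchanged by the move. A hypothetical container-attach request marshals as below; the container ID is invented, and the SystemType constants are assumed to serialize as their string values, e.g. "Container":

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Microsoft/hcsshim"
)

func main() {
	req := hcsshim.EndpointAttachDetachRequest{
		ContainerID: "example-container-id", // invented example ID
		SystemType:  hcsshim.ContainerType,
	}
	b, _ := json.Marshal(req)
	fmt.Println(string(b))
	// Expected (assuming ContainerType marshals as "Container"):
	// {"ContainerId":"example-container-id","SystemType":"Container"}
}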

// HNSEndpointRequest makes a HNS call to modify/query a network endpoint
func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) {
	endpoint := &HNSEndpoint{}
	err := hnsCall(method, "/endpoints/"+path, request, &endpoint)
	if err != nil {
		return nil, err
	}

	return endpoint, nil
	return hns.HNSEndpointRequest(method, path, request)
}

// HNSListEndpointRequest makes a HNS call to query the list of available endpoints
func HNSListEndpointRequest() ([]HNSEndpoint, error) {
	var endpoint []HNSEndpoint
	err := hnsCall("GET", "/endpoints/", "", &endpoint)
	if err != nil {
		return nil, err
	}

	return endpoint, nil
	return hns.HNSListEndpointRequest()
}
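
Caller-visible behavior is unchanged by the delegation to internal/hns. A minimal consumption sketch of the re-exported wrappers, using the HNSEndpoint fields shown earlier:

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// List every HNS endpoint on the host and print a few fields.
	endpoints, err := hcsshim.HNSListEndpointRequest()
	if err != nil {
		log.Fatal(err)
	}
	for _, ep := range endpoints {
		fmt.Println(ep.Id, ep.Name, ep.IPAddress)
	}
}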

// HotAttachEndpoint makes an HCS Call to attach the endpoint to the container
@@ -120,204 +82,10 @@ func modifyNetworkEndpoint(containerID string, endpointID string, request Reques

// GetHNSEndpointByID gets the Endpoint by ID
func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) {
	return HNSEndpointRequest("GET", endpointID, "")
	return hns.GetHNSEndpointByID(endpointID)
}

// GetHNSEndpointByName gets the endpoint filtered by Name
func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
	hnsResponse, err := HNSListEndpointRequest()
	if err != nil {
		return nil, err
	}
	for _, hnsEndpoint := range hnsResponse {
		if hnsEndpoint.Name == endpointName {
			return &hnsEndpoint, nil
		}
	}
	return nil, EndpointNotFoundError{EndpointName: endpointName}
}

// Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods
func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) {
	operation := "Create"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)

	jsonString, err := json.Marshal(endpoint)
	if err != nil {
		return nil, err
	}
	return HNSEndpointRequest("POST", "", string(jsonString))
}

// Delete Endpoint by sending EndpointRequest to HNS
func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) {
	operation := "Delete"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)

	return HNSEndpointRequest("DELETE", endpoint.Id, "")
}

// Update Endpoint
func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) {
	operation := "Update"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)
	jsonString, err := json.Marshal(endpoint)
	if err != nil {
		return nil, err
	}
	err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint)

	return endpoint, err
}

// ContainerHotAttach attaches an endpoint to a running container
func (endpoint *HNSEndpoint) ContainerHotAttach(containerID string) error {
	operation := "ContainerHotAttach"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID)

	return modifyNetworkEndpoint(containerID, endpoint.Id, Add)
}

// ContainerHotDetach detaches an endpoint from a running container
func (endpoint *HNSEndpoint) ContainerHotDetach(containerID string) error {
	operation := "ContainerHotDetach"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID)

	return modifyNetworkEndpoint(containerID, endpoint.Id, Remove)
}

// ApplyACLPolicy applies a set of ACL Policies on the Endpoint
func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error {
	operation := "ApplyACLPolicy"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)

	for _, policy := range policies {
		if policy == nil {
			continue
		}
		jsonString, err := json.Marshal(policy)
		if err != nil {
			return err
		}
		endpoint.Policies = append(endpoint.Policies, jsonString)
	}

	_, err := endpoint.Update()
	return err
}

// ContainerAttach attaches an endpoint to a container
func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error {
	operation := "ContainerAttach"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)

	requestMessage := &EndpointAttachDetachRequest{
		ContainerID:   containerID,
		CompartmentID: compartmentID,
		SystemType:    ContainerType,
	}
	response := &EndpointResquestResponse{}
	jsonString, err := json.Marshal(requestMessage)
	if err != nil {
		return err
	}
	return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
}

// ContainerDetach detaches an endpoint from a container
func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error {
	operation := "ContainerDetach"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)

	requestMessage := &EndpointAttachDetachRequest{
		ContainerID: containerID,
		SystemType:  ContainerType,
	}
	response := &EndpointResquestResponse{}

	jsonString, err := json.Marshal(requestMessage)
	if err != nil {
		return err
	}
	return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
}

// HostAttach attaches a nic on the host
func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error {
	operation := "HostAttach"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)
	requestMessage := &EndpointAttachDetachRequest{
		CompartmentID: compartmentID,
		SystemType:    HostType,
	}
	response := &EndpointResquestResponse{}

	jsonString, err := json.Marshal(requestMessage)
	if err != nil {
		return err
	}
	return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
}

// HostDetach detaches a nic on the host
func (endpoint *HNSEndpoint) HostDetach() error {
	operation := "HostDetach"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)
	requestMessage := &EndpointAttachDetachRequest{
		SystemType: HostType,
	}
	response := &EndpointResquestResponse{}

	jsonString, err := json.Marshal(requestMessage)
	if err != nil {
		return err
	}
	return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
}

// VirtualMachineNICAttach attaches an endpoint to a virtual machine
func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error {
	operation := "VirtualMachineNicAttach"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)
	requestMessage := &EndpointAttachDetachRequest{
		VirtualNICName: virtualMachineNICName,
		SystemType:     VirtualMachineType,
	}
	response := &EndpointResquestResponse{}

	jsonString, err := json.Marshal(requestMessage)
	if err != nil {
		return err
	}
	return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
}

// VirtualMachineNICDetach detaches an endpoint from a virtual machine
func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error {
	operation := "VirtualMachineNicDetach"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)

	requestMessage := &EndpointAttachDetachRequest{
		SystemType: VirtualMachineType,
	}
	response := &EndpointResquestResponse{}

	jsonString, err := json.Marshal(requestMessage)
	if err != nil {
		return err
	}
	return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
	return hns.GetHNSEndpointByName(endpointName)
}
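
Net effect of this file's diff: the HNSEndpoint type and all of its endpoint operations now live in github.com/Microsoft/hcsshim/internal/hns, and hnsendpoint.go shrinks to type aliases plus thin wrappers that delegate there, leaving the public hcsshim API surface intact.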
Some files were not shown because too many files have changed in this diff.