vendor: github.com/moby/moby/api master, moby/client master
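
This bump picks up several breaking changes in the moby/moby API and
client: the plural prune methods (ContainersPrune, ImagesPrune,
NetworksPrune, VolumesPrune) become singular (ContainerPrune,
ImagePrune, NetworkPrune, VolumePrune); stream-style results such as
ContainerExportResult, ContainerLogsResult, ImageLoadResult,
ImageSaveResult, and ImageImportResult now behave as plain
io.ReadCloser values; ExecCreateOptions renames Tty to TTY and its
ConsoleSize becomes a client.ConsoleSize struct; DiskUsage now returns
per-resource aggregates (counts, TotalSize, Reclaimable, Items); and
several swarm fields gain dedicated string types (swarm.FailureAction,
swarm.UpdateOrder, swarm.RegistryAuthSource). A minimal sketch of the
stream-result change, mirroring the updated test helpers in the diff
below:

    // Stream results are now plain io.ReadCloser values, so a fake can
    // return a reader (or http.NoBody) directly instead of populating
    // unexported struct fields via reflect/unsafe.
    func mockContainerExportResult(content string) client.ContainerExportResult {
    	return io.NopCloser(strings.NewReader(content))
    }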
Signed-off-by: Austin Vazquez <austin.vazquez@docker.com>
@@ -3,33 +3,18 @@ package container
 import (
 	"context"
 	"io"
-	"reflect"
+	"net/http"
 	"strings"
-	"unsafe"
 
 	"github.com/moby/moby/client"
 )
 
 func mockContainerExportResult(content string) client.ContainerExportResult {
-	out := client.ContainerExportResult{}
-
-	// Set unexported field "rc"
-	v := reflect.ValueOf(&out).Elem()
-	f := v.FieldByName("rc")
-	r := io.NopCloser(strings.NewReader(content))
-	reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem().Set(reflect.ValueOf(r))
-	return out
+	return io.NopCloser(strings.NewReader(content))
 }
 
 func mockContainerLogsResult(content string) client.ContainerLogsResult {
-	out := client.ContainerLogsResult{}
-
-	// Set unexported field "rc"
-	v := reflect.ValueOf(&out).Elem()
-	f := v.FieldByName("rc")
-	r := io.NopCloser(strings.NewReader(content))
-	reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem().Set(reflect.ValueOf(r))
-	return out
+	return io.NopCloser(strings.NewReader(content))
 }
 
 type fakeStreamResult struct {
@@ -147,7 +132,7 @@ func (f *fakeClient) ContainerLogs(_ context.Context, containerID string, option
 	if f.logFunc != nil {
 		return f.logFunc(containerID, options)
 	}
-	return client.ContainerLogsResult{}, nil
+	return http.NoBody, nil
 }
 
 func (f *fakeClient) ClientVersion() string {
@@ -172,7 +157,7 @@ func (f *fakeClient) ContainerExport(_ context.Context, containerID string, _ cl
 	if f.containerExportFunc != nil {
 		return f.containerExportFunc(containerID)
 	}
-	return client.ContainerExportResult{}, nil
+	return http.NoBody, nil
 }
 
 func (f *fakeClient) ExecResize(_ context.Context, id string, options client.ExecResizeOptions) (client.ExecResizeResult, error) {
@@ -189,7 +174,7 @@ func (f *fakeClient) ContainerKill(ctx context.Context, containerID string, opti
 	return client.ContainerKillResult{}, nil
 }
 
-func (f *fakeClient) ContainersPrune(ctx context.Context, options client.ContainerPruneOptions) (client.ContainerPruneResult, error) {
+func (f *fakeClient) ContainerPrune(ctx context.Context, options client.ContainerPruneOptions) (client.ContainerPruneResult, error) {
 	if f.containerPruneFunc != nil {
 		return f.containerPruneFunc(ctx, options)
 	}
@@ -101,7 +101,7 @@ func RunExec(ctx context.Context, dockerCLI command.Cli, containerIDorName strin
 		return err
 	}
 	if !options.Detach {
-		if err := dockerCLI.In().CheckTty(execOptions.AttachStdin, execOptions.Tty); err != nil {
+		if err := dockerCLI.In().CheckTty(execOptions.AttachStdin, execOptions.TTY); err != nil {
 			return err
 		}
 	}
@@ -119,17 +119,10 @@ func RunExec(ctx context.Context, dockerCLI command.Cli, containerIDorName strin
 	}
 
 	if options.Detach {
-		var cs client.ConsoleSize
-		if execOptions.ConsoleSize != nil {
-			cs = client.ConsoleSize{
-				Height: execOptions.ConsoleSize[0],
-				Width:  execOptions.ConsoleSize[1],
-			}
-		}
 		_, err := apiClient.ExecStart(ctx, execID, client.ExecStartOptions{
 			Detach:      options.Detach,
-			TTY:         execOptions.Tty,
-			ConsoleSize: cs,
+			TTY:         execOptions.TTY,
+			ConsoleSize: client.ConsoleSize{Height: execOptions.ConsoleSize.Height, Width: execOptions.ConsoleSize.Width},
 		})
 		return err
 	}
@@ -137,9 +130,9 @@ func RunExec(ctx context.Context, dockerCLI command.Cli, containerIDorName strin
 }
 
 func fillConsoleSize(execOptions *client.ExecCreateOptions, dockerCli command.Cli) {
-	if execOptions.Tty {
+	if execOptions.TTY {
 		height, width := dockerCli.Out().GetTtySize()
-		execOptions.ConsoleSize = &[2]uint{height, width}
+		execOptions.ConsoleSize = client.ConsoleSize{Height: height, Width: width}
 	}
 }
 
@@ -157,7 +150,7 @@ func interactiveExec(ctx context.Context, dockerCli command.Cli, execOptions *cl
 		out = dockerCli.Out()
 	}
 	if execOptions.AttachStderr {
-		if execOptions.Tty {
+		if execOptions.TTY {
 			stderr = dockerCli.Out()
 		} else {
 			stderr = dockerCli.Err()
@@ -166,16 +159,9 @@ func interactiveExec(ctx context.Context, dockerCli command.Cli, execOptions *cl
 	fillConsoleSize(execOptions, dockerCli)
 
 	apiClient := dockerCli.Client()
-	var cs client.ConsoleSize
-	if execOptions.ConsoleSize != nil {
-		cs = client.ConsoleSize{
-			Height: execOptions.ConsoleSize[0],
-			Width:  execOptions.ConsoleSize[1],
-		}
-	}
 	resp, err := apiClient.ExecAttach(ctx, execID, client.ExecAttachOptions{
-		TTY:         execOptions.Tty,
-		ConsoleSize: cs,
+		TTY:         execOptions.TTY,
+		ConsoleSize: client.ConsoleSize{Height: execOptions.ConsoleSize.Height, Width: execOptions.ConsoleSize.Width},
 	})
 	if err != nil {
 		return err
@@ -193,7 +179,7 @@ func interactiveExec(ctx context.Context, dockerCli command.Cli, execOptions *cl
 		outputStream: out,
 		errorStream:  stderr,
 		resp:         resp.HijackedResponse,
-		tty:          execOptions.Tty,
+		tty:          execOptions.TTY,
 		detachKeys:   execOptions.DetachKeys,
 	}
 
@@ -201,7 +187,7 @@ func interactiveExec(ctx context.Context, dockerCli command.Cli, execOptions *cl
 		}()
 	}()
 
-	if execOptions.Tty && dockerCli.In().IsTerminal() {
+	if execOptions.TTY && dockerCli.In().IsTerminal() {
 		if err := MonitorTtySize(ctx, dockerCli, execID, true); err != nil {
 			_, _ = fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err)
 		}
@@ -237,7 +223,7 @@ func parseExec(execOpts ExecOptions, configFile *configfile.ConfigFile) (*client
 	execOptions := &client.ExecCreateOptions{
 		User:       execOpts.User,
 		Privileged: execOpts.Privileged,
-		Tty:        execOpts.TTY,
+		TTY:        execOpts.TTY,
 		Cmd:        execOpts.Command,
 		WorkingDir: execOpts.Workdir,
 	}
@@ -69,7 +69,7 @@ TWO=2
 				AttachStdin:  true,
 				AttachStdout: true,
 				AttachStderr: true,
-				Tty:          true,
+				TTY:          true,
 				Cmd:          []string{"command"},
 			},
 		},
@@ -86,7 +86,7 @@ TWO=2
 				Detach: true,
 			}),
 			expected: client.ExecCreateOptions{
-				Tty: true,
+				TTY: true,
 				Cmd: []string{"command"},
 			},
 		},
@@ -75,7 +75,7 @@ func runPrune(ctx context.Context, dockerCli command.Cli, options pruneOptions)
 		}
 	}
 
-	res, err := dockerCli.Client().ContainersPrune(ctx, client.ContainerPruneOptions{
+	res, err := dockerCli.Client().ContainerPrune(ctx, client.ContainerPruneOptions{
 		Filters: pruneFilters,
 	})
 	if err != nil {
@@ -51,7 +51,7 @@ shared: {{.Shared}}
 	return Format(source)
 }
 
-func buildCacheSort(buildCache []*build.CacheRecord) {
+func buildCacheSort(buildCache []build.CacheRecord) {
 	sort.Slice(buildCache, func(i, j int) bool {
 		lui, luj := buildCache[i].LastUsedAt, buildCache[j].LastUsedAt
 		switch {
@@ -70,7 +70,7 @@ func buildCacheSort(buildCache []*build.CacheRecord) {
 }
 
 // BuildCacheWrite renders the context for a list of containers
-func BuildCacheWrite(ctx Context, buildCaches []*build.CacheRecord) error {
+func BuildCacheWrite(ctx Context, buildCaches []build.CacheRecord) error {
 	render := func(format func(subContext SubContext) error) error {
 		buildCacheSort(buildCaches)
 		for _, bc := range buildCaches {
@@ -87,7 +87,7 @@ func BuildCacheWrite(ctx Context, buildCaches []*build.CacheRecord) error {
 type buildCacheContext struct {
 	HeaderContext
 	trunc bool
-	v     *build.CacheRecord
+	v     build.CacheRecord
 }
 
 func newBuildCacheContext() *buildCacheContext {
@@ -12,6 +12,7 @@ import (
 	"github.com/moby/moby/api/types/container"
 	"github.com/moby/moby/api/types/image"
 	"github.com/moby/moby/api/types/volume"
+	"github.com/moby/moby/client"
 )
 
 const (
@@ -33,13 +34,12 @@ const (
 // DiskUsageContext contains disk usage specific information required by the formatter, encapsulate a Context struct.
 type DiskUsageContext struct {
 	Context
-	Verbose     bool
-	LayersSize  int64
-	Images      []*image.Summary
-	Containers  []*container.Summary
-	Volumes     []*volume.Volume
-	BuildCache  []*build.CacheRecord
-	BuilderSize int64
+	Verbose bool
+
+	ImageDiskUsage      client.ImagesDiskUsage
+	BuildCacheDiskUsage client.BuildCacheDiskUsage
+	ContainerDiskUsage  client.ContainersDiskUsage
+	VolumeDiskUsage     client.VolumesDiskUsage
 }
 
 func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) {
@@ -96,35 +96,49 @@ func (ctx *DiskUsageContext) Write() (err error) {
 	}
 
 	err = ctx.contextFormat(tmpl, &diskUsageImagesContext{
-		totalSize: ctx.LayersSize,
-		images:    ctx.Images,
+		totalCount:  ctx.ImageDiskUsage.TotalImages,
+		activeCount: ctx.ImageDiskUsage.ActiveImages,
+		totalSize:   ctx.ImageDiskUsage.TotalSize,
+		reclaimable: ctx.ImageDiskUsage.Reclaimable,
+		images:      ctx.ImageDiskUsage.Items,
 	})
 	if err != nil {
 		return err
 	}
 	err = ctx.contextFormat(tmpl, &diskUsageContainersContext{
-		containers: ctx.Containers,
+		totalCount:  ctx.ContainerDiskUsage.TotalContainers,
+		activeCount: ctx.ContainerDiskUsage.ActiveContainers,
+		totalSize:   ctx.ContainerDiskUsage.TotalSize,
+		reclaimable: ctx.ContainerDiskUsage.Reclaimable,
+		containers:  ctx.ContainerDiskUsage.Items,
 	})
 	if err != nil {
 		return err
 	}
 
 	err = ctx.contextFormat(tmpl, &diskUsageVolumesContext{
-		volumes: ctx.Volumes,
+		totalCount:  ctx.VolumeDiskUsage.TotalVolumes,
+		activeCount: ctx.VolumeDiskUsage.ActiveVolumes,
+		totalSize:   ctx.VolumeDiskUsage.TotalSize,
+		reclaimable: ctx.VolumeDiskUsage.Reclaimable,
+		volumes:     ctx.VolumeDiskUsage.Items,
 	})
 	if err != nil {
 		return err
 	}
 
 	err = ctx.contextFormat(tmpl, &diskUsageBuilderContext{
-		builderSize: ctx.BuilderSize,
-		buildCache:  ctx.BuildCache,
+		totalCount:  ctx.BuildCacheDiskUsage.TotalBuildCacheRecords,
+		activeCount: ctx.BuildCacheDiskUsage.ActiveBuildCacheRecords,
+		builderSize: ctx.BuildCacheDiskUsage.TotalSize,
+		reclaimable: ctx.BuildCacheDiskUsage.Reclaimable,
+		buildCache:  ctx.BuildCacheDiskUsage.Items,
 	})
 	if err != nil {
 		return err
 	}
 
-	diskUsageContainersCtx := diskUsageContainersContext{containers: []*container.Summary{}}
+	diskUsageContainersCtx := diskUsageContainersContext{containers: []container.Summary{}}
 	diskUsageContainersCtx.Header = SubHeaderContext{
 		"Type":        typeHeader,
 		"TotalCount":  totalHeader,
@@ -146,18 +160,18 @@ type diskUsageContext struct {
 
 func (ctx *DiskUsageContext) verboseWrite() error {
 	duc := &diskUsageContext{
-		Images:     make([]*imageContext, 0, len(ctx.Images)),
-		Containers: make([]*ContainerContext, 0, len(ctx.Containers)),
-		Volumes:    make([]*volumeContext, 0, len(ctx.Volumes)),
-		BuildCache: make([]*buildCacheContext, 0, len(ctx.BuildCache)),
+		Images:     make([]*imageContext, 0, len(ctx.ImageDiskUsage.Items)),
+		Containers: make([]*ContainerContext, 0, len(ctx.ContainerDiskUsage.Items)),
+		Volumes:    make([]*volumeContext, 0, len(ctx.VolumeDiskUsage.Items)),
+		BuildCache: make([]*buildCacheContext, 0, len(ctx.BuildCacheDiskUsage.Items)),
 	}
 	trunc := ctx.Format.IsTable()
 
 	// First images
-	for _, i := range ctx.Images {
+	for _, i := range ctx.ImageDiskUsage.Items {
 		repo := "<none>"
 		tag := "<none>"
-		if len(i.RepoTags) > 0 && !isDangling(*i) {
+		if len(i.RepoTags) > 0 && !isDangling(i) {
 			// Only show the first tag
 			ref, err := reference.ParseNormalizedNamed(i.RepoTags[0])
 			if err != nil {
@@ -173,25 +187,25 @@ func (ctx *DiskUsageContext) verboseWrite() error {
 			repo:  repo,
 			tag:   tag,
 			trunc: trunc,
-			i:     *i,
+			i:     i,
 		})
 	}
 
 	// Now containers
-	for _, c := range ctx.Containers {
+	for _, c := range ctx.ContainerDiskUsage.Items {
 		// Don't display the virtual size
 		c.SizeRootFs = 0
-		duc.Containers = append(duc.Containers, &ContainerContext{trunc: trunc, c: *c})
+		duc.Containers = append(duc.Containers, &ContainerContext{trunc: trunc, c: c})
 	}
 
 	// And volumes
-	for _, v := range ctx.Volumes {
-		duc.Volumes = append(duc.Volumes, &volumeContext{v: *v})
+	for _, v := range ctx.VolumeDiskUsage.Items {
+		duc.Volumes = append(duc.Volumes, &volumeContext{v: v})
 	}
 
 	// And build cache
-	buildCacheSort(ctx.BuildCache)
-	for _, v := range ctx.BuildCache {
+	buildCacheSort(ctx.BuildCacheDiskUsage.Items)
+	for _, v := range ctx.BuildCacheDiskUsage.Items {
 		duc.BuildCache = append(duc.BuildCache, &buildCacheContext{v: v, trunc: trunc})
 	}
 
@@ -248,7 +262,7 @@ func (ctx *DiskUsageContext) verboseWriteTable(duc *diskUsageContext) error {
 	if err != nil {
 		return err
 	}
-	_, _ = fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize)))
+	_, _ = fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuildCacheDiskUsage.TotalSize)))
 	for _, v := range duc.BuildCache {
 		if err := ctx.contextFormat(tmpl, v); err != nil {
 			return err
@@ -261,8 +275,11 @@ func (ctx *DiskUsageContext) verboseWriteTable(duc *diskUsageContext) error {
 
 type diskUsageImagesContext struct {
 	HeaderContext
-	totalSize int64
-	images    []*image.Summary
+	totalSize   int64
+	reclaimable int64
+	totalCount  int64
+	activeCount int64
+	images      []image.Summary
 }
 
 func (c *diskUsageImagesContext) MarshalJSON() ([]byte, error) {
@@ -274,18 +291,11 @@ func (*diskUsageImagesContext) Type() string {
 }
 
 func (c *diskUsageImagesContext) TotalCount() string {
-	return strconv.Itoa(len(c.images))
+	return strconv.FormatInt(c.totalCount, 10)
 }
 
 func (c *diskUsageImagesContext) Active() string {
-	used := 0
-	for _, i := range c.images {
-		if i.Containers > 0 {
-			used++
-		}
-	}
-
-	return strconv.Itoa(used)
+	return strconv.FormatInt(c.activeCount, 10)
 }
 
 func (c *diskUsageImagesContext) Size() string {
@@ -293,27 +303,19 @@ func (c *diskUsageImagesContext) Size() string {
 }
 
 func (c *diskUsageImagesContext) Reclaimable() string {
-	var used int64
-
-	for _, i := range c.images {
-		if i.Containers != 0 {
-			if i.Size == -1 || i.SharedSize == -1 {
-				continue
-			}
-			used += i.Size - i.SharedSize
-		}
-	}
-
-	reclaimable := c.totalSize - used
 	if c.totalSize > 0 {
-		return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/c.totalSize)
+		return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(c.reclaimable)), (c.reclaimable*100)/c.totalSize)
 	}
-	return units.HumanSize(float64(reclaimable))
+	return units.HumanSize(float64(c.reclaimable))
 }
 
 type diskUsageContainersContext struct {
 	HeaderContext
-	containers []*container.Summary
+	totalCount  int64
+	activeCount int64
+	totalSize   int64
+	reclaimable int64
+	containers  []container.Summary
 }
 
 func (c *diskUsageContainersContext) MarshalJSON() ([]byte, error) {
@@ -325,62 +327,32 @@ func (*diskUsageContainersContext) Type() string {
 }
 
 func (c *diskUsageContainersContext) TotalCount() string {
-	return strconv.Itoa(len(c.containers))
-}
-
-func (*diskUsageContainersContext) isActive(ctr container.Summary) bool {
-	switch ctr.State {
-	case container.StateRunning, container.StatePaused, container.StateRestarting:
-		return true
-	case container.StateCreated, container.StateRemoving, container.StateExited, container.StateDead:
-		return false
-	default:
-		// Unknown state (should never happen).
-		return false
-	}
+	return strconv.FormatInt(c.totalCount, 10)
 }
 
 func (c *diskUsageContainersContext) Active() string {
-	used := 0
-	for _, ctr := range c.containers {
-		if c.isActive(*ctr) {
-			used++
-		}
-	}
-
-	return strconv.Itoa(used)
+	return strconv.FormatInt(c.activeCount, 10)
 }
 
 func (c *diskUsageContainersContext) Size() string {
-	var size int64
-
-	for _, ctr := range c.containers {
-		size += ctr.SizeRw
-	}
-
-	return units.HumanSize(float64(size))
+	return units.HumanSize(float64(c.totalSize))
 }
 
 func (c *diskUsageContainersContext) Reclaimable() string {
-	var reclaimable, totalSize int64
-
-	for _, ctr := range c.containers {
-		if !c.isActive(*ctr) {
-			reclaimable += ctr.SizeRw
-		}
-		totalSize += ctr.SizeRw
+	if c.totalSize > 0 {
+		return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(c.reclaimable)), (c.reclaimable*100)/c.totalSize)
 	}
 
-	if totalSize > 0 {
-		return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize)
-	}
-
-	return units.HumanSize(float64(reclaimable))
+	return units.HumanSize(float64(c.reclaimable))
 }
 
 type diskUsageVolumesContext struct {
 	HeaderContext
-	volumes []*volume.Volume
+	totalCount  int64
+	activeCount int64
+	totalSize   int64
+	reclaimable int64
+	volumes     []volume.Volume
 }
 
 func (c *diskUsageVolumesContext) MarshalJSON() ([]byte, error) {
@@ -392,56 +364,32 @@ func (*diskUsageVolumesContext) Type() string {
 }
 
 func (c *diskUsageVolumesContext) TotalCount() string {
-	return strconv.Itoa(len(c.volumes))
+	return strconv.FormatInt(c.totalCount, 10)
 }
 
 func (c *diskUsageVolumesContext) Active() string {
-	used := 0
-	for _, v := range c.volumes {
-		if v.UsageData.RefCount > 0 {
-			used++
-		}
-	}
-
-	return strconv.Itoa(used)
+	return strconv.FormatInt(c.activeCount, 10)
 }
 
 func (c *diskUsageVolumesContext) Size() string {
-	var size int64
-
-	for _, v := range c.volumes {
-		if v.UsageData.Size != -1 {
-			size += v.UsageData.Size
-		}
-	}
-
-	return units.HumanSize(float64(size))
+	return units.HumanSize(float64(c.totalSize))
}
 
 func (c *diskUsageVolumesContext) Reclaimable() string {
-	var reclaimable int64
-	var totalSize int64
-
-	for _, v := range c.volumes {
-		if v.UsageData.Size != -1 {
-			if v.UsageData.RefCount == 0 {
-				reclaimable += v.UsageData.Size
-			}
-			totalSize += v.UsageData.Size
-		}
+	if c.totalSize > 0 {
+		return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(c.reclaimable)), (c.reclaimable*100)/c.totalSize)
 	}
 
-	if totalSize > 0 {
-		return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize)
-	}
-
-	return units.HumanSize(float64(reclaimable))
+	return units.HumanSize(float64(c.reclaimable))
 }
 
 type diskUsageBuilderContext struct {
 	HeaderContext
+	totalCount  int64
+	activeCount int64
 	builderSize int64
-	buildCache  []*build.CacheRecord
+	reclaimable int64
+	buildCache  []build.CacheRecord
 }
 
 func (c *diskUsageBuilderContext) MarshalJSON() ([]byte, error) {
@@ -453,17 +401,11 @@ func (*diskUsageBuilderContext) Type() string {
 }
 
 func (c *diskUsageBuilderContext) TotalCount() string {
-	return strconv.Itoa(len(c.buildCache))
+	return strconv.FormatInt(c.totalCount, 10)
 }
 
 func (c *diskUsageBuilderContext) Active() string {
-	numActive := 0
-	for _, bc := range c.buildCache {
-		if bc.InUse {
-			numActive++
-		}
-	}
-	return strconv.Itoa(numActive)
+	return strconv.FormatInt(c.activeCount, 10)
 }
 
 func (c *diskUsageBuilderContext) Size() string {
@@ -471,12 +413,5 @@ func (c *diskUsageBuilderContext) Size() string {
 }
 
 func (c *diskUsageBuilderContext) Reclaimable() string {
-	var inUseBytes int64
-	for _, bc := range c.buildCache {
-		if bc.InUse && !bc.Shared {
-			inUseBytes += bc.Size
-		}
-	}
-
-	return units.HumanSize(float64(c.builderSize - inUseBytes))
+	return units.HumanSize(float64(c.reclaimable))
 }
@@ -19,7 +19,7 @@ type fakeClient struct {
 	imagePushFunc    func(ref string, options client.ImagePushOptions) (client.ImagePushResponse, error)
 	infoFunc         func() (client.SystemInfoResult, error)
 	imagePullFunc    func(ref string, options client.ImagePullOptions) (client.ImagePullResponse, error)
-	imagesPruneFunc  func(options client.ImagePruneOptions) (client.ImagePruneResult, error)
+	imagePruneFunc   func(options client.ImagePruneOptions) (client.ImagePruneResult, error)
 	imageLoadFunc    func(input io.Reader, options ...client.ImageLoadOption) (client.ImageLoadResult, error)
 	imageListFunc    func(options client.ImageListOptions) (client.ImageListResult, error)
 	imageInspectFunc func(img string) (client.ImageInspectResult, error)
@@ -47,7 +47,7 @@ func (cli *fakeClient) ImageSave(_ context.Context, images []string, options ...
 	if cli.imageSaveFunc != nil {
 		return cli.imageSaveFunc(images, options...)
 	}
-	return client.ImageSaveResult{}, nil
+	return http.NoBody, nil
 }
 
 func (cli *fakeClient) ImageRemove(_ context.Context, img string, options client.ImageRemoveOptions) (client.ImageRemoveResult, error) {
@@ -80,9 +80,9 @@ func (cli *fakeClient) ImagePull(_ context.Context, ref string, options client.I
 	return fakeStreamResult{ReadCloser: http.NoBody}, nil
 }
 
-func (cli *fakeClient) ImagesPrune(_ context.Context, opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
-	if cli.imagesPruneFunc != nil {
-		return cli.imagesPruneFunc(opts)
+func (cli *fakeClient) ImagePrune(_ context.Context, opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
+	if cli.imagePruneFunc != nil {
+		return cli.imagePruneFunc(opts)
 	}
 	return client.ImagePruneResult{}, nil
 }
@@ -91,7 +91,7 @@ func (cli *fakeClient) ImageLoad(_ context.Context, input io.Reader, options ...
 	if cli.imageLoadFunc != nil {
 		return cli.imageLoadFunc(input, options...)
 	}
-	return client.ImageLoadResult{}, nil
+	return http.NoBody, nil
 }
 
 func (cli *fakeClient) ImageList(_ context.Context, options client.ImageListOptions) (client.ImageListResult, error) {
@@ -112,7 +112,7 @@ func (cli *fakeClient) ImageImport(_ context.Context, source client.ImageImportS
 	if cli.imageImportFunc != nil {
 		return cli.imageImportFunc(source, ref, options)
 	}
-	return client.ImageImportResult{}, nil
+	return http.NoBody, nil
 }
 
 func (cli *fakeClient) ImageHistory(_ context.Context, img string, options ...client.ImageHistoryOption) (client.ImageHistoryResult, error) {
@@ -3,6 +3,7 @@ package image
 import (
 	"errors"
+	"io"
 	"strings"
 	"testing"
 
 	"github.com/docker/cli/internal/test"
@@ -28,7 +29,7 @@ func TestNewImportCommandErrors(t *testing.T) {
 			args:          []string{"testdata/import-command-success.input.txt"},
 			expectedError: "something went wrong",
 			imageImportFunc: func(source client.ImageImportSource, ref string, options client.ImageImportOptions) (client.ImageImportResult, error) {
-				return client.ImageImportResult{}, errors.New("something went wrong")
+				return nil, errors.New("something went wrong")
 			},
 		},
 	}
@@ -68,7 +69,7 @@ func TestNewImportCommandSuccess(t *testing.T) {
 			args: []string{"-", "image:local"},
 			imageImportFunc: func(source client.ImageImportSource, ref string, options client.ImageImportOptions) (client.ImageImportResult, error) {
 				assert.Check(t, is.Equal("image:local", ref))
-				return client.ImageImportResult{}, nil
+				return io.NopCloser(strings.NewReader("")), nil
 			},
 		},
 		{
@@ -76,7 +77,7 @@ func TestNewImportCommandSuccess(t *testing.T) {
 			args: []string{"--message", "test message", "-"},
 			imageImportFunc: func(source client.ImageImportSource, ref string, options client.ImageImportOptions) (client.ImageImportResult, error) {
 				assert.Check(t, is.Equal("test message", options.Message))
-				return client.ImageImportResult{}, nil
+				return io.NopCloser(strings.NewReader("")), nil
 			},
 		},
 		{
@@ -84,7 +85,7 @@ func TestNewImportCommandSuccess(t *testing.T) {
 			args: []string{"--change", "ENV DEBUG=true", "-"},
 			imageImportFunc: func(source client.ImageImportSource, ref string, options client.ImageImportOptions) (client.ImageImportResult, error) {
 				assert.Check(t, is.Equal("ENV DEBUG=true", options.Changes[0]))
-				return client.ImageImportResult{}, nil
+				return io.NopCloser(strings.NewReader("")), nil
 			},
 		},
 		{
@@ -92,7 +93,7 @@ func TestNewImportCommandSuccess(t *testing.T) {
 			args: []string{"--change", "ENV DEBUG true", "-"},
 			imageImportFunc: func(source client.ImageImportSource, ref string, options client.ImageImportOptions) (client.ImageImportResult, error) {
 				assert.Check(t, is.Equal("ENV DEBUG true", options.Changes[0]))
-				return client.ImageImportResult{}, nil
+				return io.NopCloser(strings.NewReader("")), nil
 			},
 		},
 	}
@@ -4,10 +4,8 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"reflect"
 	"strings"
 	"testing"
-	"unsafe"
 
 	"github.com/docker/cli/internal/test"
 	"github.com/moby/moby/client"
@@ -39,7 +37,7 @@ func TestNewLoadCommandErrors(t *testing.T) {
 			args:          []string{},
 			expectedError: "something went wrong",
 			imageLoadFunc: func(input io.Reader, options ...client.ImageLoadOption) (client.ImageLoadResult, error) {
-				return client.ImageLoadResult{}, errors.New("something went wrong")
+				return nil, errors.New("something went wrong")
 			},
 		},
 		{
@@ -47,7 +45,7 @@ func TestNewLoadCommandErrors(t *testing.T) {
 			args:          []string{"--platform", "<invalid>"},
 			expectedError: `invalid platform`,
 			imageLoadFunc: func(input io.Reader, options ...client.ImageLoadOption) (client.ImageLoadResult, error) {
-				return client.ImageLoadResult{}, nil
+				return io.NopCloser(strings.NewReader("")), nil
 			},
 		},
 	}
@@ -75,14 +73,7 @@ func TestNewLoadCommandInvalidInput(t *testing.T) {
 }
 
 func mockImageLoadResult(content string) client.ImageLoadResult {
-	out := client.ImageLoadResult{}
-
-	// Set unexported field "body"
-	v := reflect.ValueOf(&out).Elem()
-	f := v.FieldByName("body")
-	r := io.NopCloser(strings.NewReader(content))
-	reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem().Set(reflect.ValueOf(r))
-	return out
+	return io.NopCloser(strings.NewReader(content))
 }
 
 func TestNewLoadCommandSuccess(t *testing.T) {
@@ -87,7 +87,7 @@ func runPrune(ctx context.Context, dockerCli command.Cli, options pruneOptions)
 		}
 	}
 
-	res, err := dockerCli.Client().ImagesPrune(ctx, client.ImagePruneOptions{
+	res, err := dockerCli.Client().ImagePrune(ctx, client.ImagePruneOptions{
 		Filters: pruneFilters,
 	})
 	if err != nil {
@@ -18,10 +18,10 @@ import (
 
 func TestNewPruneCommandErrors(t *testing.T) {
 	testCases := []struct {
-		name            string
-		args            []string
-		expectedError   string
-		imagesPruneFunc func(client.ImagePruneOptions) (client.ImagePruneResult, error)
+		name           string
+		args           []string
+		expectedError  string
+		imagePruneFunc func(client.ImagePruneOptions) (client.ImagePruneResult, error)
 	}{
 		{
 			name: "wrong-args",
@@ -32,7 +32,7 @@ func TestNewPruneCommandErrors(t *testing.T) {
 			name:          "prune-error",
 			args:          []string{"--force"},
 			expectedError: "something went wrong",
-			imagesPruneFunc: func(client.ImagePruneOptions) (client.ImagePruneResult, error) {
+			imagePruneFunc: func(client.ImagePruneOptions) (client.ImagePruneResult, error) {
 				return client.ImagePruneResult{}, errors.New("something went wrong")
 			},
 		},
@@ -40,7 +40,7 @@ func TestNewPruneCommandErrors(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			cmd := newPruneCommand(test.NewFakeCli(&fakeClient{
-				imagesPruneFunc: tc.imagesPruneFunc,
+				imagePruneFunc: tc.imagePruneFunc,
 			}))
 			cmd.SetOut(io.Discard)
 			cmd.SetErr(io.Discard)
@@ -52,14 +52,14 @@ func TestNewPruneCommandErrors(t *testing.T) {
 
 func TestNewPruneCommandSuccess(t *testing.T) {
 	testCases := []struct {
-		name            string
-		args            []string
-		imagesPruneFunc func(client.ImagePruneOptions) (client.ImagePruneResult, error)
+		name           string
+		args           []string
+		imagePruneFunc func(client.ImagePruneOptions) (client.ImagePruneResult, error)
 	}{
 		{
 			name: "all",
 			args: []string{"--all"},
-			imagesPruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
+			imagePruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
 				assert.Check(t, opts.Filters["dangling"]["false"])
 				return client.ImagePruneResult{}, nil
 			},
@@ -67,7 +67,7 @@ func TestNewPruneCommandSuccess(t *testing.T) {
 		{
 			name: "force-deleted",
 			args: []string{"--force"},
-			imagesPruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
+			imagePruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
 				assert.Check(t, opts.Filters["dangling"]["true"])
 				return client.ImagePruneResult{
 					Report: image.PruneReport{
@@ -80,7 +80,7 @@ func TestNewPruneCommandSuccess(t *testing.T) {
 		{
 			name: "label-filter",
 			args: []string{"--force", "--filter", "label=foobar"},
-			imagesPruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
+			imagePruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
 				assert.Check(t, opts.Filters["label"]["foobar"])
 				return client.ImagePruneResult{}, nil
 			},
@@ -88,7 +88,7 @@ func TestNewPruneCommandSuccess(t *testing.T) {
 		{
 			name: "force-untagged",
 			args: []string{"--force"},
-			imagesPruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
+			imagePruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
 				assert.Check(t, opts.Filters["dangling"]["true"])
 				return client.ImagePruneResult{
 					Report: image.PruneReport{
@@ -101,7 +101,7 @@ func TestNewPruneCommandSuccess(t *testing.T) {
 	}
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			cli := test.NewFakeCli(&fakeClient{imagesPruneFunc: tc.imagesPruneFunc})
+			cli := test.NewFakeCli(&fakeClient{imagePruneFunc: tc.imagePruneFunc})
 			// when prompted, answer "Y" to confirm the prune.
 			// will not be prompted if --force is used.
 			cli.SetIn(streams.NewIn(io.NopCloser(strings.NewReader("Y\n"))))
@@ -120,8 +120,8 @@ func TestPrunePromptTermination(t *testing.T) {
 	t.Cleanup(cancel)
 
 	cli := test.NewFakeCli(&fakeClient{
-		imagesPruneFunc: func(client.ImagePruneOptions) (client.ImagePruneResult, error) {
-			return client.ImagePruneResult{}, errors.New("fakeClient imagesPruneFunc should not be called")
+		imagePruneFunc: func(client.ImagePruneOptions) (client.ImagePruneResult, error) {
+			return client.ImagePruneResult{}, errors.New("fakeClient imagePruneFunc should not be called")
 		},
 	})
 	cmd := newPruneCommand(cli)
@@ -38,7 +38,7 @@ func TestNewSaveCommandErrors(t *testing.T) {
 			isTerminal:    false,
 			expectedError: "error saving image",
 			imageSaveFunc: func(images []string, options ...client.ImageSaveOption) (client.ImageSaveResult, error) {
-				return client.ImageSaveResult{}, errors.New("error saving image")
+				return nil, errors.New("error saving image")
 			},
 		},
 		{
@@ -83,7 +83,7 @@ func TestNewSaveCommandSuccess(t *testing.T) {
 			imageSaveFunc: func(images []string, options ...client.ImageSaveOption) (client.ImageSaveResult, error) {
 				assert.Assert(t, is.Len(images, 1))
 				assert.Check(t, is.Equal("arg1", images[0]))
-				return client.ImageSaveResult{}, nil
+				return io.NopCloser(strings.NewReader("")), nil
 			},
 			deferredFunc: func() {
 				_ = os.Remove("save_tmp_file")
@@ -96,7 +96,7 @@ func TestNewSaveCommandSuccess(t *testing.T) {
 				assert.Assert(t, is.Len(images, 2))
 				assert.Check(t, is.Equal("arg1", images[0]))
 				assert.Check(t, is.Equal("arg2", images[1]))
-				return client.ImageSaveResult{}, nil
+				return io.NopCloser(strings.NewReader("")), nil
 			},
 		},
 		{
@@ -108,7 +108,7 @@ func TestNewSaveCommandSuccess(t *testing.T) {
 				// FIXME(thaJeztah): need to find appropriate way to test the result of "ImageHistoryWithPlatform" being applied
 				assert.Check(t, len(options) > 0) // can be 1 or two depending on whether a terminal is attached :/
 				// assert.Check(t, is.Contains(options, client.ImageHistoryWithPlatform(ocispec.Platform{OS: "linux", Architecture: "amd64"})))
-				return client.ImageSaveResult{}, nil
+				return io.NopCloser(strings.NewReader("")), nil
 			},
 		},
 		{
@@ -118,7 +118,7 @@ func TestNewSaveCommandSuccess(t *testing.T) {
 				assert.Assert(t, is.Len(images, 1))
 				assert.Check(t, is.Equal("arg1", images[0]))
 				assert.Check(t, len(options) > 0) // can be 1 or 2 depending on whether a terminal is attached :/
-				return client.ImageSaveResult{}, nil
+				return io.NopCloser(strings.NewReader("")), nil
 			},
 		},
 		{
@@ -128,7 +128,7 @@ func TestNewSaveCommandSuccess(t *testing.T) {
 				assert.Assert(t, is.Len(images, 1))
 				assert.Check(t, is.Equal("arg1", images[0]))
 				assert.Check(t, len(options) > 0) // can be 1 or 2 depending on whether a terminal is attached :/
-				return client.ImageSaveResult{}, nil
+				return io.NopCloser(strings.NewReader("")), nil
 			},
 		},
 	}
@@ -72,7 +72,7 @@ func runPrune(ctx context.Context, dockerCli command.Cli, options pruneOptions)
 		}
 	}
 
-	res, err := dockerCli.Client().NetworksPrune(ctx, client.NetworkPruneOptions{
+	res, err := dockerCli.Client().NetworkPrune(ctx, client.NetworkPruneOptions{
 		Filters: pruneFilters,
 	})
 	if err != nil {
@@ -382,11 +382,11 @@ func (ctx *serviceInspectContext) UpdateDelay() time.Duration {
 }
 
 func (ctx *serviceInspectContext) UpdateOnFailure() string {
-	return ctx.Service.Spec.UpdateConfig.FailureAction
+	return string(ctx.Service.Spec.UpdateConfig.FailureAction)
 }
 
 func (ctx *serviceInspectContext) UpdateOrder() string {
-	return ctx.Service.Spec.UpdateConfig.Order
+	return string(ctx.Service.Spec.UpdateConfig.Order)
 }
 
 func (ctx *serviceInspectContext) HasUpdateMonitor() bool {
@@ -418,7 +418,7 @@ func (ctx *serviceInspectContext) RollbackDelay() time.Duration {
 }
 
 func (ctx *serviceInspectContext) RollbackOnFailure() string {
-	return ctx.Service.Spec.RollbackConfig.FailureAction
+	return string(ctx.Service.Spec.RollbackConfig.FailureAction)
 }
 
 func (ctx *serviceInspectContext) HasRollbackMonitor() bool {
@@ -434,7 +434,7 @@ func (ctx *serviceInspectContext) RollbackMaxFailureRatio() float32 {
 }
 
 func (ctx *serviceInspectContext) RollbackOrder() string {
-	return ctx.Service.Spec.RollbackConfig.Order
+	return string(ctx.Service.Spec.RollbackConfig.Order)
 }
 
 func (ctx *serviceInspectContext) ContainerImage() string {
@@ -164,9 +164,9 @@ func updateConfigFromDefaults(defaultUpdateConfig *api.UpdateConfig) *swarm.Upda
 		Parallelism:     defaultUpdateConfig.Parallelism,
 		Delay:           defaultUpdateConfig.Delay,
 		Monitor:         defaultMonitor,
-		FailureAction:   defaultFailureAction,
+		FailureAction:   swarm.FailureAction(defaultFailureAction),
 		MaxFailureRatio: defaultUpdateConfig.MaxFailureRatio,
-		Order:           defaultOrder(defaultUpdateConfig.Order),
+		Order:           swarm.UpdateOrder(defaultOrder(defaultUpdateConfig.Order)),
 	}
 }
 
@@ -187,13 +187,13 @@ func (o updateOptions) updateConfig(flags *pflag.FlagSet) *swarm.UpdateConfig {
 		updateConfig.Monitor = o.monitor
 	}
 	if flags.Changed(flagUpdateFailureAction) {
-		updateConfig.FailureAction = o.onFailure
+		updateConfig.FailureAction = swarm.FailureAction(o.onFailure)
 	}
 	if flags.Changed(flagUpdateMaxFailureRatio) {
 		updateConfig.MaxFailureRatio = o.maxFailureRatio.Value()
 	}
 	if flags.Changed(flagUpdateOrder) {
-		updateConfig.Order = o.order
+		updateConfig.Order = swarm.UpdateOrder(o.order)
 	}
 
 	return updateConfig
@@ -216,13 +216,13 @@ func (o updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConfig
 		updateConfig.Monitor = o.monitor
 	}
 	if flags.Changed(flagRollbackFailureAction) {
-		updateConfig.FailureAction = o.onFailure
+		updateConfig.FailureAction = swarm.FailureAction(o.onFailure)
 	}
 	if flags.Changed(flagRollbackMaxFailureRatio) {
 		updateConfig.MaxFailureRatio = o.maxFailureRatio.Value()
 	}
 	if flags.Changed(flagRollbackOrder) {
-		updateConfig.Order = o.order
+		updateConfig.Order = swarm.UpdateOrder(o.order)
 	}
 
 	return updateConfig
@@ -299,9 +299,9 @@ func defaultRestartCondition() swarm.RestartPolicyCondition {
 func defaultOrder(order api.UpdateConfig_UpdateOrder) string {
 	switch order {
 	case api.UpdateConfig_STOP_FIRST:
-		return swarm.UpdateOrderStopFirst
+		return string(swarm.UpdateOrderStopFirst)
 	case api.UpdateConfig_START_FIRST:
-		return swarm.UpdateOrderStartFirst
+		return string(swarm.UpdateOrderStartFirst)
 	default:
 		return ""
 	}
@@ -269,16 +269,16 @@ func TestToServiceUpdateRollback(t *testing.T) {
 	flags.Set("update-parallelism", "23")
 	flags.Set("update-delay", "34s")
 	flags.Set("update-monitor", "54321ns")
-	flags.Set("update-failure-action", swarm.UpdateFailureActionPause)
+	flags.Set("update-failure-action", string(swarm.UpdateFailureActionPause))
 	flags.Set("update-max-failure-ratio", "0.6")
-	flags.Set("update-order", swarm.UpdateOrderStopFirst)
+	flags.Set("update-order", string(swarm.UpdateOrderStopFirst))
 
 	flags.Set("rollback-parallelism", "12")
 	flags.Set("rollback-delay", "23s")
 	flags.Set("rollback-monitor", "12345ns")
-	flags.Set("rollback-failure-action", swarm.UpdateFailureActionContinue)
+	flags.Set("rollback-failure-action", string(swarm.UpdateFailureActionContinue))
 	flags.Set("rollback-max-failure-ratio", "0.5")
-	flags.Set("rollback-order", swarm.UpdateOrderStartFirst)
+	flags.Set("rollback-order", string(swarm.UpdateOrderStartFirst))
 
 	o := newServiceOptions()
 	o.mode = "replicated"
@@ -286,17 +286,17 @@ func TestToServiceUpdateRollback(t *testing.T) {
 		parallelism:     23,
 		delay:           34 * time.Second,
 		monitor:         54321 * time.Nanosecond,
-		onFailure:       swarm.UpdateFailureActionPause,
+		onFailure:       string(swarm.UpdateFailureActionPause),
 		maxFailureRatio: 0.6,
-		order:           swarm.UpdateOrderStopFirst,
+		order:           string(swarm.UpdateOrderStopFirst),
 	}
 	o.rollback = updateOptions{
 		parallelism:     12,
 		delay:           23 * time.Second,
 		monitor:         12345 * time.Nanosecond,
-		onFailure:       swarm.UpdateFailureActionContinue,
+		onFailure:       string(swarm.UpdateFailureActionContinue),
 		maxFailureRatio: 0.5,
-		order:           swarm.UpdateOrderStartFirst,
+		order:           string(swarm.UpdateOrderStartFirst),
 	}
 
 	service, err := o.ToService(context.Background(), &fakeClient{}, flags)
@@ -307,18 +307,18 @@ func TestToServiceUpdateRollback(t *testing.T) {
 
 func TestToServiceUpdateRollbackOrder(t *testing.T) {
 	flags := newCreateCommand(nil).Flags()
-	flags.Set("update-order", swarm.UpdateOrderStartFirst)
-	flags.Set("rollback-order", swarm.UpdateOrderStartFirst)
+	flags.Set("update-order", string(swarm.UpdateOrderStartFirst))
+	flags.Set("rollback-order", string(swarm.UpdateOrderStartFirst))
 
 	o := newServiceOptions()
 	o.mode = "replicated"
-	o.update = updateOptions{order: swarm.UpdateOrderStartFirst}
-	o.rollback = updateOptions{order: swarm.UpdateOrderStartFirst}
+	o.update = updateOptions{order: string(swarm.UpdateOrderStartFirst)}
+	o.rollback = updateOptions{order: string(swarm.UpdateOrderStartFirst)}
 
 	service, err := o.ToService(context.Background(), &fakeClient{}, flags)
 	assert.NilError(t, err)
-	assert.Check(t, is.Equal(service.UpdateConfig.Order, o.update.order))
-	assert.Check(t, is.Equal(service.RollbackConfig.Order, o.rollback.order))
+	assert.Check(t, is.Equal(string(service.UpdateConfig.Order), o.update.order))
+	assert.Check(t, is.Equal(string(service.RollbackConfig.Order), o.rollback.order))
 }
 
 func TestToServiceMaxReplicasGlobalModeConflict(t *testing.T) {
@@ -228,7 +228,7 @@ func runUpdate(ctx context.Context, dockerCLI command.Cli, flags *pflag.FlagSet,
 		}
 		updateOpts.EncodedRegistryAuth = encodedAuth
 	} else {
-		registryAuthFrom = swarm.RegistryAuthFromSpec
+		registryAuthFrom = string(swarm.RegistryAuthFromSpec)
 	}
 
 	response, err := apiClient.ServiceUpdate(ctx, res.Service.ID, client.ServiceUpdateOptions{
@@ -236,7 +236,7 @@ func runUpdate(ctx context.Context, dockerCLI command.Cli, flags *pflag.FlagSet,
 		Spec: *spec,
 
 		EncodedRegistryAuth: encodedAuth,
-		RegistryAuthFrom:    registryAuthFrom,
+		RegistryAuthFrom:    swarm.RegistryAuthSource(registryAuthFrom),
 		Rollback:            rollbackAction,
 	})
 	if err != nil {
@@ -433,9 +433,15 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags
 		updateUint64(flagUpdateParallelism, &spec.UpdateConfig.Parallelism)
 		updateDuration(flagUpdateDelay, &spec.UpdateConfig.Delay)
 		updateDuration(flagUpdateMonitor, &spec.UpdateConfig.Monitor)
-		updateString(flagUpdateFailureAction, &spec.UpdateConfig.FailureAction)
+		if flags.Changed(flagUpdateFailureAction) {
+			value, _ := flags.GetString(flagUpdateFailureAction)
+			spec.UpdateConfig.FailureAction = swarm.FailureAction(value)
+		}
 		updateFloatValue(flagUpdateMaxFailureRatio, &spec.UpdateConfig.MaxFailureRatio)
-		updateString(flagUpdateOrder, &spec.UpdateConfig.Order)
+		if flags.Changed(flagUpdateOrder) {
+			value, _ := flags.GetString(flagUpdateOrder)
+			spec.UpdateConfig.Order = swarm.UpdateOrder(value)
+		}
 	}
 
 	if anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio, flagRollbackOrder) {
@@ -445,9 +451,15 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags
 		updateUint64(flagRollbackParallelism, &spec.RollbackConfig.Parallelism)
 		updateDuration(flagRollbackDelay, &spec.RollbackConfig.Delay)
 		updateDuration(flagRollbackMonitor, &spec.RollbackConfig.Monitor)
-		updateString(flagRollbackFailureAction, &spec.RollbackConfig.FailureAction)
+		if flags.Changed(flagRollbackFailureAction) {
+			value, _ := flags.GetString(flagRollbackFailureAction)
+			spec.RollbackConfig.FailureAction = swarm.FailureAction(value)
+		}
 		updateFloatValue(flagRollbackMaxFailureRatio, &spec.RollbackConfig.MaxFailureRatio)
-		updateString(flagRollbackOrder, &spec.RollbackConfig.Order)
+		if flags.Changed(flagRollbackOrder) {
+			value, _ := flags.GetString(flagRollbackOrder)
+			spec.RollbackConfig.Order = swarm.UpdateOrder(value)
+		}
 	}
 
 	if flags.Changed(flagEndpointMode) {
@@ -38,7 +38,7 @@ func (cli *fakeClient) ContainerList(ctx context.Context, options client.Contain
 	return client.ContainerListResult{}, nil
 }
 
-func (cli *fakeClient) ContainersPrune(ctx context.Context, opts client.ContainerPruneOptions) (client.ContainerPruneResult, error) {
+func (cli *fakeClient) ContainerPrune(ctx context.Context, opts client.ContainerPruneOptions) (client.ContainerPruneResult, error) {
 	if cli.containerPruneFunc != nil {
 		return cli.containerPruneFunc(ctx, opts)
 	}
@@ -42,7 +42,9 @@ func newDiskUsageCommand(dockerCLI command.Cli) *cobra.Command {
 
 func runDiskUsage(ctx context.Context, dockerCli command.Cli, opts diskUsageOptions) error {
 	// TODO expose types.DiskUsageOptions.Types as flag on the command-line and/or as separate commands (docker container df / docker container usage)
-	du, err := dockerCli.Client().DiskUsage(ctx, client.DiskUsageOptions{})
+	du, err := dockerCli.Client().DiskUsage(ctx, client.DiskUsageOptions{
+		Verbose: opts.verbose,
+	})
 	if err != nil {
 		return err
 	}
@@ -52,25 +54,16 @@ func runDiskUsage(ctx context.Context, dockerCli command.Cli, opts diskUsageOpti
 		format = formatter.TableFormatKey
 	}
 
-	var bsz int64
-	for _, bc := range du.BuildCache {
-		if !bc.Shared {
-			bsz += bc.Size
-		}
-	}
-
 	duCtx := formatter.DiskUsageContext{
 		Context: formatter.Context{
 			Output: dockerCli.Out(),
 			Format: formatter.NewDiskUsageFormat(format, opts.verbose),
 		},
-		LayersSize:  du.LayersSize,
-		BuilderSize: bsz,
-		BuildCache:  du.BuildCache,
-		Images:      du.Images,
-		Containers:  du.Containers,
-		Volumes:     du.Volumes,
-		Verbose:     opts.verbose,
+		Verbose:             opts.verbose,
+		ImageDiskUsage:      du.Images,
+		BuildCacheDiskUsage: du.BuildCache,
+		ContainerDiskUsage:  du.Containers,
+		VolumeDiskUsage:     du.Volumes,
 	}
 
 	return duCtx.Write()
@@ -202,9 +202,7 @@ func prettyPrintInfo(streams command.Streams, info dockerInfo) error {
 	fprintln(streams.Out())
 	fprintln(streams.Out(), "Server:")
 	if info.Info != nil {
-		for _, err := range prettyPrintServerInfo(streams, &info) {
-			info.ServerErrors = append(info.ServerErrors, err.Error())
-		}
+		prettyPrintServerInfo(streams, &info)
 	}
 	for _, err := range info.ServerErrors {
 		fprintln(streams.Err(), "ERROR:", err)
@@ -240,8 +238,7 @@ func prettyPrintClientInfo(streams command.Streams, info clientInfo) {
 }
 
 //nolint:gocyclo
-func prettyPrintServerInfo(streams command.Streams, info *dockerInfo) []error {
-	var errs []error
+func prettyPrintServerInfo(streams command.Streams, info *dockerInfo) {
 	output := streams.Out()
 
 	fprintln(output, " Containers:", info.Containers)
@@ -306,17 +303,14 @@ func prettyPrintServerInfo(streams command.Streams, info *dockerInfo) []error {
 	fprintln(output, " containerd version:", info.ContainerdCommit.ID)
 	fprintln(output, " runc version:", info.RuncCommit.ID)
 	fprintln(output, " init version:", info.InitCommit.ID)
-	if len(info.SecurityOptions) != 0 {
-		if kvs, err := security.DecodeOptions(info.SecurityOptions); err != nil {
-			errs = append(errs, err)
-		} else {
-			fprintln(output, " Security Options:")
-			for _, so := range kvs {
-				fprintln(output, "  "+so.Name)
-				for _, o := range so.Options {
-					if o.Key == "profile" {
-						fprintln(output, "   Profile:", o.Value)
-					}
+	secopts := security.DecodeOptions(info.SecurityOptions)
+	if len(secopts) != 0 {
+		fprintln(output, " Security Options:")
+		for _, so := range secopts {
+			fprintln(output, "  "+so.Name)
+			for _, o := range so.Options {
+				if o.Key == "profile" {
+					fprintln(output, "   Profile:", o.Value)
 				}
 			}
 		}
@@ -407,8 +401,6 @@ func prettyPrintServerInfo(streams command.Streams, info *dockerInfo) []error {
 	for _, w := range info.Warnings {
 		fprintln(streams.Err(), w)
 	}
-
-	return errs
 }
 
 //nolint:gocyclo
@@ -1,2 +1 @@
-ERROR: a server error occurred
 ERROR: invalid empty security option
@@ -36,7 +36,7 @@ func (c *fakeClient) VolumeList(_ context.Context, options client.VolumeListOpti
 	return client.VolumeListResult{}, nil
 }
 
-func (c *fakeClient) VolumesPrune(_ context.Context, opts client.VolumePruneOptions) (client.VolumePruneResult, error) {
+func (c *fakeClient) VolumePrune(_ context.Context, opts client.VolumePruneOptions) (client.VolumePruneResult, error) {
 	if c.volumePruneFunc != nil {
 		return c.volumePruneFunc(opts)
 	}
@@ -90,7 +90,7 @@ func runPrune(ctx context.Context, dockerCli command.Cli, options pruneOptions)
 		}
 	}
 
-	res, err := dockerCli.Client().VolumesPrune(ctx, client.VolumePruneOptions{
+	res, err := dockerCli.Client().VolumePrune(ctx, client.VolumePruneOptions{
 		Filters: pruneFilters,
 	})
 	if err != nil {
@@ -507,10 +507,10 @@ func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig
 	return &swarm.UpdateConfig{
 		Parallelism:     parallel,
 		Delay:           time.Duration(source.Delay),
-		FailureAction:   source.FailureAction,
+		FailureAction:   swarm.FailureAction(source.FailureAction),
 		Monitor:         time.Duration(source.Monitor),
 		MaxFailureRatio: source.MaxFailureRatio,
-		Order:           source.Order,
+		Order:           swarm.UpdateOrder(source.Order),
 	}
 }
 
@@ -427,19 +427,19 @@ func TestConvertCredentialSpec(t *testing.T) {
 func TestConvertUpdateConfigOrder(t *testing.T) {
 	// test default behavior
 	updateConfig := convertUpdateConfig(&composetypes.UpdateConfig{})
-	assert.Check(t, is.Equal("", updateConfig.Order))
+	assert.Check(t, is.Equal("", string(updateConfig.Order)))
 
 	// test start-first
 	updateConfig = convertUpdateConfig(&composetypes.UpdateConfig{
 		Order: "start-first",
 	})
-	assert.Check(t, is.Equal(updateConfig.Order, "start-first"))
+	assert.Check(t, is.Equal(string(updateConfig.Order), "start-first"))
 
 	// test stop-first
 	updateConfig = convertUpdateConfig(&composetypes.UpdateConfig{
 		Order: "stop-first",
 	})
-	assert.Check(t, is.Equal(updateConfig.Order, "stop-first"))
+	assert.Check(t, is.Equal(string(updateConfig.Order), "stop-first"))
 }
 
 func TestConvertFileObject(t *testing.T) {
@@ -28,8 +28,8 @@ require (
 	github.com/google/uuid v1.6.0
 	github.com/mattn/go-runewidth v0.0.19
 	github.com/moby/go-archive v0.1.0
-	github.com/moby/moby/api v1.52.0-beta.4
-	github.com/moby/moby/client v0.1.0-beta.3
+	github.com/moby/moby/api v1.52.0-beta.4.0.20251106210608-f7fd9c315acf
+	github.com/moby/moby/client v0.1.0-beta.3.0.20251106221347-217fd7890581
 	github.com/moby/patternmatcher v0.6.0
 	github.com/moby/swarmkit/v2 v2.1.1
 	github.com/moby/sys/atomicwriter v0.1.0
@@ -105,3 +105,5 @@ require (
 	google.golang.org/grpc v1.72.2 // indirect
 	google.golang.org/protobuf v1.36.9 // indirect
 )
+
+replace github.com/moby/moby/api => github.com/moby/moby/api v1.52.0-beta.4.0.20251106221347-217fd7890581
@@ -113,10 +113,10 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N
 github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
 github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
 github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo=
-github.com/moby/moby/api v1.52.0-beta.4 h1:p05ztW787RcwcQx8YFcda1sb0ExqxDLHTjBQA3Jq6BY=
-github.com/moby/moby/api v1.52.0-beta.4/go.mod h1:v0K/motq8oWmx+rtApG1rBTIpQ8KUONUjpf+U73gags=
-github.com/moby/moby/client v0.1.0-beta.3 h1:y/ed2VpRmW8As56TFOaXF2zVSli38IS032zAAGt6kcI=
-github.com/moby/moby/client v0.1.0-beta.3/go.mod h1:dvvUX065yfMkFXvPk14AXVz8wmew2MdeXJ/rcYEPr7g=
+github.com/moby/moby/api v1.52.0-beta.4.0.20251106221347-217fd7890581 h1:Qa8MTSJUIV0K8sbG3RgJeEp5Q0GzWIVCKtalKpWewPo=
+github.com/moby/moby/api v1.52.0-beta.4.0.20251106221347-217fd7890581/go.mod h1:v0K/motq8oWmx+rtApG1rBTIpQ8KUONUjpf+U73gags=
+github.com/moby/moby/client v0.1.0-beta.3.0.20251106221347-217fd7890581 h1:pW39Fs9C96BH5ZRzwYLrIXNtFLuD0V72BkJHpwtcC/E=
+github.com/moby/moby/client v0.1.0-beta.3.0.20251106221347-217fd7890581/go.mod h1:py3jWFsk61C4EZ1Cv8zgbv7nsJ6NjGzZFVdPoSSJ3NE=
 github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
 github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
 github.com/moby/swarmkit/v2 v2.1.1 h1:yvTJ8MMCc3f0qTA44J6R59EZ5yZawdYopkpuLk4+ICU=

vendor/github.com/moby/moby/api/types/build/disk_usage.go | 36 (generated, vendored, new file)
@@ -0,0 +1,36 @@
// Code generated by go-swagger; DO NOT EDIT.

package build

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// DiskUsage represents system data usage for build cache resources.
//
// swagger:model DiskUsage
type DiskUsage struct {

    // Count of active build cache records.
    //
    // Example: 1
    ActiveBuildCacheRecords int64 `json:"ActiveBuildCacheRecords,omitempty"`

    // List of build cache records.
    //
    Items []CacheRecord `json:"Items,omitempty"`

    // Disk space that can be reclaimed by removing inactive build cache records.
    //
    // Example: 12345678
    Reclaimable int64 `json:"Reclaimable,omitempty"`

    // Count of all build cache records.
    //
    // Example: 4
    TotalBuildCacheRecords int64 `json:"TotalBuildCacheRecords,omitempty"`

    // Disk space in use by build cache records.
    //
    // Example: 98765432
    TotalSize int64 `json:"TotalSize,omitempty"`
}
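
Review note: the four new generated DiskUsage models (build here; container, image, and volume follow the same shape below) are plain JSON models. A minimal decoding sketch against this vendored package, standard library only; the payload values are taken from the Example annotations above:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/moby/moby/api/types/build"
)

func main() {
    // Shaped like the BuildCacheUsage section of a GET /system/df response.
    payload := []byte(`{
        "ActiveBuildCacheRecords": 1,
        "TotalBuildCacheRecords": 4,
        "Reclaimable": 12345678,
        "TotalSize": 98765432
    }`)

    var du build.DiskUsage
    if err := json.Unmarshal(payload, &du); err != nil {
        panic(err)
    }
    fmt.Printf("%d/%d build cache records active, %d bytes reclaimable\n",
        du.ActiveBuildCacheRecords, du.TotalBuildCacheRecords, du.Reclaimable)
}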

vendor/github.com/moby/moby/api/types/container/disk_usage.go | 36 (generated, vendored, new file)
@@ -0,0 +1,36 @@
// Code generated by go-swagger; DO NOT EDIT.

package container

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// DiskUsage represents system data usage information for container resources.
//
// swagger:model DiskUsage
type DiskUsage struct {

    // Count of active containers.
    //
    // Example: 1
    ActiveContainers int64 `json:"ActiveContainers,omitempty"`

    // List of container summaries.
    //
    Items []Summary `json:"Items,omitempty"`

    // Disk space that can be reclaimed by removing inactive containers.
    //
    // Example: 12345678
    Reclaimable int64 `json:"Reclaimable,omitempty"`

    // Count of all containers.
    //
    // Example: 4
    TotalContainers int64 `json:"TotalContainers,omitempty"`

    // Disk space in use by containers.
    //
    // Example: 98765432
    TotalSize int64 `json:"TotalSize,omitempty"`
}

vendor/github.com/moby/moby/api/types/image/disk_usage.go | 36 (generated, vendored, new file)
@@ -0,0 +1,36 @@
// Code generated by go-swagger; DO NOT EDIT.

package image

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// DiskUsage represents system data usage for image resources.
//
// swagger:model DiskUsage
type DiskUsage struct {

    // Count of active images.
    //
    // Example: 1
    ActiveImages int64 `json:"ActiveImages,omitempty"`

    // List of image summaries.
    //
    Items []Summary `json:"Items,omitempty"`

    // Disk space that can be reclaimed by removing unused images.
    //
    // Example: 12345678
    Reclaimable int64 `json:"Reclaimable,omitempty"`

    // Count of all images.
    //
    // Example: 4
    TotalImages int64 `json:"TotalImages,omitempty"`

    // Disk space in use by images.
    //
    // Example: 98765432
    TotalSize int64 `json:"TotalSize,omitempty"`
}

vendor/github.com/moby/moby/api/types/swarm/service.go | 31 (generated, vendored)
@@ -106,18 +106,27 @@ type ReplicatedJob struct {
// This type is deliberately empty.
type GlobalJob struct{}

// FailureAction is the action to perform when updating a service fails.
type FailureAction string

const (
    // UpdateFailureActionPause PAUSE
    UpdateFailureActionPause = "pause"
    UpdateFailureActionPause FailureAction = "pause"
    // UpdateFailureActionContinue CONTINUE
    UpdateFailureActionContinue = "continue"
    UpdateFailureActionContinue FailureAction = "continue"
    // UpdateFailureActionRollback ROLLBACK
    UpdateFailureActionRollback = "rollback"
    UpdateFailureActionRollback FailureAction = "rollback"
)

// UpdateOrder is the order of operations when rolling out or rolling back
// updated tasks for a service.
type UpdateOrder string

const (
    // UpdateOrderStopFirst STOP_FIRST
    UpdateOrderStopFirst = "stop-first"
    UpdateOrderStopFirst UpdateOrder = "stop-first"
    // UpdateOrderStartFirst START_FIRST
    UpdateOrderStartFirst = "start-first"
    UpdateOrderStartFirst UpdateOrder = "start-first"
)

// UpdateConfig represents the update configuration.
@@ -130,7 +139,7 @@ type UpdateConfig struct {
    Delay time.Duration `json:",omitempty"`

    // FailureAction is the action to take when an update fails.
    FailureAction string `json:",omitempty"`
    FailureAction FailureAction `json:",omitempty"`

    // Monitor indicates how long to monitor a task for failure after it is
    // created. If the task fails by ending up in one of the states
@@ -156,7 +165,7 @@ type UpdateConfig struct {
    // Order indicates the order of operations when rolling out an updated
    // task. Either the old task is shut down before the new task is
    // started, or the new task is started before the old task is shut down.
    Order string
    Order UpdateOrder
}

// ServiceStatus represents the number of running tasks in a service and the
@@ -198,8 +207,12 @@ type JobStatus struct {
    LastExecution time.Time `json:",omitempty"`
}

// RegistryAuthSource defines options for the "registryAuthFrom" query parameter
// on service update.
type RegistryAuthSource string

// Values for RegistryAuthFrom in ServiceUpdateOptions
const (
    RegistryAuthFromSpec = "spec"
    RegistryAuthFromPreviousSpec = "previous-spec"
    RegistryAuthFromSpec         RegistryAuthSource = "spec"
    RegistryAuthFromPreviousSpec RegistryAuthSource = "previous-spec"
)
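
Review note: FailureAction and UpdateOrder are now named string types, so the constants carry their type and stray strings no longer unify silently. A small sketch under the vendored swarm package; converting back to plain string (as the updated CLI test at the top of this diff does) stays explicit:

package main

import (
    "fmt"
    "time"

    "github.com/moby/moby/api/types/swarm"
)

func main() {
    cfg := swarm.UpdateConfig{
        Parallelism:   2,
        Delay:         10 * time.Second,
        FailureAction: swarm.UpdateFailureActionRollback, // typed FailureAction now
        Order:         swarm.UpdateOrderStartFirst,       // typed UpdateOrder now
    }

    // Crossing back into string-typed code needs an explicit conversion.
    fmt.Println(string(cfg.Order)) // "start-first"
}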

vendor/github.com/moby/moby/api/types/system/disk_usage.go | 28 (generated, vendored)
@@ -24,9 +24,27 @@ const (
// DiskUsage contains response of Engine API:
// GET "/system/df"
type DiskUsage struct {
    LayersSize int64
    Images     []*image.Summary
    Containers []*container.Summary
    Volumes    []*volume.Volume
    BuildCache []*build.CacheRecord
    LegacyDiskUsage

    ImageUsage      *image.DiskUsage     `json:"ImageUsage,omitempty"`
    ContainerUsage  *container.DiskUsage `json:"ContainerUsage,omitempty"`
    VolumeUsage     *volume.DiskUsage    `json:"VolumeUsage,omitempty"`
    BuildCacheUsage *build.DiskUsage     `json:"BuildCacheUsage,omitempty"`
}

type LegacyDiskUsage struct {
    // Deprecated: kept to maintain backwards compatibility with API < v1.52, use [ImagesDiskUsage.TotalSize] instead.
    LayersSize int64 `json:"LayersSize,omitempty"`

    // Deprecated: kept to maintain backwards compatibility with API < v1.52, use [ImagesDiskUsage.Items] instead.
    Images []image.Summary `json:"Images,omitzero"`

    // Deprecated: kept to maintain backwards compatibility with API < v1.52, use [ContainersDiskUsage.Items] instead.
    Containers []container.Summary `json:"Containers,omitzero"`

    // Deprecated: kept to maintain backwards compatibility with API < v1.52, use [VolumesDiskUsage.Items] instead.
    Volumes []volume.Volume `json:"Volumes,omitzero"`

    // Deprecated: kept to maintain backwards compatibility with API < v1.52, use [BuildCacheDiskUsage.Items] instead.
    BuildCache []build.CacheRecord `json:"BuildCache,omitzero"`
}

vendor/github.com/moby/moby/api/types/volume/disk_usage.go | 36 (generated, vendored, new file)
@@ -0,0 +1,36 @@
// Code generated by go-swagger; DO NOT EDIT.

package volume

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// DiskUsage represents system data usage for volume resources.
//
// swagger:model DiskUsage
type DiskUsage struct {

    // Count of active volumes.
    //
    // Example: 1
    ActiveVolumes int64 `json:"ActiveVolumes,omitempty"`

    // List of volumes.
    //
    Items []Volume `json:"Items,omitempty"`

    // Disk space that can be reclaimed by removing inactive volumes.
    //
    // Example: 12345678
    Reclaimable int64 `json:"Reclaimable,omitempty"`

    // Disk space in use by volumes.
    //
    // Example: 98765432
    TotalSize int64 `json:"TotalSize,omitempty"`

    // Count of all volumes.
    //
    // Example: 4
    TotalVolumes int64 `json:"TotalVolumes,omitempty"`
}

vendor/github.com/moby/moby/client/client_interfaces.go | 12 (generated, vendored)
@@ -4,8 +4,6 @@ import (
    "context"
    "io"
    "net"

    "github.com/moby/moby/api/types/system"
)

// APIClient is an interface that clients that talk with a docker server must implement.
@@ -74,7 +72,7 @@ type ContainerAPIClient interface {
    ContainerWait(ctx context.Context, container string, options ContainerWaitOptions) ContainerWaitResult
    CopyFromContainer(ctx context.Context, container string, options CopyFromContainerOptions) (CopyFromContainerResult, error)
    CopyToContainer(ctx context.Context, container string, options CopyToContainerOptions) (CopyToContainerResult, error)
    ContainersPrune(ctx context.Context, opts ContainerPruneOptions) (ContainerPruneResult, error)
    ContainerPrune(ctx context.Context, opts ContainerPruneOptions) (ContainerPruneResult, error)
}

type ExecAPIClient interface {
@@ -103,7 +101,7 @@ type ImageAPIClient interface {
    ImageRemove(ctx context.Context, image string, options ImageRemoveOptions) (ImageRemoveResult, error)
    ImageSearch(ctx context.Context, term string, options ImageSearchOptions) (ImageSearchResult, error)
    ImageTag(ctx context.Context, options ImageTagOptions) (ImageTagResult, error)
    ImagesPrune(ctx context.Context, opts ImagePruneOptions) (ImagePruneResult, error)
    ImagePrune(ctx context.Context, opts ImagePruneOptions) (ImagePruneResult, error)

    ImageInspect(ctx context.Context, image string, _ ...ImageInspectOption) (ImageInspectResult, error)
    ImageHistory(ctx context.Context, image string, _ ...ImageHistoryOption) (ImageHistoryResult, error)
@@ -119,7 +117,7 @@ type NetworkAPIClient interface {
    NetworkInspect(ctx context.Context, network string, options NetworkInspectOptions) (NetworkInspectResult, error)
    NetworkList(ctx context.Context, options NetworkListOptions) (NetworkListResult, error)
    NetworkRemove(ctx context.Context, network string, options NetworkRemoveOptions) (NetworkRemoveResult, error)
    NetworksPrune(ctx context.Context, opts NetworkPruneOptions) (NetworkPruneResult, error)
    NetworkPrune(ctx context.Context, opts NetworkPruneOptions) (NetworkPruneResult, error)
}

// NodeAPIClient defines API client methods for the nodes
@@ -173,7 +171,7 @@ type SystemAPIClient interface {
    Events(ctx context.Context, options EventsListOptions) EventsResult
    Info(ctx context.Context, options InfoOptions) (SystemInfoResult, error)
    RegistryLogin(ctx context.Context, auth RegistryLoginOptions) (RegistryLoginResult, error)
    DiskUsage(ctx context.Context, options DiskUsageOptions) (system.DiskUsage, error)
    DiskUsage(ctx context.Context, options DiskUsageOptions) (DiskUsageResult, error)
    Ping(ctx context.Context, options PingOptions) (PingResult, error)
}

@@ -183,7 +181,7 @@ type VolumeAPIClient interface {
    VolumeInspect(ctx context.Context, volumeID string, options VolumeInspectOptions) (VolumeInspectResult, error)
    VolumeList(ctx context.Context, options VolumeListOptions) (VolumeListResult, error)
    VolumeRemove(ctx context.Context, volumeID string, options VolumeRemoveOptions) (VolumeRemoveResult, error)
    VolumesPrune(ctx context.Context, options VolumePruneOptions) (VolumePruneResult, error)
    VolumePrune(ctx context.Context, options VolumePruneOptions) (VolumePruneResult, error)
    VolumeUpdate(ctx context.Context, volumeID string, options VolumeUpdateOptions) (VolumeUpdateResult, error)
}
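
Review note: every prune method drops its plural 's' (ContainersPrune becomes ContainerPrune, likewise images, networks, volumes). A caller sketch against the new interface; the SpaceReclaimed fields on the prune reports are the usual API shape, assumed here rather than shown in this hunk:

package example

import (
    "context"
    "fmt"

    "github.com/moby/moby/client"
)

func pruneAll(ctx context.Context, apiClient client.APIClient) error {
    // Renamed from ContainersPrune.
    cres, err := apiClient.ContainerPrune(ctx, client.ContainerPruneOptions{})
    if err != nil {
        return err
    }
    // SpaceReclaimed is assumed from the usual prune report shape.
    fmt.Println("containers:", cres.Report.SpaceReclaimed, "bytes reclaimed")

    // Renamed from ImagesPrune.
    ires, err := apiClient.ImagePrune(ctx, client.ImagePruneOptions{})
    if err != nil {
        return err
    }
    fmt.Println("images:", ires.Report.SpaceReclaimed, "bytes reclaimed")
    return nil
}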

vendor/github.com/moby/moby/client/container_exec.go | 68 (generated, vendored)
@@ -12,17 +12,17 @@ import (
// ExecCreateOptions is a small subset of the Config struct that holds the configuration
// for the exec feature of docker.
type ExecCreateOptions struct {
    User         string   // User that will run the command
    Privileged   bool     // Is the container in privileged mode
    Tty          bool     // Attach standard streams to a tty.
    ConsoleSize  *[2]uint `json:",omitempty"` // Initial console size [height, width]
    AttachStdin  bool     // Attach the standard input, makes possible user interaction
    AttachStderr bool     // Attach the standard error
    AttachStdout bool     // Attach the standard output
    DetachKeys   string   // Escape keys for detach
    Env          []string // Environment variables
    WorkingDir   string   // Working directory
    Cmd          []string // Execution commands and args
    User         string      // User that will run the command
    Privileged   bool        // Is the container in privileged mode
    TTY          bool        // Attach standard streams to a tty.
    ConsoleSize  ConsoleSize // Initial terminal size [height, width], unused if TTY == false
    AttachStdin  bool        // Attach the standard input, makes possible user interaction
    AttachStderr bool        // Attach the standard error
    AttachStdout bool        // Attach the standard output
    DetachKeys   string      // Escape keys for detach
    Env          []string    // Environment variables
    WorkingDir   string      // Working directory
    Cmd          []string    // Execution commands and args
}

// ExecCreateResult holds the result of creating a container exec.
@@ -37,11 +37,16 @@ func (cli *Client) ExecCreate(ctx context.Context, containerID string, options E
        return ExecCreateResult{}, err
    }

    consoleSize, err := getConsoleSize(options.TTY, options.ConsoleSize)
    if err != nil {
        return ExecCreateResult{}, err
    }

    req := container.ExecCreateRequest{
        User:         options.User,
        Privileged:   options.Privileged,
        Tty:          options.Tty,
        ConsoleSize:  options.ConsoleSize,
        Tty:          options.TTY,
        ConsoleSize:  consoleSize,
        AttachStdin:  options.AttachStdin,
        AttachStderr: options.AttachStderr,
        AttachStdout: options.AttachStdout,
@@ -73,7 +78,7 @@ type ExecStartOptions struct {
    // Check if there's a tty
    TTY bool
    // Terminal size [height, width], unused if TTY == false
    ConsoleSize ConsoleSize `json:",omitzero"`
    ConsoleSize ConsoleSize
}

// ExecStartResult holds the result of starting a container exec.
@@ -82,13 +87,16 @@ type ExecStartResult struct {

// ExecStart starts an exec process already created in the docker host.
func (cli *Client) ExecStart(ctx context.Context, execID string, options ExecStartOptions) (ExecStartResult, error) {
    req := container.ExecStartRequest{
        Detach: options.Detach,
        Tty:    options.TTY,
    }
    if err := applyConsoleSize(&req, &options.ConsoleSize); err != nil {
    consoleSize, err := getConsoleSize(options.TTY, options.ConsoleSize)
    if err != nil {
        return ExecStartResult{}, err
    }

    req := container.ExecStartRequest{
        Detach:      options.Detach,
        Tty:         options.TTY,
        ConsoleSize: consoleSize,
    }
    resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, req, nil)
    defer ensureReaderClosed(resp)
    return ExecStartResult{}, err
@@ -126,27 +134,29 @@ type ExecAttachResult struct {
//
// [stdcopy.StdCopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdCopy
func (cli *Client) ExecAttach(ctx context.Context, execID string, options ExecAttachOptions) (ExecAttachResult, error) {
    req := container.ExecStartRequest{
        Detach: false,
        Tty:    options.TTY,
    }
    if err := applyConsoleSize(&req, &options.ConsoleSize); err != nil {
    consoleSize, err := getConsoleSize(options.TTY, options.ConsoleSize)
    if err != nil {
        return ExecAttachResult{}, err
    }
    req := container.ExecStartRequest{
        Detach:      false,
        Tty:         options.TTY,
        ConsoleSize: consoleSize,
    }
    response, err := cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, req, http.Header{
        "Content-Type": {"application/json"},
    })
    return ExecAttachResult{HijackedResponse: response}, err
}

func applyConsoleSize(req *container.ExecStartRequest, consoleSize *ConsoleSize) error {
func getConsoleSize(hasTTY bool, consoleSize ConsoleSize) (*[2]uint, error) {
    if consoleSize.Height != 0 || consoleSize.Width != 0 {
        if !req.Tty {
            return errdefs.ErrInvalidArgument.WithMessage("console size is only supported when TTY is enabled")
        if !hasTTY {
            return nil, errdefs.ErrInvalidArgument.WithMessage("console size is only supported when TTY is enabled")
        }
        req.ConsoleSize = &[2]uint{consoleSize.Height, consoleSize.Width}
        return &[2]uint{consoleSize.Height, consoleSize.Width}, nil
    }
    return nil
    return nil, nil
}

// ExecInspectOptions holds options for inspecting a container exec.
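
Review note: exec options move from Tty/*[2]uint to TTY/ConsoleSize, and getConsoleSize rejects a size when no TTY is requested. A usage sketch; that ExecCreateResult exposes the new exec's ID as a field named ID is an assumption, since the field isn't visible in this hunk:

package example

import (
    "context"

    "github.com/moby/moby/client"
)

func startShellExec(ctx context.Context, cli *client.Client, containerID string) error {
    created, err := cli.ExecCreate(ctx, containerID, client.ExecCreateOptions{
        Cmd:          []string{"/bin/sh"},
        TTY:          true, // renamed from Tty
        ConsoleSize:  client.ConsoleSize{Height: 40, Width: 120},
        AttachStdin:  true,
        AttachStdout: true,
        AttachStderr: true,
    })
    if err != nil {
        return err
    }

    // created.ID: assumed field name for the exec ID on ExecCreateResult.
    // Setting ConsoleSize with TTY left false would now fail with an
    // invalid-argument error from getConsoleSize.
    _, err = cli.ExecStart(ctx, created.ID, client.ExecStartOptions{
        TTY:         true,
        ConsoleSize: client.ConsoleSize{Height: 40, Width: 120},
    })
    return err
}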

vendor/github.com/moby/moby/client/container_export.go | 45 (generated, vendored)
@@ -4,7 +4,6 @@ import (
    "context"
    "io"
    "net/url"
    "sync"
)

// ContainerExportOptions specifies options for container export operations.
@@ -13,50 +12,36 @@ type ContainerExportOptions struct {
}

// ContainerExportResult represents the result of a container export operation.
type ContainerExportResult struct {
    rc    io.ReadCloser
    close func() error
type ContainerExportResult interface {
    io.ReadCloser
}

// ContainerExport retrieves the raw contents of a container
// and returns them as an [io.ReadCloser]. It's up to the caller
// to close the stream.
//
// The underlying [io.ReadCloser] is automatically closed if the context is canceled.
func (cli *Client) ContainerExport(ctx context.Context, containerID string, options ContainerExportOptions) (ContainerExportResult, error) {
    containerID, err := trimID("container", containerID)
    if err != nil {
        return ContainerExportResult{}, err
        return nil, err
    }

    resp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
    if err != nil {
        return ContainerExportResult{}, err
        return nil, err
    }

    return newContainerExportResult(resp.Body), nil
    return &containerExportResult{
        ReadCloser: newCancelReadCloser(ctx, resp.Body),
    }, nil
}

func newContainerExportResult(rc io.ReadCloser) ContainerExportResult {
    if rc == nil {
        panic("nil io.ReadCloser")
    }
    return ContainerExportResult{
        rc:    rc,
        close: sync.OnceValue(rc.Close),
    }
type containerExportResult struct {
    io.ReadCloser
}

// Read implements io.ReadCloser
func (r ContainerExportResult) Read(p []byte) (n int, err error) {
    if r.rc == nil {
        return 0, io.EOF
    }
    return r.rc.Read(p)
}

// Close implements io.ReadCloser
func (r ContainerExportResult) Close() error {
    if r.close == nil {
        return nil
    }
    return r.close()
}
var (
    _ io.ReadCloser         = (*containerExportResult)(nil)
    _ ContainerExportResult = (*containerExportResult)(nil)
)
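
Review note: ContainerExportResult is now an interface embedding io.ReadCloser, so the failure return is nil instead of a zero struct and the result plugs into anything expecting a reader. A consumer sketch, assuming a configured *client.Client:

package example

import (
    "context"
    "io"
    "os"

    "github.com/moby/moby/client"
)

func exportToFile(ctx context.Context, cli *client.Client, containerID, path string) error {
    res, err := cli.ContainerExport(ctx, containerID, client.ContainerExportOptions{})
    if err != nil {
        return err
    }
    defer res.Close() // idempotent; also closed automatically if ctx is canceled

    f, err := os.Create(path)
    if err != nil {
        return err
    }
    defer f.Close()

    _, err = io.Copy(f, res) // res is an io.ReadCloser now
    return err
}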

vendor/github.com/moby/moby/client/container_logs.go | 49 (generated, vendored)
@@ -5,7 +5,6 @@ import (
    "fmt"
    "io"
    "net/url"
    "sync"
    "time"

    "github.com/moby/moby/client/internal/timestamp"
@@ -24,14 +23,15 @@ type ContainerLogsOptions struct {
}

// ContainerLogsResult is the result of a container logs operation.
type ContainerLogsResult struct {
    rc    io.ReadCloser
    close func() error
type ContainerLogsResult interface {
    io.ReadCloser
}

// ContainerLogs returns the logs generated by a container in an [io.ReadCloser].
// It's up to the caller to close the stream.
//
// The underlying [io.ReadCloser] is automatically closed if the context is canceled.
//
// The stream format on the response uses one of two formats:
//
//   - If the container is using a TTY, there is only a single stream (stdout)
@@ -58,7 +58,7 @@ func (cli *Client) ContainerLogs(ctx context.Context, containerID string, option
    containerID, err := trimID("container", containerID)
    if err != nil {
        return ContainerLogsResult{}, err
        return nil, err
    }

    query := url.Values{}
@@ -73,7 +73,7 @@ func (cli *Client) ContainerLogs(ctx context.Context, containerID string, option
    if options.Since != "" {
        ts, err := timestamp.GetTimestamp(options.Since, time.Now())
        if err != nil {
            return ContainerLogsResult{}, fmt.Errorf(`invalid value for "since": %w`, err)
            return nil, fmt.Errorf(`invalid value for "since": %w`, err)
        }
        query.Set("since", ts)
    }
@@ -81,7 +81,7 @@ func (cli *Client) ContainerLogs(ctx context.Context, containerID string, option
    if options.Until != "" {
        ts, err := timestamp.GetTimestamp(options.Until, time.Now())
        if err != nil {
            return ContainerLogsResult{}, fmt.Errorf(`invalid value for "until": %w`, err)
            return nil, fmt.Errorf(`invalid value for "until": %w`, err)
        }
        query.Set("until", ts)
    }
@@ -101,33 +101,18 @@ func (cli *Client) ContainerLogs(ctx context.Context, containerID string, option

    resp, err := cli.get(ctx, "/containers/"+containerID+"/logs", query, nil)
    if err != nil {
        return ContainerLogsResult{}, err
        return nil, err
    }
    return newContainerLogsResult(resp.Body), nil
    return &containerLogsResult{
        ReadCloser: newCancelReadCloser(ctx, resp.Body),
    }, nil
}

func newContainerLogsResult(rc io.ReadCloser) ContainerLogsResult {
    if rc == nil {
        panic("rc cannot be nil")
    }
    return ContainerLogsResult{
        rc:    rc,
        close: sync.OnceValue(rc.Close),
    }
type containerLogsResult struct {
    io.ReadCloser
}

// Read implements the io.Reader interface.
func (r ContainerLogsResult) Read(p []byte) (n int, err error) {
    if r.rc == nil {
        return 0, io.EOF
    }
    return r.rc.Read(p)
}

// Close closes the underlying reader.
func (r ContainerLogsResult) Close() error {
    if r.close == nil {
        return nil
    }
    return r.close()
}
var (
    _ io.ReadCloser       = (*containerLogsResult)(nil)
    _ ContainerLogsResult = (*containerLogsResult)(nil)
)
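
Review note: ContainerLogsResult gets the same treatment. For non-TTY containers the stream stays multiplexed, so stdcopy.StdCopy (linked from the doc comment above) still applies. A sketch; the ShowStdout/ShowStderr option names are assumed from the wider options struct, which this hunk only shows in part:

package example

import (
    "context"
    "os"

    "github.com/moby/moby/api/pkg/stdcopy"
    "github.com/moby/moby/client"
)

func printLogs(ctx context.Context, cli *client.Client, containerID string) error {
    logs, err := cli.ContainerLogs(ctx, containerID, client.ContainerLogsOptions{
        ShowStdout: true, // assumed option names
        ShowStderr: true,
    })
    if err != nil {
        return err
    }
    defer logs.Close()

    // Demultiplex the stdout/stderr frames of a non-TTY container.
    _, err = stdcopy.StdCopy(os.Stdout, os.Stderr, logs)
    return err
}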

vendor/github.com/moby/moby/client/container_prune.go | 6 (generated, vendored)
@@ -14,13 +14,13 @@ type ContainerPruneOptions struct {
    Filters Filters
}

// ContainerPruneResult holds the result from the [Client.ContainersPrune] method.
// ContainerPruneResult holds the result from the [Client.ContainerPrune] method.
type ContainerPruneResult struct {
    Report container.PruneReport
}

// ContainersPrune requests the daemon to delete unused data
func (cli *Client) ContainersPrune(ctx context.Context, opts ContainerPruneOptions) (ContainerPruneResult, error) {
// ContainerPrune requests the daemon to delete unused data
func (cli *Client) ContainerPrune(ctx context.Context, opts ContainerPruneOptions) (ContainerPruneResult, error) {
    query := url.Values{}
    opts.Filters.updateURLValues(query)

vendor/github.com/moby/moby/client/container_stats.go | 4 (generated, vendored)
@@ -43,6 +43,8 @@ type ContainerStatsResult struct {
// ContainerStats retrieves live resource usage statistics for the specified
// container. The caller must close the [io.ReadCloser] in the returned result
// to release associated resources.
//
// The underlying [io.ReadCloser] is automatically closed if the context is canceled.
func (cli *Client) ContainerStats(ctx context.Context, containerID string, options ContainerStatsOptions) (ContainerStatsResult, error) {
    containerID, err := trimID("container", containerID)
    if err != nil {
@@ -68,6 +70,6 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, optio
    }

    return ContainerStatsResult{
        Body: resp.Body,
        Body: newCancelReadCloser(ctx, resp.Body),
    }, nil
}
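
Review note: ContainerStats now routes the body through newCancelReadCloser as well. A sketch of decoding a single sample; the Stream option name and the container.StatsResponse decode target are assumptions, not shown in this hunk:

package example

import (
    "context"
    "encoding/json"

    "github.com/moby/moby/api/types/container"
    "github.com/moby/moby/client"
)

func oneStatsSample(ctx context.Context, cli *client.Client, containerID string) (container.StatsResponse, error) {
    var stats container.StatsResponse

    // Stream: false requests a single snapshot (assumed option name).
    res, err := cli.ContainerStats(ctx, containerID, client.ContainerStatsOptions{Stream: false})
    if err != nil {
        return stats, err
    }
    defer res.Body.Close() // also closed for us if ctx is canceled

    err = json.NewDecoder(res.Body).Decode(&stats)
    return stats, err
}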

vendor/github.com/moby/moby/client/image_import.go | 30 (generated, vendored)
@@ -2,18 +2,26 @@ package client

import (
    "context"
    "io"
    "net/url"

    "github.com/distribution/reference"
)

// ImageImport creates a new image based on the source options.
// It returns the JSON content in the response body.
// ImageImportResult holds the response body returned by the daemon for image import.
type ImageImportResult interface {
    io.ReadCloser
}

// ImageImport creates a new image based on the source options. It returns the
// JSON content in the [ImageImportResult].
//
// The underlying [io.ReadCloser] is automatically closed if the context is canceled.
func (cli *Client) ImageImport(ctx context.Context, source ImageImportSource, ref string, options ImageImportOptions) (ImageImportResult, error) {
    if ref != "" {
        // Check if the given image name can be resolved
        if _, err := reference.ParseNormalizedNamed(ref); err != nil {
            return ImageImportResult{}, err
            return nil, err
        }
    }

@@ -40,7 +48,19 @@ func (cli *Client) ImageImport(ctx context.Context, source ImageImportSource, re

    resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil)
    if err != nil {
        return ImageImportResult{}, err
        return nil, err
    }
    return ImageImportResult{rc: resp.Body}, nil
    return &imageImportResult{
        ReadCloser: newCancelReadCloser(ctx, resp.Body),
    }, nil
}

// imageImportResult holds the response body returned by the daemon for image import.
type imageImportResult struct {
    io.ReadCloser
}

var (
    _ io.ReadCloser     = (*imageImportResult)(nil)
    _ ImageImportResult = (*imageImportResult)(nil)
)

vendor/github.com/moby/moby/client/image_import_opts.go | 19 (generated, vendored)
@@ -19,22 +19,3 @@ type ImageImportOptions struct {
    Changes  []string         // Changes are the raw changes to apply to this image
    Platform ocispec.Platform // Platform is the target platform of the image
}

// ImageImportResult holds the response body returned by the daemon for image import.
type ImageImportResult struct {
    rc io.ReadCloser
}

func (r ImageImportResult) Read(p []byte) (n int, err error) {
    if r.rc == nil {
        return 0, io.EOF
    }
    return r.rc.Read(p)
}

func (r ImageImportResult) Close() error {
    if r.rc == nil {
        return nil
    }
    return r.rc.Close()
}

vendor/github.com/moby/moby/client/image_load.go | 48 (generated, vendored)
@@ -7,18 +7,21 @@ import (
    "net/url"
)

// ImageLoad loads an image in the docker host from the client host.
// It's up to the caller to close the [io.ReadCloser] in the
// [ImageLoadResult] returned by this function.
// ImageLoadResult returns information to the client about a load process.
// It implements [io.ReadCloser] and must be closed to avoid a resource leak.
type ImageLoadResult interface {
    io.ReadCloser
}

// ImageLoad loads an image in the docker host from the client host. It's up
// to the caller to close the [ImageLoadResult] returned by this function.
//
// Platform is an optional parameter that specifies the platform to load from
// the provided multi-platform image. Passing a platform only has an effect
// if the input image is a multi-platform image.
// The underlying [io.ReadCloser] is automatically closed if the context is canceled.
func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, loadOpts ...ImageLoadOption) (ImageLoadResult, error) {
    var opts imageLoadOpts
    for _, opt := range loadOpts {
        if err := opt.Apply(&opts); err != nil {
            return ImageLoadResult{}, err
            return nil, err
        }
    }

@@ -29,12 +32,12 @@ func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, loadOpts ...I
    }
    if len(opts.apiOptions.Platforms) > 0 {
        if err := cli.requiresVersion(ctx, "1.48", "platform"); err != nil {
            return ImageLoadResult{}, err
            return nil, err
        }

        p, err := encodePlatforms(opts.apiOptions.Platforms...)
        if err != nil {
            return ImageLoadResult{}, err
            return nil, err
        }
        query["platform"] = p
    }
@@ -43,26 +46,19 @@ func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, loadOpts ...I
        "Content-Type": {"application/x-tar"},
    })
    if err != nil {
        return ImageLoadResult{}, err
        return nil, err
    }
    return ImageLoadResult{
        body: resp.Body,
    return &imageLoadResult{
        ReadCloser: newCancelReadCloser(ctx, resp.Body),
    }, nil
}

// ImageLoadResult returns information to the client about a load process.
type ImageLoadResult struct {
    // Body must be closed to avoid a resource leak
    body io.ReadCloser
// imageLoadResult returns information to the client about a load process.
type imageLoadResult struct {
    io.ReadCloser
}

func (r ImageLoadResult) Read(p []byte) (n int, err error) {
    return r.body.Read(p)
}

func (r ImageLoadResult) Close() error {
    if r.body == nil {
        return nil
    }
    return r.body.Close()
}
var (
    _ io.ReadCloser   = (*imageLoadResult)(nil)
    _ ImageLoadResult = (*imageLoadResult)(nil)
)
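
Review note: ImageLoad keeps its variadic options but returns the nil-able interface result. A load sketch using ImageLoadWithPlatforms from the opts file below:

package example

import (
    "context"
    "io"
    "os"

    "github.com/moby/moby/client"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func loadImage(ctx context.Context, cli *client.Client, tarPath string) error {
    f, err := os.Open(tarPath)
    if err != nil {
        return err
    }
    defer f.Close()

    res, err := cli.ImageLoad(ctx, f,
        client.ImageLoadWithPlatforms(ocispec.Platform{OS: "linux", Architecture: "amd64"}))
    if err != nil {
        return err
    }
    defer res.Close()

    // The daemon streams JSON progress messages; discard them here.
    _, err = io.Copy(io.Discard, res)
    return err
}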

vendor/github.com/moby/moby/client/image_load_opts.go | 4 (generated, vendored)
@@ -38,6 +38,10 @@ func ImageLoadWithQuiet(quiet bool) ImageLoadOption {
}

// ImageLoadWithPlatforms sets the platforms to be loaded from the image.
//
// Platform is an optional parameter that specifies the platform to load from
// the provided multi-platform image. Passing a platform only has an effect
// if the input image is a multi-platform image.
func ImageLoadWithPlatforms(platforms ...ocispec.Platform) ImageLoadOption {
    return imageLoadOptionFunc(func(opt *imageLoadOpts) error {
        if opt.apiOptions.Platforms != nil {

vendor/github.com/moby/moby/client/image_prune.go | 6 (generated, vendored)
@@ -14,13 +14,13 @@ type ImagePruneOptions struct {
    Filters Filters
}

// ImagePruneResult holds the result from the [Client.ImagesPrune] method.
// ImagePruneResult holds the result from the [Client.ImagePrune] method.
type ImagePruneResult struct {
    Report image.PruneReport
}

// ImagesPrune requests the daemon to delete unused data
func (cli *Client) ImagesPrune(ctx context.Context, opts ImagePruneOptions) (ImagePruneResult, error) {
// ImagePrune requests the daemon to delete unused data
func (cli *Client) ImagePrune(ctx context.Context, opts ImagePruneOptions) (ImagePruneResult, error) {
    query := url.Values{}
    opts.Filters.updateURLValues(query)

vendor/github.com/moby/moby/client/image_save.go | 29 (generated, vendored)
@@ -2,11 +2,17 @@ package client

import (
    "context"
    "io"
    "net/url"
)

type ImageSaveResult interface {
    io.ReadCloser
}

// ImageSave retrieves one or more images from the docker host as an
// [ImageSaveResult].
// [ImageSaveResult]. Callers should close the reader, but the underlying
// [io.ReadCloser] is automatically closed if the context is canceled.
//
// Platforms is an optional parameter that specifies the platforms to save
// from the image. Passing a platform only has an effect if the input image
@@ -15,7 +21,7 @@ func (cli *Client) ImageSave(ctx context.Context, imageIDs []string, saveOpts ..
    var opts imageSaveOpts
    for _, opt := range saveOpts {
        if err := opt.Apply(&opts); err != nil {
            return ImageSaveResult{}, err
            return nil, err
        }
    }

@@ -25,18 +31,29 @@ func (cli *Client) ImageSave(ctx context.Context, imageIDs []string, saveOpts ..

    if len(opts.apiOptions.Platforms) > 0 {
        if err := cli.requiresVersion(ctx, "1.48", "platform"); err != nil {
            return ImageSaveResult{}, err
            return nil, err
        }
        p, err := encodePlatforms(opts.apiOptions.Platforms...)
        if err != nil {
            return ImageSaveResult{}, err
            return nil, err
        }
        query["platform"] = p
    }

    resp, err := cli.get(ctx, "/images/get", query, nil)
    if err != nil {
        return ImageSaveResult{}, err
        return nil, err
    }
    return newImageSaveResult(resp.Body), nil
    return &imageSaveResult{
        ReadCloser: newCancelReadCloser(ctx, resp.Body),
    }, nil
}

type imageSaveResult struct {
    io.ReadCloser
}

var (
    _ io.ReadCloser   = (*imageSaveResult)(nil)
    _ ImageSaveResult = (*imageSaveResult)(nil)
)
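
Review note: ImageSave mirrors ImageLoad: nil on error, cancel-aware reader on success. A save sketch, assuming a configured *client.Client:

package example

import (
    "context"
    "io"
    "os"

    "github.com/moby/moby/client"
)

func saveImages(ctx context.Context, cli *client.Client, path string, imageIDs ...string) error {
    res, err := cli.ImageSave(ctx, imageIDs)
    if err != nil {
        return err
    }
    defer res.Close()

    f, err := os.Create(path)
    if err != nil {
        return err
    }
    defer f.Close()

    _, err = io.Copy(f, res) // stream the image tarball to disk
    return err
}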

vendor/github.com/moby/moby/client/image_save_opts.go | 38 (generated, vendored)
@@ -2,8 +2,6 @@ package client

import (
    "fmt"
    "io"
    "sync"

    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -18,8 +16,11 @@ func (f imageSaveOptionFunc) Apply(o *imageSaveOpts) error {
    return f(o)
}

// ImageSaveWithPlatforms sets the platforms to be saved from the image.
// ImageSaveWithPlatforms sets the platforms to be saved from the image. It
// produces an error if platforms are already set. This option only has an
// effect if the input image is a multi-platform image.
func ImageSaveWithPlatforms(platforms ...ocispec.Platform) ImageSaveOption {
    // TODO(thaJeztah): verify the GoDoc; do we produce an error for a single-platform image without the given platform?
    return imageSaveOptionFunc(func(opt *imageSaveOpts) error {
        if opt.apiOptions.Platforms != nil {
            return fmt.Errorf("platforms already set to %v", opt.apiOptions.Platforms)
@@ -38,34 +39,3 @@ type imageSaveOptions struct {
    // multi-platform image and has multiple variants.
    Platforms []ocispec.Platform
}

func newImageSaveResult(rc io.ReadCloser) ImageSaveResult {
    if rc == nil {
        panic("nil io.ReadCloser")
    }
    return ImageSaveResult{
        rc:    rc,
        close: sync.OnceValue(rc.Close),
    }
}

type ImageSaveResult struct {
    rc    io.ReadCloser
    close func() error
}

// Read implements io.ReadCloser
func (r ImageSaveResult) Read(p []byte) (n int, err error) {
    if r.rc == nil {
        return 0, io.EOF
    }
    return r.rc.Read(p)
}

// Close implements io.ReadCloser
func (r ImageSaveResult) Close() error {
    if r.close == nil {
        return nil
    }
    return r.close()
}

vendor/github.com/moby/moby/client/network_prune.go | 6 (generated, vendored)
@@ -14,13 +14,13 @@ type NetworkPruneOptions struct {
    Filters Filters
}

// NetworkPruneResult holds the result from the [Client.NetworksPrune] method.
// NetworkPruneResult holds the result from the [Client.NetworkPrune] method.
type NetworkPruneResult struct {
    Report network.PruneReport
}

// NetworksPrune requests the daemon to delete unused networks
func (cli *Client) NetworksPrune(ctx context.Context, opts NetworkPruneOptions) (NetworkPruneResult, error) {
// NetworkPrune requests the daemon to delete unused networks
func (cli *Client) NetworkPrune(ctx context.Context, opts NetworkPruneOptions) (NetworkPruneResult, error) {
    query := url.Values{}
    opts.Filters.updateURLValues(query)

vendor/github.com/moby/moby/client/pkg/security/security_opts.go | 26 (generated, vendored)
@@ -1,8 +1,6 @@
package security

import (
    "errors"
    "fmt"
    "strings"
)

@@ -19,22 +17,14 @@ type KeyValue struct {

// DecodeOptions decodes a security options string slice to a
// type-safe [Option].
func DecodeOptions(opts []string) ([]Option, error) {
    so := []Option{}
func DecodeOptions(opts []string) []Option {
    so := make([]Option, 0, len(opts))
    for _, opt := range opts {
        // support output from a < 1.13 docker daemon
        if !strings.Contains(opt, "=") {
            so = append(so, Option{Name: opt})
            continue
        }
        secopt := Option{}
        for _, s := range strings.Split(opt, ",") {
            k, v, ok := strings.Cut(s, "=")
            if !ok {
                return nil, fmt.Errorf("invalid security option %q", s)
            }
            if k == "" || v == "" {
                return nil, errors.New("invalid empty security option")
            k, v, _ := strings.Cut(s, "=")
            if k == "" {
                continue
            }
            if k == "name" {
                secopt.Name = v
@@ -42,7 +32,9 @@ func DecodeOptions(opts []string) ([]Option, error) {
            }
            secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v})
        }
        so = append(so, secopt)
        if secopt.Name != "" {
            so = append(so, secopt)
        }
    }
    return so, nil
    return so
}
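
Review note: DecodeOptions loses its error return; malformed segments are now skipped and options without a name are dropped, so call sites simplify. A sketch of the new shape:

package example

import (
    "fmt"

    "github.com/moby/moby/client/pkg/security"
)

func printSecurityOptions(raw []string) {
    // Previously: so, err := security.DecodeOptions(raw) and err handling.
    // Invalid segments are now skipped instead of failing the whole call.
    for _, opt := range security.DecodeOptions(raw) {
        fmt.Printf("%s:\n", opt.Name)
        for _, kv := range opt.Options {
            fmt.Printf("  %s = %s\n", kv.Key, kv.Value)
        }
    }
}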

vendor/github.com/moby/moby/client/service_logs.go | 53 (generated, vendored)
@@ -5,7 +5,6 @@ import (
    "fmt"
    "io"
    "net/url"
    "sync"
    "time"

    "github.com/moby/moby/client/internal/timestamp"
@@ -26,16 +25,21 @@ type ServiceLogsOptions struct {
// ServiceLogsResult holds the result of a service logs operation.
// It implements [io.ReadCloser].
// It's up to the caller to close the stream.
type ServiceLogsResult struct {
    rc    io.ReadCloser
    close func() error
type ServiceLogsResult interface {
    io.ReadCloser
}

// ServiceLogs returns the logs generated by a service in an [ServiceLogsResult].
// ServiceLogs returns the logs generated by a service in a [ServiceLogsResult]
// as an [io.ReadCloser]. Callers should close the stream.
//
// The underlying [io.ReadCloser] is automatically closed if the context is canceled.
func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options ServiceLogsOptions) (ServiceLogsResult, error) {
    // TODO(thaJeztah): this function needs documentation about the format of the stream (similar to container logs)
    // TODO(thaJeztah): migrate CLI utilities to the client where suitable; https://github.com/docker/cli/blob/v29.0.0-rc.1/cli/command/service/logs.go#L73-L348

    serviceID, err := trimID("service", serviceID)
    if err != nil {
        return ServiceLogsResult{}, err
        return nil, err
    }

    query := url.Values{}
@@ -50,7 +54,7 @@ func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options Se
    if options.Since != "" {
        ts, err := timestamp.GetTimestamp(options.Since, time.Now())
        if err != nil {
            return ServiceLogsResult{}, fmt.Errorf(`invalid value for "since": %w`, err)
            return nil, fmt.Errorf(`invalid value for "since": %w`, err)
        }
        query.Set("since", ts)
    }
@@ -70,33 +74,18 @@ func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options Se

    resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil)
    if err != nil {
        return ServiceLogsResult{}, err
        return nil, err
    }
    return newServiceLogsResult(resp.Body), nil
    return &serviceLogsResult{
        ReadCloser: newCancelReadCloser(ctx, resp.Body),
    }, nil
}

func newServiceLogsResult(rc io.ReadCloser) ServiceLogsResult {
    if rc == nil {
        panic("nil io.ReadCloser")
    }
    return ServiceLogsResult{
        rc:    rc,
        close: sync.OnceValue(rc.Close),
    }
type serviceLogsResult struct {
    io.ReadCloser
}

// Read implements [io.ReadCloser] for LogsResult.
func (r ServiceLogsResult) Read(p []byte) (n int, err error) {
    if r.rc == nil {
        return 0, io.EOF
    }
    return r.rc.Read(p)
}

// Close implements [io.ReadCloser] for LogsResult.
func (r ServiceLogsResult) Close() error {
    if r.close == nil {
        return nil
    }
    return r.close()
}
var (
    _ io.ReadCloser     = (*serviceLogsResult)(nil)
    _ ServiceLogsResult = (*serviceLogsResult)(nil)
)

vendor/github.com/moby/moby/client/service_update.go | 4 (generated, vendored)
@@ -28,7 +28,7 @@ type ServiceUpdateOptions struct {
    // RegistryAuthFrom specifies where to find the registry authorization
    // credentials if they are not given in EncodedRegistryAuth. Valid
    // values are "spec" and "previous-spec".
    RegistryAuthFrom string
    RegistryAuthFrom swarm.RegistryAuthSource

    // Rollback indicates whether a server-side rollback should be
    // performed. When this is set, the provided spec will be ignored.
@@ -65,7 +65,7 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, options

    query := url.Values{}
    if options.RegistryAuthFrom != "" {
        query.Set("registryAuthFrom", options.RegistryAuthFrom)
        query.Set("registryAuthFrom", string(options.RegistryAuthFrom))
    }

    if options.Rollback != "" {

vendor/github.com/moby/moby/client/system_disk_usage.go | 306 (generated, vendored)
@@ -5,29 +5,315 @@ import (
    "encoding/json"
    "fmt"
    "net/url"
    "slices"
    "strings"

    "github.com/moby/moby/api/types/build"
    "github.com/moby/moby/api/types/container"
    "github.com/moby/moby/api/types/image"
    "github.com/moby/moby/api/types/system"
    "github.com/moby/moby/api/types/volume"
    "github.com/moby/moby/client/pkg/versions"
)

// DiskUsage requests the current data usage from the daemon
func (cli *Client) DiskUsage(ctx context.Context, options DiskUsageOptions) (system.DiskUsage, error) {
    var query url.Values
    if len(options.Types) > 0 {
        query = url.Values{}
        for _, t := range options.Types {
            query.Add("type", string(t))
// DiskUsageOptions holds parameters for [Client.DiskUsage] operations.
type DiskUsageOptions struct {
    // Containers controls whether container disk usage should be computed.
    Containers bool

    // Images controls whether image disk usage should be computed.
    Images bool

    // BuildCache controls whether build cache disk usage should be computed.
    BuildCache bool

    // Volumes controls whether volume disk usage should be computed.
    Volumes bool

    // Verbose enables more detailed disk usage information.
    Verbose bool
}

// DiskUsageResult is the result of [Client.DiskUsage] operations.
type DiskUsageResult struct {
    // Containers holds container disk usage information.
    Containers ContainersDiskUsage

    // Images holds image disk usage information.
    Images ImagesDiskUsage

    // BuildCache holds build cache disk usage information.
    BuildCache BuildCacheDiskUsage

    // Volumes holds volume disk usage information.
    Volumes VolumesDiskUsage
}

// ContainersDiskUsage contains disk usage information for containers.
type ContainersDiskUsage struct {
    // ActiveContainers is the number of active containers.
    ActiveContainers int64

    // TotalContainers is the total number of containers.
    TotalContainers int64

    // Reclaimable is the amount of disk space that can be reclaimed.
    Reclaimable int64

    // TotalSize is the total disk space used by all containers.
    TotalSize int64

    // Items holds detailed information about each container.
    Items []container.Summary
}

// ImagesDiskUsage contains disk usage information for images.
type ImagesDiskUsage struct {
    // ActiveImages is the number of active images.
    ActiveImages int64

    // TotalImages is the total number of images.
    TotalImages int64

    // Reclaimable is the amount of disk space that can be reclaimed.
    Reclaimable int64

    // TotalSize is the total disk space used by all images.
    TotalSize int64

    // Items holds detailed information about each image.
    Items []image.Summary
}

// VolumesDiskUsage contains disk usage information for volumes.
type VolumesDiskUsage struct {
    // ActiveVolumes is the number of active volumes.
    ActiveVolumes int64

    // TotalVolumes is the total number of volumes.
    TotalVolumes int64

    // Reclaimable is the amount of disk space that can be reclaimed.
    Reclaimable int64

    // TotalSize is the total disk space used by all volumes.
    TotalSize int64

    // Items holds detailed information about each volume.
    Items []volume.Volume
}

// BuildCacheDiskUsage contains disk usage information for build cache.
type BuildCacheDiskUsage struct {
    // ActiveBuildCacheRecords is the number of active build cache records.
    ActiveBuildCacheRecords int64

    // TotalBuildCacheRecords is the total number of build cache records.
    TotalBuildCacheRecords int64

    // Reclaimable is the amount of disk space that can be reclaimed.
    Reclaimable int64

    // TotalSize is the total disk space used by all build cache records.
    TotalSize int64

    // Items holds detailed information about each build cache record.
    Items []build.CacheRecord
}

// DiskUsage requests the current data usage from the daemon.
func (cli *Client) DiskUsage(ctx context.Context, options DiskUsageOptions) (DiskUsageResult, error) {
    query := url.Values{}

    for _, t := range []struct {
        flag   bool
        sysObj system.DiskUsageObject
    }{
        {options.Containers, system.ContainerObject},
        {options.Images, system.ImageObject},
        {options.Volumes, system.VolumeObject},
        {options.BuildCache, system.BuildCacheObject},
    } {
        if t.flag {
            query.Add("type", string(t.sysObj))
        }
    }

    if options.Verbose {
        query.Set("verbose", "1")
    }

    resp, err := cli.get(ctx, "/system/df", query, nil)
    defer ensureReaderClosed(resp)
    if err != nil {
        return system.DiskUsage{}, err
        return DiskUsageResult{}, err
    }

    var du system.DiskUsage
    if err := json.NewDecoder(resp.Body).Decode(&du); err != nil {
        return system.DiskUsage{}, fmt.Errorf("Error retrieving disk usage: %v", err)
        return DiskUsageResult{}, fmt.Errorf("Error retrieving disk usage: %v", err)
    }
    return du, nil

    // Generate result from a legacy response.
    if versions.LessThan(cli.version, "1.52") {
        return diskUsageResultFromLegacyAPI(&du), nil
    }

    var r DiskUsageResult
    if idu := du.ImageUsage; idu != nil {
        r.Images = ImagesDiskUsage{
            ActiveImages: idu.ActiveImages,
            Reclaimable:  idu.Reclaimable,
            TotalImages:  idu.TotalImages,
            TotalSize:    idu.TotalSize,
        }

        if options.Verbose {
            r.Images.Items = slices.Clone(idu.Items)
        }
    }

    if cdu := du.ContainerUsage; cdu != nil {
        r.Containers = ContainersDiskUsage{
            ActiveContainers: cdu.ActiveContainers,
            Reclaimable:      cdu.Reclaimable,
            TotalContainers:  cdu.TotalContainers,
            TotalSize:        cdu.TotalSize,
        }

        if options.Verbose {
            r.Containers.Items = slices.Clone(cdu.Items)
        }
    }

    if bdu := du.BuildCacheUsage; bdu != nil {
        r.BuildCache = BuildCacheDiskUsage{
            ActiveBuildCacheRecords: bdu.ActiveBuildCacheRecords,
            Reclaimable:             bdu.Reclaimable,
            TotalBuildCacheRecords:  bdu.TotalBuildCacheRecords,
            TotalSize:               bdu.TotalSize,
        }

        if options.Verbose {
            r.BuildCache.Items = slices.Clone(bdu.Items)
        }
    }

    if vdu := du.VolumeUsage; vdu != nil {
        r.Volumes = VolumesDiskUsage{
            ActiveVolumes: vdu.ActiveVolumes,
            Reclaimable:   vdu.Reclaimable,
            TotalVolumes:  vdu.TotalVolumes,
            TotalSize:     vdu.TotalSize,
        }

        if options.Verbose {
            r.Volumes.Items = slices.Clone(vdu.Items)
        }
    }

    return r, nil
}

func diskUsageResultFromLegacyAPI(du *system.DiskUsage) DiskUsageResult {
    return DiskUsageResult{
        Images:     imageDiskUsageFromLegacyAPI(du),
        Containers: containerDiskUsageFromLegacyAPI(du),
        BuildCache: buildCacheDiskUsageFromLegacyAPI(du),
        Volumes:    volumeDiskUsageFromLegacyAPI(du),
    }
}

func imageDiskUsageFromLegacyAPI(du *system.DiskUsage) ImagesDiskUsage {
    idu := ImagesDiskUsage{
        TotalSize:   du.LayersSize,
        TotalImages: int64(len(du.Images)),
        Items:       du.Images,
    }

    var used int64
    for _, i := range idu.Items {
        if i.Containers > 0 {
            idu.ActiveImages++

            if i.Size == -1 || i.SharedSize == -1 {
                continue
            }
            used += (i.Size - i.SharedSize)
        }
    }

    if idu.TotalImages > 0 {
        idu.Reclaimable = idu.TotalSize - used
    }

    return idu
}

func containerDiskUsageFromLegacyAPI(du *system.DiskUsage) ContainersDiskUsage {
    cdu := ContainersDiskUsage{
        TotalContainers: int64(len(du.Containers)),
        Items:           du.Containers,
    }

    var used int64
    for _, c := range cdu.Items {
        cdu.TotalSize += c.SizeRw
        switch strings.ToLower(c.State) {
        case "running", "paused", "restarting":
            cdu.ActiveContainers++
            used += c.SizeRw
        }
    }

    cdu.Reclaimable = cdu.TotalSize - used
    return cdu
}

func buildCacheDiskUsageFromLegacyAPI(du *system.DiskUsage) BuildCacheDiskUsage {
    bdu := BuildCacheDiskUsage{
        TotalBuildCacheRecords: int64(len(du.BuildCache)),
        Items:                  du.BuildCache,
    }

    var used int64
    for _, b := range du.BuildCache {
        if !b.Shared {
            bdu.TotalSize += b.Size
        }

        if b.InUse {
            bdu.ActiveBuildCacheRecords++
            if !b.Shared {
                used += b.Size
            }
        }
    }

    bdu.Reclaimable = bdu.TotalSize - used
    return bdu
}

func volumeDiskUsageFromLegacyAPI(du *system.DiskUsage) VolumesDiskUsage {
    vdu := VolumesDiskUsage{
        TotalVolumes: int64(len(du.Volumes)),
        Items:        du.Volumes,
    }

    var used int64
    for _, v := range vdu.Items {
        // Ignore volumes with no usage data
        if v.UsageData != nil {
            if v.UsageData.RefCount > 0 {
                vdu.ActiveVolumes++
                used += v.UsageData.Size
            }
            if v.UsageData.Size > 0 {
                vdu.TotalSize += v.UsageData.Size
            }
        }
    }

    vdu.Reclaimable = vdu.TotalSize - used
    return vdu
}
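
Review note: DiskUsage swaps the raw system.DiskUsage passthrough for boolean selectors and a normalized DiskUsageResult, synthesizing per-type totals from the legacy fields when the daemon is older than API 1.52. A caller sketch:

package example

import (
    "context"
    "fmt"

    "github.com/moby/moby/client"
)

func reportUsage(ctx context.Context, cli *client.Client) error {
    du, err := cli.DiskUsage(ctx, client.DiskUsageOptions{
        Containers: true,
        Images:     true,
        Volumes:    true,
        BuildCache: true,
        Verbose:    false, // on API >= 1.52, Items stay empty unless Verbose is set
    })
    if err != nil {
        return err
    }
    fmt.Printf("images:     %d total, %d bytes reclaimable\n", du.Images.TotalImages, du.Images.Reclaimable)
    fmt.Printf("containers: %d total, %d bytes reclaimable\n", du.Containers.TotalContainers, du.Containers.Reclaimable)
    fmt.Printf("volumes:    %d total, %d bytes reclaimable\n", du.Volumes.TotalVolumes, du.Volumes.Reclaimable)
    fmt.Printf("buildcache: %d records, %d bytes reclaimable\n", du.BuildCache.TotalBuildCacheRecords, du.BuildCache.Reclaimable)
    return nil
}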

vendor/github.com/moby/moby/client/system_disk_usage_opts.go | 10 (generated, vendored)
@@ -1,10 +0,0 @@
package client

import "github.com/moby/moby/api/types/system"

// DiskUsageOptions holds parameters for system disk usage query.
type DiskUsageOptions struct {
    // Types specifies what object types to include in the response. If empty,
    // all object types are returned.
    Types []system.DiskUsageObject
}
52
vendor/github.com/moby/moby/client/task_logs.go
generated
vendored
52
vendor/github.com/moby/moby/client/task_logs.go
generated
vendored
@ -4,7 +4,6 @@ import (
	"context"
	"io"
	"net/url"
	"sync"
	"time"

	"github.com/moby/moby/client/internal/timestamp"
@ -24,14 +23,18 @@ type TaskLogsOptions struct {

// TaskLogsResult holds the result of a task logs operation.
// It implements [io.ReadCloser].
type TaskLogsResult struct {
	rc io.ReadCloser
	close func() error
type TaskLogsResult interface {
	io.ReadCloser
}

// TaskLogs returns the logs generated by a task.
// It's up to the caller to close the stream.
// TaskLogs returns the logs generated by a task in a [TaskLogsResult]
// as an [io.ReadCloser]. Callers should close the stream.
//
// The underlying [io.ReadCloser] is automatically closed if the context is canceled.
func (cli *Client) TaskLogs(ctx context.Context, taskID string, options TaskLogsOptions) (TaskLogsResult, error) {
	// TODO(thaJeztah): this function needs documentation about the format of the stream (similar to container logs)
	// TODO(thaJeztah): migrate CLI utilities to the client where suitable; https://github.com/docker/cli/blob/v29.0.0-rc.1/cli/command/service/logs.go#L73-L348

	query := url.Values{}
	if options.ShowStdout {
		query.Set("stdout", "1")
@ -44,7 +47,7 @@ func (cli *Client) TaskLogs(ctx context.Context, taskID string, options TaskLogs
	if options.Since != "" {
		ts, err := timestamp.GetTimestamp(options.Since, time.Now())
		if err != nil {
			return TaskLogsResult{}, err
			return nil, err
		}
		query.Set("since", ts)
	}
@ -64,33 +67,18 @@ func (cli *Client) TaskLogs(ctx context.Context, taskID string, options TaskLogs

	resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil)
	if err != nil {
		return TaskLogsResult{}, err
		return nil, err
	}
	return newTaskLogsResult(resp.Body), nil
	return &taskLogsResult{
		ReadCloser: newCancelReadCloser(ctx, resp.Body),
	}, nil
}

func newTaskLogsResult(rc io.ReadCloser) TaskLogsResult {
	if rc == nil {
		panic("nil io.ReadCloser")
	}
	return TaskLogsResult{
		rc:    rc,
		close: sync.OnceValue(rc.Close),
	}
type taskLogsResult struct {
	io.ReadCloser
}

// Read implements [io.ReadCloser] for LogsResult.
func (r TaskLogsResult) Read(p []byte) (n int, err error) {
	if r.rc == nil {
		return 0, io.EOF
	}
	return r.rc.Read(p)
}

// Close implements [io.ReadCloser] for LogsResult.
func (r TaskLogsResult) Close() error {
	if r.close == nil {
		return nil
	}
	return r.close()
}
var (
	_ io.ReadCloser       = (*taskLogsResult)(nil)
	_ ContainerLogsResult = (*taskLogsResult)(nil)
)
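Because TaskLogsResult is now an interface that simply embeds io.ReadCloser, any reader satisfies it in tests (e.g. io.NopCloser over a strings.Reader), and call sites stream it directly. A minimal call-site sketch, assuming cli, ctx, and taskID exist in the surrounding program:

// Sketch only: error handling trimmed, identifiers assumed.
logs, err := cli.TaskLogs(ctx, taskID, client.TaskLogsOptions{
	ShowStdout: true,
})
if err != nil {
	return err
}
// Close is safe to call more than once and also fires if ctx is canceled.
defer logs.Close()

_, err = io.Copy(os.Stdout, logs)
return err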
21 vendor/github.com/moby/moby/client/utils.go generated vendored
@ -2,12 +2,14 @@ package client

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"
	"sync"

	cerrdefs "github.com/containerd/errdefs"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
@ -84,3 +86,22 @@ func decodeWithRaw[T any](resp *http.Response, out *T) (raw json.RawMessage, _ e
	}
	return buf.Bytes(), nil
}

// newCancelReadCloser wraps rc so it's automatically closed when ctx is canceled.
// Close is idempotent and returns the first error from rc.Close.
func newCancelReadCloser(ctx context.Context, rc io.ReadCloser) io.ReadCloser {
	crc := &cancelReadCloser{
		rc:    rc,
		close: sync.OnceValue(rc.Close),
	}
	context.AfterFunc(ctx, func() { _ = crc.Close() })
	return crc
}

type cancelReadCloser struct {
	rc    io.ReadCloser
	close func() error
}

func (c *cancelReadCloser) Read(p []byte) (int, error) { return c.rc.Read(p) }
func (c *cancelReadCloser) Close() error               { return c.close() }
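newCancelReadCloser is built from two standard-library pieces: context.AfterFunc (Go 1.21+) schedules the close when the context ends, and sync.OnceValue makes Close idempotent, returning the first close error on every call. The same pattern in a self-contained sketch, independent of the client:

package main

import (
	"context"
	"fmt"
	"io"
	"strings"
	"sync"
	"time"
)

// closeOnCancel mirrors newCancelReadCloser above: the reader's Close runs
// at most once, either explicitly or when ctx is canceled.
func closeOnCancel(ctx context.Context, rc io.ReadCloser) io.ReadCloser {
	crc := &wrapped{rc: rc, close: sync.OnceValue(rc.Close)}
	context.AfterFunc(ctx, func() { _ = crc.Close() })
	return crc
}

type wrapped struct {
	rc    io.ReadCloser
	close func() error
}

func (w *wrapped) Read(p []byte) (int, error) { return w.rc.Read(p) }
func (w *wrapped) Close() error               { return w.close() }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	rc := closeOnCancel(ctx, io.NopCloser(strings.NewReader("hello")))
	cancel()                          // triggers the AfterFunc close
	time.Sleep(10 * time.Millisecond) // let the AfterFunc goroutine run
	fmt.Println(rc.Close())           // second Close is a no-op: <nil>
}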
6 vendor/github.com/moby/moby/client/volume_prune.go generated vendored
@ -20,13 +20,13 @@ type VolumePruneOptions struct {
	Filters Filters
}

// VolumePruneResult holds the result from the [Client.VolumesPrune] method.
// VolumePruneResult holds the result from the [Client.VolumePrune] method.
type VolumePruneResult struct {
	Report volume.PruneReport
}

// VolumesPrune requests the daemon to delete unused data
func (cli *Client) VolumesPrune(ctx context.Context, options VolumePruneOptions) (VolumePruneResult, error) {
// VolumePrune requests the daemon to delete unused data
func (cli *Client) VolumePrune(ctx context.Context, options VolumePruneOptions) (VolumePruneResult, error) {
	if options.All {
		if _, ok := options.Filters["all"]; ok {
			return VolumePruneResult{}, errdefs.ErrInvalidArgument.WithMessage(`conflicting options: cannot specify both "all" and "all" filter`)
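The VolumesPrune → VolumePrune rename follows the singular method naming used by the other prune endpoints, and the new guard rejects setting both the All option and an explicit "all" filter. A call-site sketch against the renamed method, with cli and ctx assumed:

// Sketch only: set either All or the "all" filter, not both, or the
// client returns an invalid-argument error per the guard above.
res, err := cli.VolumePrune(ctx, client.VolumePruneOptions{All: true})
if err != nil {
	return err
}
fmt.Println("volumes deleted:", len(res.Report.VolumesDeleted))
fmt.Println("space reclaimed:", res.Report.SpaceReclaimed)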
5 vendor/modules.txt vendored
@ -167,7 +167,7 @@ github.com/moby/docker-image-spec/specs-go/v1
github.com/moby/go-archive
github.com/moby/go-archive/compression
github.com/moby/go-archive/tarheader
# github.com/moby/moby/api v1.52.0-beta.4
# github.com/moby/moby/api v1.52.0-beta.4.0.20251106210608-f7fd9c315acf => github.com/moby/moby/api v1.52.0-beta.4.0.20251106221347-217fd7890581
## explicit; go 1.23.0
github.com/moby/moby/api/pkg/authconfig
github.com/moby/moby/api/pkg/stdcopy
@ -189,7 +189,7 @@ github.com/moby/moby/api/types/storage
github.com/moby/moby/api/types/swarm
github.com/moby/moby/api/types/system
github.com/moby/moby/api/types/volume
# github.com/moby/moby/client v0.1.0-beta.3
# github.com/moby/moby/client v0.1.0-beta.3.0.20251106221347-217fd7890581
## explicit; go 1.23.0
github.com/moby/moby/client
github.com/moby/moby/client/internal
@ -543,3 +543,4 @@ gotest.tools/v3/skip
# tags.cncf.io/container-device-interface v1.0.1
## explicit; go 1.20
tags.cncf.io/container-device-interface/pkg/parser
# github.com/moby/moby/api => github.com/moby/moby/api v1.52.0-beta.4.0.20251106221347-217fd7890581