chore: vendor

2024-08-04 11:06:58 +02:00
parent 2a5985e44e
commit 04aec8232f
3557 changed files with 981078 additions and 1 deletion

2456
vendor/github.com/docker/docker/AUTHORS generated vendored Normal file

File diff suppressed because it is too large.

191
vendor/github.com/docker/docker/LICENSE generated vendored Normal file

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2018 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

19
vendor/github.com/docker/docker/NOTICE generated vendored Normal file

@@ -0,0 +1,19 @@
Docker
Copyright 2012-2017 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
This product contains software (https://github.com/creack/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.

42
vendor/github.com/docker/docker/api/README.md generated vendored Normal file

@@ -0,0 +1,42 @@
# Working on the Engine API
The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
It consists of various components in this repository:
- `api/swagger.yaml` A Swagger definition of the API.
- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
- `cli/` The command-line client.
- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
- `daemon/` The daemon, which serves the API.
## Swagger definition
The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
1. Automatically generate documentation.
2. Automatically generate the Go server and client. (A work-in-progress.)
3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
## Updating the API documentation
The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation.
The file is split into two main sections:
- `definitions`, which defines re-usable objects used in requests and responses
- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/).
`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing.
## Viewing the API documentation
When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
Run `make swagger-docs` and a preview will be running at `http://localhost:9000`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
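As a rough, non-authoritative illustration of the "third-party Go programs" use case mentioned above, a minimal program built on the `client/` package might look like the following (assuming a client release that matches the vendored API 1.46 types; output fields are for illustration only):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	// Connect using the DOCKER_HOST/DOCKER_API_VERSION environment and let the
	// client negotiate an API version the daemon actually supports.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// List all containers, running or not.
	containers, err := cli.ContainerList(context.Background(), container.ListOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID[:12], c.Image, c.Status)
	}
}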

20
vendor/github.com/docker/docker/api/common.go generated vendored Normal file

@@ -0,0 +1,20 @@
package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
// DefaultVersion of the current REST API.
DefaultVersion = "1.46"
// MinSupportedAPIVersion is the minimum API version that can be supported
// by the API server, specified as "major.minor". Note that the daemon
// may be configured with a different minimum API version, as returned
// in [github.com/docker/docker/api/types.Version.MinAPIVersion].
//
// API requests for API versions lower than the configured version produce
// an error.
MinSupportedAPIVersion = "1.24"
// NoBaseImageSpecifier is the symbol used by the FROM
// command to specify that no base image is to be used.
NoBaseImageSpecifier = "scratch"
)
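For reference (not part of the vendored file), these version strings are usually compared with the helpers in github.com/docker/docker/api/types/versions, vendored elsewhere in this commit. The sketch below loosely mimics the minimum-version guard described in the comment above:

package main

import (
	"fmt"

	"github.com/docker/docker/api"
	"github.com/docker/docker/api/types/versions"
)

// checkVersion loosely mimics the kind of guard described above: requests for
// an API version below the supported minimum are rejected.
func checkVersion(requested string) error {
	if versions.LessThan(requested, api.MinSupportedAPIVersion) {
		return fmt.Errorf("client version %s is too old; minimum supported API version is %s",
			requested, api.MinSupportedAPIVersion)
	}
	return nil
}

func main() {
	fmt.Println(checkVersion("1.23"))             // below the 1.24 minimum -> error
	fmt.Println(checkVersion(api.DefaultVersion)) // current default -> <nil>
}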

12
vendor/github.com/docker/docker/api/swagger-gen.yaml generated vendored Normal file

@@ -0,0 +1,12 @@
layout:
  models:
    - name: definition
      source: asset:model
      target: "{{ joinFilePath .Target .ModelPackage }}"
      file_name: "{{ (snakize (pascalize .Name)) }}.go"
  operations:
    - name: handler
      source: asset:serverOperation
      target: "{{ joinFilePath .Target .APIPackage .Package }}"
      file_name: "{{ (snakize (pascalize .Name)) }}.go"

12770
vendor/github.com/docker/docker/api/swagger.yaml generated vendored Normal file

File diff suppressed because it is too large.


@@ -0,0 +1,23 @@
package blkiodev // import "github.com/docker/docker/api/types/blkiodev"
import "fmt"
// WeightDevice is a structure that holds device:weight pair
type WeightDevice struct {
Path string
Weight uint16
}
func (w *WeightDevice) String() string {
return fmt.Sprintf("%s:%d", w.Path, w.Weight)
}
// ThrottleDevice is a structure that holds device:rate_per_second pair
type ThrottleDevice struct {
Path string
Rate uint64
}
func (t *ThrottleDevice) String() string {
return fmt.Sprintf("%s:%d", t.Path, t.Rate)
}


@@ -0,0 +1,7 @@
package checkpoint
// Summary represents the details of a checkpoint when listing endpoints.
type Summary struct {
// Name is the name of the checkpoint.
Name string
}


@@ -0,0 +1,19 @@
package checkpoint
// CreateOptions holds parameters to create a checkpoint from a container.
type CreateOptions struct {
CheckpointID string
CheckpointDir string
Exit bool
}
// ListOptions holds parameters to list checkpoints for a container.
type ListOptions struct {
CheckpointDir string
}
// DeleteOptions holds parameters to delete a checkpoint from a container.
type DeleteOptions struct {
CheckpointID string
CheckpointDir string
}
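For orientation (this note and the sketch below are not part of the vendored file), these option structs are what the Go client's checkpoint methods accept in releases that ship these types. The container name and checkpoint ID are placeholders, and checkpointing itself requires an experimental daemon with CRIU installed:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/checkpoint"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	ctx := context.Background()

	// Checkpoint a running container, keeping it running afterwards.
	err = cli.CheckpointCreate(ctx, "my-container", checkpoint.CreateOptions{
		CheckpointID: "cp1",
		Exit:         false,
	})
	if err != nil {
		log.Fatal(err)
	}

	// List the checkpoints stored for that container.
	summaries, err := cli.CheckpointList(ctx, "my-container", checkpoint.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range summaries {
		fmt.Println("checkpoint:", s.Name)
	}
}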

257
vendor/github.com/docker/docker/api/types/client.go generated vendored Normal file

@@ -0,0 +1,257 @@
package types // import "github.com/docker/docker/api/types"
import (
"bufio"
"context"
"io"
"net"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
)
// NewHijackedResponse initializes a HijackedResponse type
func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse {
return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType}
}
// HijackedResponse holds connection information for a hijacked request.
type HijackedResponse struct {
mediaType string
Conn net.Conn
Reader *bufio.Reader
}
// Close closes the hijacked connection and reader.
func (h *HijackedResponse) Close() {
h.Conn.Close()
}
// MediaType lets the client know whether the HijackedResponse holds a raw or a multiplexed stream.
// It returns false if the HTTP Content-Type is not relevant, and the container must be inspected.
func (h *HijackedResponse) MediaType() (string, bool) {
if h.mediaType == "" {
return "", false
}
return h.mediaType, true
}
// CloseWriter is an interface implemented by structs
// that can close their input streams to prevent further writing.
type CloseWriter interface {
CloseWrite() error
}
// CloseWrite closes a readWriter for writing.
func (h *HijackedResponse) CloseWrite() error {
if conn, ok := h.Conn.(CloseWriter); ok {
return conn.CloseWrite()
}
return nil
}
// ImageBuildOptions holds the information
// necessary to build images.
type ImageBuildOptions struct {
Tags []string
SuppressOutput bool
RemoteContext string
NoCache bool
Remove bool
ForceRemove bool
PullParent bool
Isolation container.Isolation
CPUSetCPUs string
CPUSetMems string
CPUShares int64
CPUQuota int64
CPUPeriod int64
Memory int64
MemorySwap int64
CgroupParent string
NetworkMode string
ShmSize int64
Dockerfile string
Ulimits []*container.Ulimit
// BuildArgs needs to be a *string instead of just a string so that
// we can tell the difference between "" (empty string) and no value
// at all (nil). See the parsing of buildArgs in
// api/server/router/build/build_routes.go for even more info.
BuildArgs map[string]*string
AuthConfigs map[string]registry.AuthConfig
Context io.Reader
Labels map[string]string
// squash the resulting image's layers to the parent
// preserves the original image and creates a new one from the parent with all
// the changes applied to a single layer
Squash bool
// CacheFrom specifies images that are used for matching cache. Images
// specified here do not need to have a valid parent chain to match cache.
CacheFrom []string
SecurityOpt []string
ExtraHosts []string // List of extra hosts
Target string
SessionID string
Platform string
// Version specifies the version of the underlying builder to use
Version BuilderVersion
// BuildID is an optional identifier that can be passed together with the
// build request. The same identifier can be used to gracefully cancel the
// build with the cancel request.
BuildID string
// Outputs defines configurations for exporting build results. Only supported
// in BuildKit mode
Outputs []ImageBuildOutput
}
// ImageBuildOutput defines configuration for exporting a build result
type ImageBuildOutput struct {
Type string
Attrs map[string]string
}
// BuilderVersion sets the version of underlying builder to use
type BuilderVersion string
const (
// BuilderV1 is the first generation builder in docker daemon
BuilderV1 BuilderVersion = "1"
// BuilderBuildKit is builder based on moby/buildkit project
BuilderBuildKit BuilderVersion = "2"
)
// ImageBuildResponse holds information
// returned by a server after building
// an image.
type ImageBuildResponse struct {
Body io.ReadCloser
OSType string
}
// RequestPrivilegeFunc is a function interface that
// clients can supply to retry operations after
// getting an authorization error.
// This function returns the registry authentication
// header value in base 64 format, or an error
// if the privilege request fails.
type RequestPrivilegeFunc func(context.Context) (string, error)
// NodeListOptions holds parameters to list nodes with.
type NodeListOptions struct {
Filters filters.Args
}
// NodeRemoveOptions holds parameters to remove nodes with.
type NodeRemoveOptions struct {
Force bool
}
// ServiceCreateOptions contains the options to use when creating a service.
type ServiceCreateOptions struct {
// EncodedRegistryAuth is the encoded registry authorization credentials to
// use when updating the service.
//
// This field follows the format of the X-Registry-Auth header.
EncodedRegistryAuth string
// QueryRegistry indicates whether the service update requires
// contacting a registry. A registry may be contacted to retrieve
// the image digest and manifest, which in turn can be used to update
// platform or other information about the service.
QueryRegistry bool
}
// Values for RegistryAuthFrom in ServiceUpdateOptions
const (
RegistryAuthFromSpec = "spec"
RegistryAuthFromPreviousSpec = "previous-spec"
)
// ServiceUpdateOptions contains the options to be used for updating services.
type ServiceUpdateOptions struct {
// EncodedRegistryAuth is the encoded registry authorization credentials to
// use when updating the service.
//
// This field follows the format of the X-Registry-Auth header.
EncodedRegistryAuth string
// TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
// into this field. While it does open API users up to racy writes, most
// users may not need that level of consistency in practice.
// RegistryAuthFrom specifies where to find the registry authorization
// credentials if they are not given in EncodedRegistryAuth. Valid
// values are "spec" and "previous-spec".
RegistryAuthFrom string
// Rollback indicates whether a server-side rollback should be
// performed. When this is set, the provided spec will be ignored.
// The valid values are "previous" and "none". An empty value is the
// same as "none".
Rollback string
// QueryRegistry indicates whether the service update requires
// contacting a registry. A registry may be contacted to retrieve
// the image digest and manifest, which in turn can be used to update
// platform or other information about the service.
QueryRegistry bool
}
// ServiceListOptions holds parameters to list services with.
type ServiceListOptions struct {
Filters filters.Args
// Status indicates whether the server should include the service task
// count of running and desired tasks.
Status bool
}
// ServiceInspectOptions holds parameters related to the "service inspect"
// operation.
type ServiceInspectOptions struct {
InsertDefaults bool
}
// TaskListOptions holds parameters to list tasks with.
type TaskListOptions struct {
Filters filters.Args
}
// PluginRemoveOptions holds parameters to remove plugins.
type PluginRemoveOptions struct {
Force bool
}
// PluginEnableOptions holds parameters to enable plugins.
type PluginEnableOptions struct {
Timeout int
}
// PluginDisableOptions holds parameters to disable plugins.
type PluginDisableOptions struct {
Force bool
}
// PluginInstallOptions holds parameters to install a plugin.
type PluginInstallOptions struct {
Disabled bool
AcceptAllPermissions bool
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
RemoteRef string // RemoteRef is the plugin name on the registry
PrivilegeFunc RequestPrivilegeFunc
AcceptPermissionsFunc func(context.Context, PluginPrivileges) (bool, error)
Args []string
}
// SwarmUnlockKeyResponse contains the response for Engine API:
// GET /swarm/unlockkey
type SwarmUnlockKeyResponse struct {
// UnlockKey is the unlock key in ASCII-armored format.
UnlockKey string
}
// PluginCreateOptions hold all options to plugin create.
type PluginCreateOptions struct {
RepoName string
}
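A hedged sketch of how a HijackedResponse is typically consumed (assuming a client release matching these vendored types; the container name is a placeholder): attach to a container, then demultiplex the raw stream with pkg/stdcopy:

package main

import (
	"context"
	"log"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// ContainerAttach hijacks the HTTP connection and hands back a HijackedResponse.
	resp, err := cli.ContainerAttach(context.Background(), "my-container", container.AttachOptions{
		Stream: true,
		Stdout: true,
		Stderr: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Close()

	// Without a TTY the stream is multiplexed; stdcopy splits stdout and stderr.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil {
		log.Fatal(err)
	}
}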


@@ -0,0 +1,15 @@
package container
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ChangeType Kind of change
//
// Can be one of:
//
// - `0`: Modified ("C")
// - `1`: Added ("A")
// - `2`: Deleted ("D")
//
// swagger:model ChangeType
type ChangeType uint8


@@ -0,0 +1,23 @@
package container
const (
// ChangeModify represents the modify operation.
ChangeModify ChangeType = 0
// ChangeAdd represents the add operation.
ChangeAdd ChangeType = 1
// ChangeDelete represents the delete operation.
ChangeDelete ChangeType = 2
)
func (ct ChangeType) String() string {
switch ct {
case ChangeModify:
return "C"
case ChangeAdd:
return "A"
case ChangeDelete:
return "D"
default:
return ""
}
}


@@ -0,0 +1,73 @@
package container // import "github.com/docker/docker/api/types/container"
import (
"time"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/go-connections/nat"
dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
)
// MinimumDuration puts a minimum on user configured duration.
// This is to prevent API error on time unit. For example, API may
// set 3 as healthcheck interval with intention of 3 seconds, but
// Docker interprets it as 3 nanoseconds.
const MinimumDuration = 1 * time.Millisecond
// StopOptions holds the options to stop or restart a container.
type StopOptions struct {
// Signal (optional) is the signal to send to the container to (gracefully)
// stop it before forcibly terminating the container with SIGKILL after the
// timeout expires. If no value is set, the default (SIGTERM) is used.
Signal string `json:",omitempty"`
// Timeout (optional) is the timeout (in seconds) to wait for the container
// to stop gracefully before forcibly terminating it with SIGKILL.
//
// - Use nil to use the default timeout (10 seconds).
// - Use '-1' to wait indefinitely.
// - Use '0' to not wait for the container to exit gracefully, and
// immediately proceed to forcibly terminating the container.
// - Other positive values are used as timeout (in seconds).
Timeout *int `json:",omitempty"`
}
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig = dockerspec.HealthcheckConfig
// Config contains the configuration data about a container.
// It should hold only portable information about the container.
// Here, "portable" means "independent from the host we are running on".
// Non-portable information *should* appear in HostConfig.
// All fields added to this struct must be marked `omitempty` to keep getting
// predictable hashes from the old `v1Compatibility` configuration.
type Config struct {
Hostname string // Hostname
Domainname string // Domainname
User string // User that will run the command(s) inside the container; also supports user:group
AttachStdin bool // Attach the standard input, makes possible user interaction
AttachStdout bool // Attach the standard output
AttachStderr bool // Attach the standard error
ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the first attached client disconnects.
Env []string // List of environment variables to set in the container
Cmd strslice.StrSlice // Command to run when starting the container
Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific).
Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
Volumes map[string]struct{} // List of volumes (mounts) used for the container
WorkingDir string // Current directory (PWD) in which the command will be launched
Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
NetworkDisabled bool `json:",omitempty"` // Is network disabled
// Mac Address of the container.
//
// Deprecated: this field is deprecated since API v1.44. Use EndpointSettings.MacAddress instead.
MacAddress string `json:",omitempty"`
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
Labels map[string]string // List of labels set to this container
StopSignal string `json:",omitempty"` // Signal to stop a container
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
}
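A small usage sketch for StopOptions (not part of the vendored file; the container name is a placeholder), assuming the client method that accepts this struct:

package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Ask for a graceful stop, escalating to SIGKILL after 30 seconds.
	timeout := 30
	err = cli.ContainerStop(context.Background(), "my-container", container.StopOptions{
		Signal:  "SIGTERM",
		Timeout: &timeout,
	})
	if err != nil {
		log.Fatal(err)
	}
}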


@@ -0,0 +1,44 @@
package container
import (
"io"
"os"
"time"
)
// PruneReport contains the response for Engine API:
// POST "/containers/prune"
type PruneReport struct {
ContainersDeleted []string
SpaceReclaimed uint64
}
// PathStat is used to encode the header from
// GET "/containers/{name:.*}/archive"
// "Name" is the file or directory name.
type PathStat struct {
Name string `json:"name"`
Size int64 `json:"size"`
Mode os.FileMode `json:"mode"`
Mtime time.Time `json:"mtime"`
LinkTarget string `json:"linkTarget"`
}
// CopyToContainerOptions holds information
// about files to copy into a container
type CopyToContainerOptions struct {
AllowOverwriteDirWithFile bool
CopyUIDGID bool
}
// StatsResponseReader wraps an io.ReadCloser to read (a stream of) stats
// for a container, as produced by the GET "/stats" endpoint.
//
// The OSType field is set to the server's platform to allow
// platform-specific handling of the response.
//
// TODO(thaJeztah): remove this wrapper, and make OSType part of [StatsResponse].
type StatsResponseReader struct {
Body io.ReadCloser `json:"body"`
OSType string `json:"ostype"`
}


@@ -0,0 +1,22 @@
package container // import "github.com/docker/docker/api/types/container"
// ----------------------------------------------------------------------------
// Code generated by `swagger generate operation`. DO NOT EDIT.
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerTopOKBody OK response to ContainerTop operation
// swagger:model ContainerTopOKBody
type ContainerTopOKBody struct {
// Each process running in the container, where each process
// is an array of values corresponding to the titles.
//
// Required: true
Processes [][]string `json:"Processes"`
// The ps column titles
// Required: true
Titles []string `json:"Titles"`
}


@@ -0,0 +1,16 @@
package container // import "github.com/docker/docker/api/types/container"
// ----------------------------------------------------------------------------
// Code generated by `swagger generate operation`. DO NOT EDIT.
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerUpdateOKBody OK response to ContainerUpdate operation
// swagger:model ContainerUpdateOKBody
type ContainerUpdateOKBody struct {
// warnings
// Required: true
Warnings []string `json:"Warnings"`
}


@@ -0,0 +1,13 @@
package container
import "github.com/docker/docker/api/types/network"
// CreateRequest is the request message sent to the server for container
// create calls. It is a config wrapper that holds the container [Config]
// (portable) and the corresponding [HostConfig] (non-portable) and
// [network.NetworkingConfig].
type CreateRequest struct {
*Config
HostConfig *HostConfig `json:"HostConfig,omitempty"`
NetworkingConfig *network.NetworkingConfig `json:"NetworkingConfig,omitempty"`
}


@@ -0,0 +1,19 @@
package container
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// CreateResponse ContainerCreateResponse
//
// OK response to ContainerCreate operation
// swagger:model CreateResponse
type CreateResponse struct {
// The ID of the created container
// Required: true
ID string `json:"Id"`
// Warnings encountered when creating the container
// Required: true
Warnings []string `json:"Warnings"`
}


@@ -0,0 +1,9 @@
package container
type errInvalidParameter struct{ error }
func (e *errInvalidParameter) InvalidParameter() {}
func (e *errInvalidParameter) Unwrap() error {
return e.error
}


@@ -0,0 +1,43 @@
package container
// ExecOptions is a small subset of the Config struct that holds the configuration
// for the exec feature of docker.
type ExecOptions struct {
User string // User that will run the command
Privileged bool // Is the container in privileged mode
Tty bool // Attach standard streams to a tty.
ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width]
AttachStdin bool // Attach the standard input, makes possible user interaction
AttachStderr bool // Attach the standard error
AttachStdout bool // Attach the standard output
Detach bool // Execute in detach mode
DetachKeys string // Escape keys for detach
Env []string // Environment variables
WorkingDir string // Working directory
Cmd []string // Execution commands and args
}
// ExecStartOptions is a temp struct used by execStart.
// Config fields are part of ExecConfig in the runconfig package.
type ExecStartOptions struct {
// ExecStart will first check if it's detached
Detach bool
// Check if there's a tty
Tty bool
// Terminal size [height, width], unused if Tty == false
ConsoleSize *[2]uint `json:",omitempty"`
}
// ExecAttachOptions is a temp struct used by execAttach.
//
// TODO(thaJeztah): make this a separate type; ContainerExecAttach does not use the Detach option, and cannot run detached.
type ExecAttachOptions = ExecStartOptions
// ExecInspect holds information returned by exec inspect.
type ExecInspect struct {
ExecID string `json:"ID"`
ContainerID string
Running bool
ExitCode int
Pid int
}
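A hedged end-to-end sketch of the exec flow these types describe (create, attach, inspect), assuming a client release whose exec methods accept these option types; the container name and command are placeholders:

package main

import (
	"context"
	"log"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	ctx := context.Background()

	// Create an exec instance in a (hypothetical) running container.
	created, err := cli.ContainerExecCreate(ctx, "my-container", container.ExecOptions{
		Cmd:          []string{"ls", "-l", "/"},
		AttachStdout: true,
		AttachStderr: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Attach to it; this starts the command and hijacks the stream.
	attach, err := cli.ContainerExecAttach(ctx, created.ID, container.ExecAttachOptions{})
	if err != nil {
		log.Fatal(err)
	}
	defer attach.Close()

	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, attach.Reader); err != nil {
		log.Fatal(err)
	}

	// Inspect for the exit code once the stream is drained.
	inspect, err := cli.ContainerExecInspect(ctx, created.ID)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("exit code:", inspect.ExitCode)
}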


@@ -0,0 +1,19 @@
package container
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// FilesystemChange Change in the container's filesystem.
//
// swagger:model FilesystemChange
type FilesystemChange struct {
// kind
// Required: true
Kind ChangeType `json:"Kind"`
// Path to file or directory that has changed.
//
// Required: true
Path string `json:"Path"`
}
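For context (not part of the vendored file), FilesystemChange and ChangeType are what the client's ContainerDiff call returns in releases matching these types; a minimal sketch with a placeholder container name:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Report filesystem changes in the container relative to its image.
	changes, err := cli.ContainerDiff(context.Background(), "my-container")
	if err != nil {
		log.Fatal(err)
	}
	for _, ch := range changes {
		// ChangeType's String method renders "C", "A" or "D", as `docker diff` does.
		fmt.Printf("%s %s\n", ch.Kind, ch.Path)
	}
}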


@@ -0,0 +1,500 @@
package container // import "github.com/docker/docker/api/types/container"
import (
"fmt"
"strings"
"github.com/docker/docker/api/types/blkiodev"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/go-connections/nat"
units "github.com/docker/go-units"
)
// CgroupnsMode represents the cgroup namespace mode of the container
type CgroupnsMode string
// cgroup namespace modes for containers
const (
CgroupnsModeEmpty CgroupnsMode = ""
CgroupnsModePrivate CgroupnsMode = "private"
CgroupnsModeHost CgroupnsMode = "host"
)
// IsPrivate indicates whether the container uses its own private cgroup namespace
func (c CgroupnsMode) IsPrivate() bool {
return c == CgroupnsModePrivate
}
// IsHost indicates whether the container shares the host's cgroup namespace
func (c CgroupnsMode) IsHost() bool {
return c == CgroupnsModeHost
}
// IsEmpty indicates whether the container cgroup namespace mode is unset
func (c CgroupnsMode) IsEmpty() bool {
return c == CgroupnsModeEmpty
}
// Valid indicates whether the cgroup namespace mode is valid
func (c CgroupnsMode) Valid() bool {
return c.IsEmpty() || c.IsPrivate() || c.IsHost()
}
// Isolation represents the isolation technology of a container. The supported
// values are platform specific
type Isolation string
// Isolation modes for containers
const (
IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default)
IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon
IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode
IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode
)
// IsDefault indicates the default isolation technology of a container. On Linux this
// is the native driver. On Windows, this is a Windows Server Container.
func (i Isolation) IsDefault() bool {
// TODO consider making isolation-mode strict (case-sensitive)
v := Isolation(strings.ToLower(string(i)))
return v == IsolationDefault || v == IsolationEmpty
}
// IsHyperV indicates the use of a Hyper-V partition for isolation
func (i Isolation) IsHyperV() bool {
// TODO consider making isolation-mode strict (case-sensitive)
return Isolation(strings.ToLower(string(i))) == IsolationHyperV
}
// IsProcess indicates the use of process isolation
func (i Isolation) IsProcess() bool {
// TODO consider making isolation-mode strict (case-sensitive)
return Isolation(strings.ToLower(string(i))) == IsolationProcess
}
// IpcMode represents the container ipc stack.
type IpcMode string
// IpcMode constants
const (
IPCModeNone IpcMode = "none"
IPCModeHost IpcMode = "host"
IPCModeContainer IpcMode = "container"
IPCModePrivate IpcMode = "private"
IPCModeShareable IpcMode = "shareable"
)
// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared.
func (n IpcMode) IsPrivate() bool {
return n == IPCModePrivate
}
// IsHost indicates whether the container shares the host's ipc namespace.
func (n IpcMode) IsHost() bool {
return n == IPCModeHost
}
// IsShareable indicates whether the container's ipc namespace can be shared with another container.
func (n IpcMode) IsShareable() bool {
return n == IPCModeShareable
}
// IsContainer indicates whether the container uses another container's ipc namespace.
func (n IpcMode) IsContainer() bool {
_, ok := containerID(string(n))
return ok
}
// IsNone indicates whether container IpcMode is set to "none".
func (n IpcMode) IsNone() bool {
return n == IPCModeNone
}
// IsEmpty indicates whether container IpcMode is empty
func (n IpcMode) IsEmpty() bool {
return n == ""
}
// Valid indicates whether the ipc mode is valid.
func (n IpcMode) Valid() bool {
// TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid.
return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer()
}
// Container returns the name of the container whose ipc stack is going to be used.
func (n IpcMode) Container() (idOrName string) {
idOrName, _ = containerID(string(n))
return idOrName
}
// NetworkMode represents the container network stack.
type NetworkMode string
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
return n == network.NetworkNone
}
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
return n == network.NetworkDefault
}
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsContainer indicates whether container uses a container network stack.
func (n NetworkMode) IsContainer() bool {
_, ok := containerID(string(n))
return ok
}
// ConnectedContainer is the id of the container whose network this container is connected to.
func (n NetworkMode) ConnectedContainer() (idOrName string) {
idOrName, _ = containerID(string(n))
return idOrName
}
// UserDefined indicates user-created network
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
}
return ""
}
// UsernsMode represents userns mode in the container.
type UsernsMode string
// IsHost indicates whether the container uses the host's userns.
func (n UsernsMode) IsHost() bool {
return n == "host"
}
// IsPrivate indicates whether the container uses a private userns.
func (n UsernsMode) IsPrivate() bool {
return !n.IsHost()
}
// Valid indicates whether the userns is valid.
func (n UsernsMode) Valid() bool {
return n == "" || n.IsHost()
}
// CgroupSpec represents the cgroup to use for the container.
type CgroupSpec string
// IsContainer indicates whether the container is using another container cgroup
func (c CgroupSpec) IsContainer() bool {
_, ok := containerID(string(c))
return ok
}
// Valid indicates whether the cgroup spec is valid.
func (c CgroupSpec) Valid() bool {
// TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid.
return c == "" || c.IsContainer()
}
// Container returns the ID or name of the container whose cgroup will be used.
func (c CgroupSpec) Container() (idOrName string) {
idOrName, _ = containerID(string(c))
return idOrName
}
// UTSMode represents the UTS namespace of the container.
type UTSMode string
// IsPrivate indicates whether the container uses its private UTS namespace.
func (n UTSMode) IsPrivate() bool {
return !n.IsHost()
}
// IsHost indicates whether the container uses the host's UTS namespace.
func (n UTSMode) IsHost() bool {
return n == "host"
}
// Valid indicates whether the UTS namespace is valid.
func (n UTSMode) Valid() bool {
return n == "" || n.IsHost()
}
// PidMode represents the pid namespace of the container.
type PidMode string
// IsPrivate indicates whether the container uses its own new pid namespace.
func (n PidMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsHost indicates whether the container uses the host's pid namespace.
func (n PidMode) IsHost() bool {
return n == "host"
}
// IsContainer indicates whether the container uses a container's pid namespace.
func (n PidMode) IsContainer() bool {
_, ok := containerID(string(n))
return ok
}
// Valid indicates whether the pid namespace is valid.
func (n PidMode) Valid() bool {
return n == "" || n.IsHost() || validContainer(string(n))
}
// Container returns the name of the container whose pid namespace is going to be used.
func (n PidMode) Container() (idOrName string) {
idOrName, _ = containerID(string(n))
return idOrName
}
// DeviceRequest represents a request for devices from a device driver.
// Used by GPU device drivers.
type DeviceRequest struct {
Driver string // Name of device driver
Count int // Number of devices to request (-1 = All)
DeviceIDs []string // List of device IDs as recognizable by the device driver
Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu")
Options map[string]string // Options to pass onto the device driver
}
// DeviceMapping represents the device mapping between the host and the container.
type DeviceMapping struct {
PathOnHost string
PathInContainer string
CgroupPermissions string
}
// RestartPolicy represents the restart policies of the container.
type RestartPolicy struct {
Name RestartPolicyMode
MaximumRetryCount int
}
type RestartPolicyMode string
const (
RestartPolicyDisabled RestartPolicyMode = "no"
RestartPolicyAlways RestartPolicyMode = "always"
RestartPolicyOnFailure RestartPolicyMode = "on-failure"
RestartPolicyUnlessStopped RestartPolicyMode = "unless-stopped"
)
// IsNone indicates whether the container has the "no" restart policy.
// This means the container will not automatically restart when exiting.
func (rp *RestartPolicy) IsNone() bool {
return rp.Name == RestartPolicyDisabled || rp.Name == ""
}
// IsAlways indicates whether the container has the "always" restart policy.
// This means the container will automatically restart regardless of the exit status.
func (rp *RestartPolicy) IsAlways() bool {
return rp.Name == RestartPolicyAlways
}
// IsOnFailure indicates whether the container has the "on-failure" restart policy.
// This means the container will automatically restart if it exits with a non-zero exit status.
func (rp *RestartPolicy) IsOnFailure() bool {
return rp.Name == RestartPolicyOnFailure
}
// IsUnlessStopped indicates whether the container has the
// "unless-stopped" restart policy. This means the container will
// automatically restart unless the user has put it into a stopped state.
func (rp *RestartPolicy) IsUnlessStopped() bool {
return rp.Name == RestartPolicyUnlessStopped
}
// IsSame compares two RestartPolicy to see if they are the same
func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
}
// ValidateRestartPolicy validates the given RestartPolicy.
func ValidateRestartPolicy(policy RestartPolicy) error {
switch policy.Name {
case RestartPolicyAlways, RestartPolicyUnlessStopped, RestartPolicyDisabled:
if policy.MaximumRetryCount != 0 {
msg := "invalid restart policy: maximum retry count can only be used with 'on-failure'"
if policy.MaximumRetryCount < 0 {
msg += " and cannot be negative"
}
return &errInvalidParameter{fmt.Errorf(msg)}
}
return nil
case RestartPolicyOnFailure:
if policy.MaximumRetryCount < 0 {
return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be negative")}
}
return nil
case "":
// Versions before v25.0.0 created an empty restart-policy "name" as
// default. Allow an empty name with "any" MaximumRetryCount for
// backward-compatibility.
return nil
default:
return &errInvalidParameter{fmt.Errorf("invalid restart policy: unknown policy '%s'; use one of '%s', '%s', '%s', or '%s'", policy.Name, RestartPolicyDisabled, RestartPolicyAlways, RestartPolicyOnFailure, RestartPolicyUnlessStopped)}
}
}
// LogMode is a type to define the available modes for logging
// These modes affect how logs are handled when log messages start piling up.
type LogMode string
// Available logging modes
const (
LogModeUnset LogMode = ""
LogModeBlocking LogMode = "blocking"
LogModeNonBlock LogMode = "non-blocking"
)
// LogConfig represents the logging configuration of the container.
type LogConfig struct {
Type string
Config map[string]string
}
// Ulimit is an alias for [units.Ulimit], which may be moving to a different
// location or become a local type. This alias is to help transitioning.
//
// Users are recommended to use this alias instead of using [units.Ulimit] directly.
type Ulimit = units.Ulimit
// Resources contains container's resources (cgroups config, ulimits...)
type Resources struct {
// Applicable to all platforms
CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
Memory int64 // Memory limit (in bytes)
NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10<sup>-9</sup> CPUs.
// Applicable to UNIX platforms
CgroupParent string // Parent cgroup.
BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
BlkioWeightDevice []*blkiodev.WeightDevice
BlkioDeviceReadBps []*blkiodev.ThrottleDevice
BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period
CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime
CpusetCpus string // CpusetCpus 0-2, 0,1
CpusetMems string // CpusetMems 0-2, 0,1
Devices []DeviceMapping // List of devices to map inside the container
DeviceCgroupRules []string // List of rules to be added to the device cgroup
DeviceRequests []DeviceRequest // List of device requests for device drivers
// KernelMemory specifies the kernel memory limit (in bytes) for the container.
// Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes.
KernelMemory int64 `json:",omitempty"`
KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes)
MemoryReservation int64 // Memory soft limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
OomKillDisable *bool // Whether to disable OOM Killer or not
PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
Ulimits []*Ulimit // List of ulimits to be set in the container
// Applicable to Windows
CPUCount int64 `json:"CpuCount"` // CPU count
CPUPercent int64 `json:"CpuPercent"` // CPU percent
IOMaximumIOps uint64 // Maximum IOps for the container system drive
IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
}
// UpdateConfig holds the mutable attributes of a Container.
// Those attributes can be updated at runtime.
type UpdateConfig struct {
// Contains container's resources (cgroups, ulimits)
Resources
RestartPolicy RestartPolicy
}
// HostConfig the non-portable Config structure of a container.
// Here, "non-portable" means "dependent of the host we are running on".
// Portable information *should* appear in Config.
type HostConfig struct {
// Applicable to all platforms
Binds []string // List of volume bindings for this container
ContainerIDFile string // File (path) where the containerId is written
LogConfig LogConfig // Configuration of the logs for this container
NetworkMode NetworkMode // Network mode to use for the container
PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
RestartPolicy RestartPolicy // Restart policy to be used for the container
AutoRemove bool // Automatically remove container when it exits
VolumeDriver string // Name of the volume driver used to mount volumes
VolumesFrom []string // List of volumes to take from other containers
ConsoleSize [2]uint // Initial console size (height,width)
Annotations map[string]string `json:",omitempty"` // Arbitrary non-identifying metadata attached to container and provided to the runtime
// Applicable to UNIX platforms
CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container
DNS []string `json:"Dns"` // List of DNS servers to lookup
DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
ExtraHosts []string // List of extra hosts
GroupAdd []string // List of additional groups that the container process will run as
IpcMode IpcMode // IPC namespace to use for the container
Cgroup CgroupSpec // Cgroup to use for the container
Links []string // List of links (in the name:alias form)
OomScoreAdj int // Container preference for OOM-killing
PidMode PidMode // PID namespace to use for the container
Privileged bool // Is the container in privileged mode
PublishAllPorts bool // Should docker publish all exposed ports for the container
ReadonlyRootfs bool // Is the container root filesystem read-only
SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
UTSMode UTSMode // UTS namespace to use for the container
UsernsMode UsernsMode // The user namespace to use for the container
ShmSize int64 // Total shm memory usage
Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
Runtime string `json:",omitempty"` // Runtime to use with this container
// Applicable to Windows
Isolation Isolation // Isolation technology of the container (e.g. default, hyperv)
// Contains container's resources (cgroups, ulimits)
Resources
// Mounts specs used by the container
Mounts []mount.Mount `json:",omitempty"`
// MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths)
MaskedPaths []string
// ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths)
ReadonlyPaths []string
// Run a custom init inside the container, if null, use the daemon's configured settings
Init *bool `json:",omitempty"`
}
// containerID splits "container:<ID|name>" values. It returns the container
// ID or name, and whether an ID/name was found. It returns an empty string and
// a "false" if the value does not have a "container:" prefix. Further validation
// of the returned, including checking if the value is empty, should be handled
// by the caller.
func containerID(val string) (idOrName string, ok bool) {
k, v, hasSep := strings.Cut(val, ":")
if !hasSep || k != "container" {
return "", false
}
return v, true
}
// validContainer checks if the given value is a "container:" mode with
// a non-empty name/ID.
func validContainer(val string) bool {
id, ok := containerID(val)
return ok && id != ""
}
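To tie HostConfig, RestartPolicy and Resources together, here is a hedged creation sketch (not part of the vendored file; the image, names and resource limits are placeholders), assuming the usual ContainerCreate/ContainerStart client signatures:

package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	ctx := context.Background()

	hostConfig := &container.HostConfig{
		RestartPolicy: container.RestartPolicy{
			Name:              container.RestartPolicyOnFailure,
			MaximumRetryCount: 3,
		},
		Resources: container.Resources{
			Memory:   256 * 1024 * 1024, // 256 MiB
			NanoCPUs: 500_000_000,       // half a CPU
		},
	}
	if err := container.ValidateRestartPolicy(hostConfig.RestartPolicy); err != nil {
		log.Fatal(err)
	}

	created, err := cli.ContainerCreate(ctx,
		&container.Config{Image: "alpine", Cmd: []string{"sleep", "60"}},
		hostConfig,
		nil, // NetworkingConfig
		nil, // Platform
		"hostconfig-demo",
	)
	if err != nil {
		log.Fatal(err)
	}
	if err := cli.ContainerStart(ctx, created.ID, container.StartOptions{}); err != nil {
		log.Fatal(err)
	}
}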


@@ -0,0 +1,45 @@
//go:build !windows
package container // import "github.com/docker/docker/api/types/container"
import "github.com/docker/docker/api/types/network"
// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
return i.IsDefault()
}
// IsBridge indicates whether container uses the bridge network stack
func (n NetworkMode) IsBridge() bool {
return n == network.NetworkBridge
}
// IsHost indicates whether container uses the host network stack.
func (n NetworkMode) IsHost() bool {
return n == network.NetworkHost
}
// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
}
// NetworkName returns the name of the network stack.
func (n NetworkMode) NetworkName() string {
switch {
case n.IsDefault():
return network.NetworkDefault
case n.IsBridge():
return network.NetworkBridge
case n.IsHost():
return network.NetworkHost
case n.IsNone():
return network.NetworkNone
case n.IsContainer():
return "container"
case n.IsUserDefined():
return n.UserDefined()
default:
return ""
}
}


@@ -0,0 +1,47 @@
package container // import "github.com/docker/docker/api/types/container"
import "github.com/docker/docker/api/types/network"
// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
return i.IsDefault() || i.IsHyperV() || i.IsProcess()
}
// IsBridge indicates whether container uses the bridge network stack;
// on Windows it is given the name NAT.
func (n NetworkMode) IsBridge() bool {
return n == network.NetworkNat
}
// IsHost indicates whether container uses the host network stack.
// It returns false as this is not supported by Windows.
func (n NetworkMode) IsHost() bool {
return false
}
// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
}
// NetworkName returns the name of the network stack.
func (n NetworkMode) NetworkName() string {
switch {
case n.IsDefault():
return network.NetworkDefault
case n.IsBridge():
return network.NetworkNat
case n.IsHost():
// Windows currently doesn't support host network-mode, so
// this would currently never happen.
return network.NetworkHost
case n.IsNone():
return network.NetworkNone
case n.IsContainer():
return "container"
case n.IsUserDefined():
return n.UserDefined()
default:
return ""
}
}


@@ -0,0 +1,67 @@
package container
import "github.com/docker/docker/api/types/filters"
// ResizeOptions holds parameters to resize a TTY.
// It can be used to resize container TTYs and
// exec process TTYs too.
type ResizeOptions struct {
Height uint
Width uint
}
// AttachOptions holds parameters to attach to a container.
type AttachOptions struct {
Stream bool
Stdin bool
Stdout bool
Stderr bool
DetachKeys string
Logs bool
}
// CommitOptions holds parameters to commit changes into a container.
type CommitOptions struct {
Reference string
Comment string
Author string
Changes []string
Pause bool
Config *Config
}
// RemoveOptions holds parameters to remove containers.
type RemoveOptions struct {
RemoveVolumes bool
RemoveLinks bool
Force bool
}
// StartOptions holds parameters to start containers.
type StartOptions struct {
CheckpointID string
CheckpointDir string
}
// ListOptions holds parameters to list containers with.
type ListOptions struct {
Size bool
All bool
Latest bool
Since string
Before string
Limit int
Filters filters.Args
}
// LogsOptions holds parameters to filter logs with.
type LogsOptions struct {
ShowStdout bool
ShowStderr bool
Since string
Until string
Timestamps bool
Follow bool
Tail string
Details bool
}
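A short usage sketch for LogsOptions (not part of the vendored file; the container name is a placeholder), assuming the ContainerLogs client method that accepts this struct:

package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Stream the last 100 log lines, then keep following.
	rc, err := cli.ContainerLogs(context.Background(), "my-container", container.LogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Tail:       "100",
		Follow:     true,
		Timestamps: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// For a TTY-enabled container the stream is raw; otherwise it is the
	// multiplexed format (see stdcopy). Copying raw is enough for a sketch.
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		log.Fatal(err)
	}
}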


@@ -0,0 +1,181 @@
package container
import "time"
// ThrottlingData stores CPU throttling stats of one running container.
// Not used on Windows.
type ThrottlingData struct {
// Number of periods with throttling active
Periods uint64 `json:"periods"`
// Number of periods when the container hits its throttling limit.
ThrottledPeriods uint64 `json:"throttled_periods"`
// Aggregate time the container was throttled for in nanoseconds.
ThrottledTime uint64 `json:"throttled_time"`
}
// CPUUsage stores all CPU stats aggregated since container inception.
type CPUUsage struct {
// Total CPU time consumed.
// Units: nanoseconds (Linux)
// Units: 100's of nanoseconds (Windows)
TotalUsage uint64 `json:"total_usage"`
// Total CPU time consumed per core (Linux). Not used on Windows.
// Units: nanoseconds.
PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
// Time spent by tasks of the cgroup in kernel mode (Linux).
// Time spent by all container processes in kernel mode (Windows).
// Units: nanoseconds (Linux).
// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
// Time spent by tasks of the cgroup in user mode (Linux).
// Time spent by all container processes in user mode (Windows).
// Units: nanoseconds (Linux).
// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers
UsageInUsermode uint64 `json:"usage_in_usermode"`
}
// CPUStats aggregates and wraps all CPU related info of container
type CPUStats struct {
// CPU Usage. Linux and Windows.
CPUUsage CPUUsage `json:"cpu_usage"`
// System Usage. Linux only.
SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
// Online CPUs. Linux only.
OnlineCPUs uint32 `json:"online_cpus,omitempty"`
// Throttling Data. Linux only.
ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
}
// MemoryStats aggregates all memory stats since container inception on Linux.
// Windows returns stats for commit and private working set only.
type MemoryStats struct {
// Linux Memory Stats
// current res_counter usage for memory
Usage uint64 `json:"usage,omitempty"`
// maximum usage ever recorded.
MaxUsage uint64 `json:"max_usage,omitempty"`
// TODO(vishh): Export these as stronger types.
// all the stats exported via memory.stat.
Stats map[string]uint64 `json:"stats,omitempty"`
// number of times memory usage hits limits.
Failcnt uint64 `json:"failcnt,omitempty"`
Limit uint64 `json:"limit,omitempty"`
// Windows Memory Stats
// See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
// committed bytes
Commit uint64 `json:"commitbytes,omitempty"`
// peak committed bytes
CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
// private working set
PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
}
// BlkioStatEntry is one small entity to store a piece of Blkio stats
// Not used on Windows.
type BlkioStatEntry struct {
Major uint64 `json:"major"`
Minor uint64 `json:"minor"`
Op string `json:"op"`
Value uint64 `json:"value"`
}
// BlkioStats stores all I/O service stats for data read and write.
// This is a Linux specific structure as the differences between expressing
// block I/O on Windows and Linux are sufficiently significant to make
// little sense attempting to morph into a combined structure.
type BlkioStats struct {
// number of bytes transferred to and from the block device
IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
}
// StorageStats is the disk I/O stats for read/write on Windows.
type StorageStats struct {
ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"`
ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"`
WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"`
}
// NetworkStats aggregates the network stats of one container
type NetworkStats struct {
// Bytes received. Windows and Linux.
RxBytes uint64 `json:"rx_bytes"`
// Packets received. Windows and Linux.
RxPackets uint64 `json:"rx_packets"`
// Received errors. Not used on Windows. Note that we don't `omitempty` this
// field as it is expected in the >=v1.21 API stats structure.
RxErrors uint64 `json:"rx_errors"`
// Incoming packets dropped. Windows and Linux.
RxDropped uint64 `json:"rx_dropped"`
// Bytes sent. Windows and Linux.
TxBytes uint64 `json:"tx_bytes"`
// Packets sent. Windows and Linux.
TxPackets uint64 `json:"tx_packets"`
// Sent errors. Not used on Windows. Note that we don't `omitempty` this
// field as it is expected in the >=v1.21 API stats structure.
TxErrors uint64 `json:"tx_errors"`
// Outgoing packets dropped. Windows and Linux.
TxDropped uint64 `json:"tx_dropped"`
// Endpoint ID. Not used on Linux.
EndpointID string `json:"endpoint_id,omitempty"`
// Instance ID. Not used on Linux.
InstanceID string `json:"instance_id,omitempty"`
}
// PidsStats contains the stats of a container's pids
type PidsStats struct {
// Current is the number of pids in the cgroup
Current uint64 `json:"current,omitempty"`
// Limit is the hard limit on the number of pids in the cgroup.
// A "Limit" of 0 means that there is no limit.
Limit uint64 `json:"limit,omitempty"`
}
// Stats is the top-level struct aggregating all types of stats of one container.
type Stats struct {
// Common stats
Read time.Time `json:"read"`
PreRead time.Time `json:"preread"`
// Linux specific stats, not populated on Windows.
PidsStats PidsStats `json:"pids_stats,omitempty"`
BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
// Windows specific stats, not populated on Linux.
NumProcs uint32 `json:"num_procs"`
StorageStats StorageStats `json:"storage_stats,omitempty"`
// Shared stats
CPUStats CPUStats `json:"cpu_stats,omitempty"`
PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
MemoryStats MemoryStats `json:"memory_stats,omitempty"`
}
// StatsResponse wraps Stats with the container's name, ID, and the per-network stats (Networks) added in API v1.21.
//
// TODO(thaJeztah): unify with [Stats]. This wrapper was to account for pre-api v1.21 changes, see https://github.com/moby/moby/commit/d3379946ec96fb6163cb8c4517d7d5a067045801
type StatsResponse struct {
Stats
Name string `json:"name,omitempty"`
ID string `json:"id,omitempty"`
// Networks request version >=1.21
Networks map[string]NetworkStats `json:"networks,omitempty"`
}
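
A minimal sketch (not part of the vendored file) of one common way to derive a Linux CPU percentage from two consecutive samples carried in a StatsResponse; the fallback to len(PercpuUsage) is an assumption for kernels that do not report OnlineCPUs:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func cpuPercent(s container.StatsResponse) float64 {
	// Delta of container CPU time vs. delta of total system CPU time.
	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage) - float64(s.PreCPUStats.CPUUsage.TotalUsage)
	sysDelta := float64(s.CPUStats.SystemUsage) - float64(s.PreCPUStats.SystemUsage)

	online := float64(s.CPUStats.OnlineCPUs)
	if online == 0 {
		online = float64(len(s.CPUStats.CPUUsage.PercpuUsage))
	}
	if cpuDelta <= 0 || sysDelta <= 0 {
		return 0
	}
	return cpuDelta / sysDelta * online * 100
}

func main() {
	fmt.Println(cpuPercent(container.StatsResponse{}))
}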

View File

@ -0,0 +1,12 @@
package container
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// WaitExitError container waiting error, if any
// swagger:model WaitExitError
type WaitExitError struct {
// Details of an error
Message string `json:"Message,omitempty"`
}

View File

@ -0,0 +1,18 @@
package container
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// WaitResponse ContainerWaitResponse
//
// OK response to ContainerWait operation
// swagger:model WaitResponse
type WaitResponse struct {
// error
Error *WaitExitError `json:"Error,omitempty"`
// Exit code of the container
// Required: true
StatusCode int64 `json:"StatusCode"`
}

View File

@ -0,0 +1,22 @@
package container // import "github.com/docker/docker/api/types/container"
// WaitCondition is a type used to specify a container state for which
// to wait.
type WaitCondition string
// Possible WaitCondition Values.
//
// WaitConditionNotRunning (default) is used to wait for any of the non-running
// states: "created", "exited", "dead", "removing", or "removed".
//
// WaitConditionNextExit is used to wait for the next time the state changes
// to a non-running state. If the state is currently "created" or "exited",
// this would cause Wait() to block until either the container runs and exits
// or is removed.
//
// WaitConditionRemoved is used to wait for the container to be removed.
const (
WaitConditionNotRunning WaitCondition = "not-running"
WaitConditionNextExit WaitCondition = "next-exit"
WaitConditionRemoved WaitCondition = "removed"
)
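
A minimal sketch (not part of the vendored file) of selecting a WaitCondition when waiting on a container; the ContainerWait signature from github.com/docker/docker/client is assumed here and not shown in this vendor set:

package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// waitForExit blocks until the container reaches a non-running state and
// returns its exit code.
func waitForExit(ctx context.Context, cli *client.Client, id string) (int64, error) {
	respCh, errCh := cli.ContainerWait(ctx, id, container.WaitConditionNotRunning)
	select {
	case resp := <-respCh:
		if resp.Error != nil {
			return 0, fmt.Errorf("wait error: %s", resp.Error.Message)
		}
		return resp.StatusCode, nil
	case err := <-errCh:
		return 0, err
	}
}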

View File

@ -0,0 +1,13 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ErrorResponse Represents an error.
// swagger:model ErrorResponse
type ErrorResponse struct {
// The error message.
// Required: true
Message string `json:"message"`
}

View File

@ -0,0 +1,6 @@
package types
// Error returns the error message
func (e ErrorResponse) Error() string {
return e.Message
}

View File

@ -0,0 +1,135 @@
package events // import "github.com/docker/docker/api/types/events"
import "github.com/docker/docker/api/types/filters"
// Type is used for event-types.
type Type string
// List of known event types.
const (
BuilderEventType Type = "builder" // BuilderEventType is the event type that the builder generates.
ConfigEventType Type = "config" // ConfigEventType is the event type that configs generate.
ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate.
DaemonEventType Type = "daemon" // DaemonEventType is the event type that the daemon generates.
ImageEventType Type = "image" // ImageEventType is the event type that images generate.
NetworkEventType Type = "network" // NetworkEventType is the event type that networks generate.
NodeEventType Type = "node" // NodeEventType is the event type that nodes generate.
PluginEventType Type = "plugin" // PluginEventType is the event type that plugins generate.
SecretEventType Type = "secret" // SecretEventType is the event type that secrets generate.
ServiceEventType Type = "service" // ServiceEventType is the event type that services generate.
VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate.
)
// Action is used for event-actions.
type Action string
const (
ActionCreate Action = "create"
ActionStart Action = "start"
ActionRestart Action = "restart"
ActionStop Action = "stop"
ActionCheckpoint Action = "checkpoint"
ActionPause Action = "pause"
ActionUnPause Action = "unpause"
ActionAttach Action = "attach"
ActionDetach Action = "detach"
ActionResize Action = "resize"
ActionUpdate Action = "update"
ActionRename Action = "rename"
ActionKill Action = "kill"
ActionDie Action = "die"
ActionOOM Action = "oom"
ActionDestroy Action = "destroy"
ActionRemove Action = "remove"
ActionCommit Action = "commit"
ActionTop Action = "top"
ActionCopy Action = "copy"
ActionArchivePath Action = "archive-path"
ActionExtractToDir Action = "extract-to-dir"
ActionExport Action = "export"
ActionImport Action = "import"
ActionSave Action = "save"
ActionLoad Action = "load"
ActionTag Action = "tag"
ActionUnTag Action = "untag"
ActionPush Action = "push"
ActionPull Action = "pull"
ActionPrune Action = "prune"
ActionDelete Action = "delete"
ActionEnable Action = "enable"
ActionDisable Action = "disable"
ActionConnect Action = "connect"
ActionDisconnect Action = "disconnect"
ActionReload Action = "reload"
ActionMount Action = "mount"
ActionUnmount Action = "unmount"
// ActionExecCreate is the prefix used for exec_create events. These
// event-actions are commonly followed by a colon and space (": "),
// and the command that's defined for the exec, for example:
//
// exec_create: /bin/sh -c 'echo hello'
//
// This is far from ideal; it's a compromise to allow filtering and
// to preserve backward-compatibility.
ActionExecCreate Action = "exec_create"
// ActionExecStart is the prefix used for exec_start events. These
// event-actions are commonly followed by a colon and space (": "),
// and the command that's defined for the exec, for example:
//
// exec_start: /bin/sh -c 'echo hello'
//
// This is far from ideal; it's a compromise to allow filtering and
// to preserve backward-compatibility.
ActionExecStart Action = "exec_start"
ActionExecDie Action = "exec_die"
ActionExecDetach Action = "exec_detach"
// ActionHealthStatus is the prefix to use for health_status events.
//
// Health-status events can either have a pre-defined status, in which
// case the "health_status" action is followed by a colon, or can be
// "free-form", in which case they're followed by the output of the
// health-check output.
//
// This is far from ideal, and a compromise to allow filtering, and
// to preserve backward-compatibility.
ActionHealthStatus Action = "health_status"
ActionHealthStatusRunning Action = "health_status: running"
ActionHealthStatusHealthy Action = "health_status: healthy"
ActionHealthStatusUnhealthy Action = "health_status: unhealthy"
)
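
The exec_* and health_status actions above are prefixes followed by ": " and a detail string. A minimal sketch (not part of the vendored file) of recovering the base action, using only the standard library and this events package:

package example

import (
	"strings"

	"github.com/docker/docker/api/types/events"
)

// baseAction strips the ": <detail>" suffix used by exec_* and
// health_status events, e.g. "exec_create: /bin/sh -c 'echo hello'".
func baseAction(a events.Action) events.Action {
	action, _, _ := strings.Cut(string(a), ": ")
	return events.Action(action)
}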
// Actor describes something that generates events,
// like a container, or a network, or a volume.
// It has a defined name and a set of attributes.
// The container attributes are its labels, other actors
// can generate these attributes from other properties.
type Actor struct {
ID string
Attributes map[string]string
}
// Message represents the information an event contains
type Message struct {
// Deprecated information from JSONMessage.
// With data only in container events.
Status string `json:"status,omitempty"` // Deprecated: use Action instead.
ID string `json:"id,omitempty"` // Deprecated: use Actor.ID instead.
From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead.
Type Type
Action Action
Actor Actor
// Engine events are local scope. Cluster events are swarm scope.
Scope string `json:"scope,omitempty"`
Time int64 `json:"time,omitempty"`
TimeNano int64 `json:"timeNano,omitempty"`
}
// ListOptions holds parameters to filter events with.
type ListOptions struct {
Since string
Until string
Filters filters.Args
}

View File

@ -0,0 +1,37 @@
package filters
import "fmt"
// invalidFilter indicates that the provided filter or its value is invalid
type invalidFilter struct {
Filter string
Value []string
}
func (e invalidFilter) Error() string {
msg := "invalid filter"
if e.Filter != "" {
msg += " '" + e.Filter
if e.Value != nil {
msg = fmt.Sprintf("%s=%s", msg, e.Value)
}
msg += "'"
}
return msg
}
// InvalidParameter marks this error as ErrInvalidParameter
func (e invalidFilter) InvalidParameter() {}
// unreachableCode is an error indicating that the code path was not expected to be reached.
type unreachableCode struct {
Filter string
Value []string
}
// System marks this error as ErrSystem
func (e unreachableCode) System() {}
func (e unreachableCode) Error() string {
return fmt.Sprintf("unreachable code reached for filter: %q with values: %s", e.Filter, e.Value)
}

View File

@ -0,0 +1,346 @@
/*
Package filters provides tools for encoding a mapping of keys to a set of
multiple values.
*/
package filters // import "github.com/docker/docker/api/types/filters"
import (
"encoding/json"
"regexp"
"strings"
"github.com/docker/docker/api/types/versions"
)
// Args stores a mapping of keys to a set of multiple values.
type Args struct {
fields map[string]map[string]bool
}
// KeyValuePair are used to initialize a new Args
type KeyValuePair struct {
Key string
Value string
}
// Arg creates a new KeyValuePair for initializing Args
func Arg(key, value string) KeyValuePair {
return KeyValuePair{Key: key, Value: value}
}
// NewArgs returns a new Args populated with the initial args
func NewArgs(initialArgs ...KeyValuePair) Args {
args := Args{fields: map[string]map[string]bool{}}
for _, arg := range initialArgs {
args.Add(arg.Key, arg.Value)
}
return args
}
// Keys returns all the keys in list of Args
func (args Args) Keys() []string {
keys := make([]string, 0, len(args.fields))
for k := range args.fields {
keys = append(keys, k)
}
return keys
}
// MarshalJSON returns a JSON byte representation of the Args
func (args Args) MarshalJSON() ([]byte, error) {
if len(args.fields) == 0 {
return []byte("{}"), nil
}
return json.Marshal(args.fields)
}
// ToJSON returns the Args as a JSON encoded string
func ToJSON(a Args) (string, error) {
if a.Len() == 0 {
return "", nil
}
buf, err := json.Marshal(a)
return string(buf), err
}
// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22
// then the encoded format will use an older legacy format where the values are a
// list of strings, instead of a set.
//
// Deprecated: do not use in any new code; use ToJSON instead
func ToParamWithVersion(version string, a Args) (string, error) {
if a.Len() == 0 {
return "", nil
}
if version != "" && versions.LessThan(version, "1.22") {
buf, err := json.Marshal(convertArgsToSlice(a.fields))
return string(buf), err
}
return ToJSON(a)
}
// FromJSON decodes a JSON encoded string into Args
func FromJSON(p string) (Args, error) {
args := NewArgs()
if p == "" {
return args, nil
}
raw := []byte(p)
err := json.Unmarshal(raw, &args)
if err == nil {
return args, nil
}
// Fallback to parsing arguments in the legacy slice format
deprecated := map[string][]string{}
if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil {
return args, &invalidFilter{}
}
args.fields = deprecatedArgs(deprecated)
return args, nil
}
// UnmarshalJSON populates the Args from JSON encode bytes
func (args Args) UnmarshalJSON(raw []byte) error {
return json.Unmarshal(raw, &args.fields)
}
// Get returns the list of values associated with the key
func (args Args) Get(key string) []string {
values := args.fields[key]
if values == nil {
return make([]string, 0)
}
slice := make([]string, 0, len(values))
for key := range values {
slice = append(slice, key)
}
return slice
}
// Add a new value to the set of values
func (args Args) Add(key, value string) {
if _, ok := args.fields[key]; ok {
args.fields[key][value] = true
} else {
args.fields[key] = map[string]bool{value: true}
}
}
// Del removes a value from the set
func (args Args) Del(key, value string) {
if _, ok := args.fields[key]; ok {
delete(args.fields[key], value)
if len(args.fields[key]) == 0 {
delete(args.fields, key)
}
}
}
// Len returns the number of keys in the mapping
func (args Args) Len() int {
return len(args.fields)
}
// MatchKVList returns true if all the pairs in sources exist as key=value
// pairs in the mapping at key, or if there are no values at key.
func (args Args) MatchKVList(key string, sources map[string]string) bool {
fieldValues := args.fields[key]
// do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
return true
}
if len(sources) == 0 {
return false
}
for value := range fieldValues {
testK, testV, hasValue := strings.Cut(value, "=")
v, ok := sources[testK]
if !ok {
return false
}
if hasValue && testV != v {
return false
}
}
return true
}
// Match returns true if any of the values at key match the source string
func (args Args) Match(field, source string) bool {
if args.ExactMatch(field, source) {
return true
}
fieldValues := args.fields[field]
for name2match := range fieldValues {
match, err := regexp.MatchString(name2match, source)
if err != nil {
continue
}
if match {
return true
}
}
return false
}
// GetBoolOrDefault returns a boolean value of the key if the key is present
// and is interpretable as a boolean value. Otherwise the default value is returned.
// Error is not nil only if the filter values are not valid boolean or are conflicting.
func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) {
fieldValues, ok := args.fields[key]
if !ok {
return defaultValue, nil
}
if len(fieldValues) == 0 {
return defaultValue, &invalidFilter{key, nil}
}
isFalse := fieldValues["0"] || fieldValues["false"]
isTrue := fieldValues["1"] || fieldValues["true"]
conflicting := isFalse && isTrue
invalid := !isFalse && !isTrue
if conflicting || invalid {
return defaultValue, &invalidFilter{key, args.Get(key)}
} else if isFalse {
return false, nil
} else if isTrue {
return true, nil
}
// This code shouldn't be reached.
return defaultValue, &unreachableCode{Filter: key, Value: args.Get(key)}
}
// ExactMatch returns true if the source matches exactly one of the values.
func (args Args) ExactMatch(key, source string) bool {
fieldValues, ok := args.fields[key]
// do not filter if there is no filter set or cannot determine filter
if !ok || len(fieldValues) == 0 {
return true
}
// try to match full name value to avoid O(N) regular expression matching
return fieldValues[source]
}
// UniqueExactMatch returns true if there is only one value and the source
// matches exactly the value.
func (args Args) UniqueExactMatch(key, source string) bool {
fieldValues := args.fields[key]
// do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
return true
}
if len(args.fields[key]) != 1 {
return false
}
// try to match full name value to avoid O(N) regular expression matching
return fieldValues[source]
}
// FuzzyMatch returns true if the source matches exactly one value, or the
// source has one of the values as a prefix.
func (args Args) FuzzyMatch(key, source string) bool {
if args.ExactMatch(key, source) {
return true
}
fieldValues := args.fields[key]
for prefix := range fieldValues {
if strings.HasPrefix(source, prefix) {
return true
}
}
return false
}
// Contains returns true if the key exists in the mapping
func (args Args) Contains(field string) bool {
_, ok := args.fields[field]
return ok
}
// Validate compares the set of accepted keys against the keys in the mapping.
// An error is returned if any mapping keys are not in the accepted set.
func (args Args) Validate(accepted map[string]bool) error {
for name := range args.fields {
if !accepted[name] {
return &invalidFilter{name, nil}
}
}
return nil
}
// WalkValues iterates over the list of values for a key in the mapping and calls
// op() for each value. If op returns an error the iteration stops and the
// error is returned.
func (args Args) WalkValues(field string, op func(value string) error) error {
if _, ok := args.fields[field]; !ok {
return nil
}
for v := range args.fields[field] {
if err := op(v); err != nil {
return err
}
}
return nil
}
// Clone returns a copy of args.
func (args Args) Clone() (newArgs Args) {
newArgs.fields = make(map[string]map[string]bool, len(args.fields))
for k, m := range args.fields {
var mm map[string]bool
if m != nil {
mm = make(map[string]bool, len(m))
for kk, v := range m {
mm[kk] = v
}
}
newArgs.fields[k] = mm
}
return newArgs
}
func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
m := map[string]map[string]bool{}
for k, v := range d {
values := map[string]bool{}
for _, vv := range v {
values[vv] = true
}
m[k] = values
}
return m
}
func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
m := map[string][]string{}
for k, v := range f {
values := []string{}
for kk := range v {
if v[kk] {
values = append(values, kk)
}
}
m[k] = values
}
return m
}
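
A minimal sketch (not part of the vendored file) of the typical Args workflow: building filters, round-tripping them through JSON, and reading a boolean filter with a default:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(
		filters.Arg("label", "env=prod"),
		filters.Arg("dangling", "true"),
	)

	// Encode for transport and decode again.
	encoded, err := filters.ToJSON(args)
	if err != nil {
		panic(err)
	}
	decoded, err := filters.FromJSON(encoded)
	if err != nil {
		panic(err)
	}

	dangling, err := decoded.GetBoolOrDefault("dangling", false)
	fmt.Println("keys:", decoded.Keys(), "dangling:", dangling, "err:", err)
}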

View File

@ -0,0 +1,23 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// GraphDriverData Information about the storage driver used to store the container's and
// image's filesystem.
//
// swagger:model GraphDriverData
type GraphDriverData struct {
// Low-level storage metadata, provided as key/value pairs.
//
// This information is driver-specific, and depends on the storage-driver
// in use, and should be used for informational purposes only.
//
// Required: true
Data map[string]string `json:"Data"`
// Name of the storage driver.
// Required: true
Name string `json:"Name"`
}

View File

@ -0,0 +1,13 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// IDResponse Response to an API call that returns just an Id
// swagger:model IdResponse
type IDResponse struct {
// The id of the newly created object.
// Required: true
ID string `json:"Id"`
}

View File

@ -0,0 +1,15 @@
package image
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// DeleteResponse delete response
// swagger:model DeleteResponse
type DeleteResponse struct {
// The image ID of an image that was deleted
Deleted string `json:"Deleted,omitempty"`
// The image ID of an image that was untagged
Untagged string `json:"Untagged,omitempty"`
}

View File

@ -0,0 +1,47 @@
package image
import (
"io"
"time"
)
// Metadata contains engine-local data about the image.
type Metadata struct {
// LastTagTime is the date and time at which the image was last tagged.
LastTagTime time.Time `json:",omitempty"`
}
// PruneReport contains the response for Engine API:
// POST "/images/prune"
type PruneReport struct {
ImagesDeleted []DeleteResponse
SpaceReclaimed uint64
}
// LoadResponse returns information to the client about a load process.
//
// TODO(thaJeztah): remove this type, and just use an io.ReadCloser
//
// This type was added in https://github.com/moby/moby/pull/18878, related
// to https://github.com/moby/moby/issues/19177;
//
// Make docker load to output json when the response content type is json
// Swarm hijacks the response from docker load and returns JSON rather
// than plain text like the Engine does. This makes the API library return
// information to figure that out.
//
// However the "load" endpoint unconditionally returns JSON;
// https://github.com/moby/moby/blob/7b9d2ef6e5518a3d3f3cc418459f8df786cfbbd1/api/server/router/image/image_routes.go#L248-L255
//
// PR https://github.com/moby/moby/pull/21959 made the response-type depend
// on whether "quiet" was set, but this logic got changed in a follow-up
// https://github.com/moby/moby/pull/25557, which made the JSON response-type
// unconditionally, but the output produced depend on whether"quiet" was set.
//
// We should deprecated the "quiet" option, as it's really a client
// responsibility.
type LoadResponse struct {
// Body must be closed to avoid a resource leak
Body io.ReadCloser
JSON bool
}
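
A minimal sketch (not part of the vendored file) of consuming a LoadResponse while honouring the requirement that Body must be closed:

package example

import (
	"io"
	"os"

	"github.com/docker/docker/api/types/image"
)

// drainLoadResponse copies the load output to stdout and closes the body
// to avoid a resource leak.
func drainLoadResponse(resp image.LoadResponse) error {
	defer resp.Body.Close()
	_, err := io.Copy(os.Stdout, resp.Body)
	return err
}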

View File

@ -0,0 +1,36 @@
package image // import "github.com/docker/docker/api/types/image"
// ----------------------------------------------------------------------------
// Code generated by `swagger generate operation`. DO NOT EDIT.
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// HistoryResponseItem individual image layer information in response to ImageHistory operation
// swagger:model HistoryResponseItem
type HistoryResponseItem struct {
// comment
// Required: true
Comment string `json:"Comment"`
// created
// Required: true
Created int64 `json:"Created"`
// created by
// Required: true
CreatedBy string `json:"CreatedBy"`
// Id
// Required: true
ID string `json:"Id"`
// size
// Required: true
Size int64 `json:"Size"`
// tags
// Required: true
Tags []string `json:"Tags"`
}

View File

@ -0,0 +1,85 @@
package image
import (
"context"
"io"
"github.com/docker/docker/api/types/filters"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// ImportSource holds source information for ImageImport
type ImportSource struct {
Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
}
// ImportOptions holds information to import images from the client host.
type ImportOptions struct {
Tag string // Tag is the name to tag this image with. This attribute is deprecated.
Message string // Message is the message to tag the image with
Changes []string // Changes are the raw changes to apply to this image
Platform string // Platform is the target platform of the image
}
// CreateOptions holds information to create images.
type CreateOptions struct {
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
Platform string // Platform is the target platform of the image if it needs to be pulled from the registry.
}
// PullOptions holds information to pull images.
type PullOptions struct {
All bool
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
// PrivilegeFunc is a function that clients can supply to retry operations
// after getting an authorization error. This function returns the registry
// authentication header value in base64 encoded format, or an error if the
// privilege request fails.
//
// Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc].
PrivilegeFunc func(context.Context) (string, error)
Platform string
}
// PushOptions holds information to push images.
type PushOptions struct {
All bool
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
// PrivilegeFunc is a function that clients can supply to retry operations
// after getting an authorization error. This function returns the registry
// authentication header value in base64 encoded format, or an error if the
// privilege request fails.
//
// Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc].
PrivilegeFunc func(context.Context) (string, error)
// Platform is an optional field that selects a specific platform to push
// when the image is a multi-platform image.
// Using this will only push a single platform-specific manifest.
Platform *ocispec.Platform `json:",omitempty"`
}
// ListOptions holds parameters to list images with.
type ListOptions struct {
// All controls whether all images in the graph are filtered, or just
// the heads.
All bool
// Filters is a JSON-encoded set of filter arguments.
Filters filters.Args
// SharedSize indicates whether the shared size of images should be computed.
SharedSize bool
// ContainerCount indicates whether container count should be computed.
ContainerCount bool
}
// RemoveOptions holds parameters to remove images.
type RemoveOptions struct {
Force bool
PruneChildren bool
}
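
A minimal sketch (not part of the vendored file) of supplying a PrivilegeFunc so a pull can be retried with fresh credentials after an authorization error; the JSON field names and credentials below are illustrative assumptions, not an upstream encoding helper:

package example

import (
	"context"
	"encoding/base64"
	"encoding/json"

	"github.com/docker/docker/api/types/image"
)

func pullOptionsWithAuthRetry() image.PullOptions {
	return image.PullOptions{
		Platform: "linux/amd64",
		PrivilegeFunc: func(ctx context.Context) (string, error) {
			// Re-resolve credentials and return them base64 encoded, as the
			// field documentation above describes. Field names are assumed
			// for illustration only.
			payload, err := json.Marshal(map[string]string{
				"username": "ci-bot",
				"password": "s3cr3t",
			})
			if err != nil {
				return "", err
			}
			return base64.URLEncoding.EncodeToString(payload), nil
		},
	}
}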

View File

@ -0,0 +1,89 @@
package image
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// Summary summary
// swagger:model Summary
type Summary struct {
// Number of containers using this image. Includes both stopped and running
// containers.
//
// This size is not calculated by default, and depends on which API endpoint
// is used. `-1` indicates that the value has not been set / calculated.
//
// Required: true
Containers int64 `json:"Containers"`
// Date and time at which the image was created as a Unix timestamp
// (number of seconds since EPOCH).
//
// Required: true
Created int64 `json:"Created"`
// ID is the content-addressable ID of an image.
//
// This identifier is a content-addressable digest calculated from the
// image's configuration (which includes the digests of layers used by
// the image).
//
// Note that this digest differs from the `RepoDigests` below, which
// holds digests of image manifests that reference the image.
//
// Required: true
ID string `json:"Id"`
// User-defined key/value metadata.
// Required: true
Labels map[string]string `json:"Labels"`
// ID of the parent image.
//
// Depending on how the image was created, this field may be empty and
// is only set for images that were built/created locally. This field
// is empty if the image was pulled from an image registry.
//
// Required: true
ParentID string `json:"ParentId"`
// List of content-addressable digests of locally available image manifests
// that the image is referenced from. Multiple manifests can refer to the
// same image.
//
// These digests are usually only available if the image was either pulled
// from a registry, or if the image was pushed to a registry, which is when
// the manifest is generated and its digest calculated.
//
// Required: true
RepoDigests []string `json:"RepoDigests"`
// List of image names/tags in the local image cache that reference this
// image.
//
// Multiple image tags can refer to the same image, and this list may be
// empty if no tags reference the image, in which case the image is
// "untagged", in which case it can still be referenced by its ID.
//
// Required: true
RepoTags []string `json:"RepoTags"`
// Total size of image layers that are shared between this image and other
// images.
//
// This size is not calculated by default. `-1` indicates that the value
// has not been set / calculated.
//
// Required: true
SharedSize int64 `json:"SharedSize"`
// Total size of the image including all layers it is composed of.
//
// Required: true
Size int64 `json:"Size"`
// Total size of the image including all layers it is composed of.
//
// Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead.
VirtualSize int64 `json:"VirtualSize,omitempty"`
}

View File

@ -0,0 +1,150 @@
package mount // import "github.com/docker/docker/api/types/mount"
import (
"os"
)
// Type represents the type of a mount.
type Type string
// Type constants
const (
// TypeBind is the type for mounting host dir
TypeBind Type = "bind"
// TypeVolume is the type for remote storage volumes
TypeVolume Type = "volume"
// TypeTmpfs is the type for mounting tmpfs
TypeTmpfs Type = "tmpfs"
// TypeNamedPipe is the type for mounting Windows named pipes
TypeNamedPipe Type = "npipe"
// TypeCluster is the type for Swarm Cluster Volumes.
TypeCluster Type = "cluster"
)
// Mount represents a mount (volume).
type Mount struct {
Type Type `json:",omitempty"`
// Source specifies the name of the mount. Depending on mount type, this
// may be a volume name or a host path, or even ignored.
// Source is not supported for tmpfs (must be an empty value)
Source string `json:",omitempty"`
Target string `json:",omitempty"`
ReadOnly bool `json:",omitempty"` // attempts recursive read-only if possible
Consistency Consistency `json:",omitempty"`
BindOptions *BindOptions `json:",omitempty"`
VolumeOptions *VolumeOptions `json:",omitempty"`
TmpfsOptions *TmpfsOptions `json:",omitempty"`
ClusterOptions *ClusterOptions `json:",omitempty"`
}
// Propagation represents the propagation of a mount.
type Propagation string
const (
// PropagationRPrivate RPRIVATE
PropagationRPrivate Propagation = "rprivate"
// PropagationPrivate PRIVATE
PropagationPrivate Propagation = "private"
// PropagationRShared RSHARED
PropagationRShared Propagation = "rshared"
// PropagationShared SHARED
PropagationShared Propagation = "shared"
// PropagationRSlave RSLAVE
PropagationRSlave Propagation = "rslave"
// PropagationSlave SLAVE
PropagationSlave Propagation = "slave"
)
// Propagations is the list of all valid mount propagations
var Propagations = []Propagation{
PropagationRPrivate,
PropagationPrivate,
PropagationRShared,
PropagationShared,
PropagationRSlave,
PropagationSlave,
}
// Consistency represents the consistency requirements of a mount.
type Consistency string
const (
// ConsistencyFull guarantees bind mount-like consistency
ConsistencyFull Consistency = "consistent"
// ConsistencyCached mounts can cache read data and FS structure
ConsistencyCached Consistency = "cached"
// ConsistencyDelegated mounts can cache read and written data and structure
ConsistencyDelegated Consistency = "delegated"
// ConsistencyDefault provides "consistent" behavior unless overridden
ConsistencyDefault Consistency = "default"
)
// BindOptions defines options specific to mounts of type "bind".
type BindOptions struct {
Propagation Propagation `json:",omitempty"`
NonRecursive bool `json:",omitempty"`
CreateMountpoint bool `json:",omitempty"`
// ReadOnlyNonRecursive makes the mount non-recursively read-only, but still leaves the mount recursive
// (unless NonRecursive is set to true in conjunction).
ReadOnlyNonRecursive bool `json:",omitempty"`
// ReadOnlyForceRecursive raises an error if the mount cannot be made recursively read-only.
ReadOnlyForceRecursive bool `json:",omitempty"`
}
// VolumeOptions represents the options for a mount of type volume.
type VolumeOptions struct {
NoCopy bool `json:",omitempty"`
Labels map[string]string `json:",omitempty"`
Subpath string `json:",omitempty"`
DriverConfig *Driver `json:",omitempty"`
}
// Driver represents a volume driver.
type Driver struct {
Name string `json:",omitempty"`
Options map[string]string `json:",omitempty"`
}
// TmpfsOptions defines options specific to mounts of type "tmpfs".
type TmpfsOptions struct {
// Size sets the size of the tmpfs, in bytes.
//
// This will be converted to an operating system specific value
// depending on the host. For example, on linux, it will be converted to
// use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
// docker, uses a straight byte value.
//
// Percentages are not supported.
SizeBytes int64 `json:",omitempty"`
// Mode of the tmpfs upon creation
Mode os.FileMode `json:",omitempty"`
// Options to be passed to the tmpfs mount. An array of arrays. Flag
// options should be provided as 1-length arrays. Other types should be
// provided as 2-length arrays, where the first item is the key and the
// second the value.
Options [][]string `json:",omitempty"`
// TODO(stevvooe): There are several more tmpfs flags, specified in the
// daemon, that are accepted. Only the most basic are added for now.
//
// From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56
//
// var validFlags = map[string]bool{
// "": true,
// "size": true, X
// "mode": true, X
// "uid": true,
// "gid": true,
// "nr_inodes": true,
// "nr_blocks": true,
// "mpol": true,
// }
//
// Some of these may be straightforward to add, but others, such as
// uid/gid have implications in a clustered system.
}
// ClusterOptions specifies options for a Cluster volume.
type ClusterOptions struct {
// intentionally empty
}
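
A minimal sketch (not part of the vendored file) declaring a bind mount and a tmpfs mount, including the "array of arrays" form documented for TmpfsOptions.Options; the paths and sizes are placeholders:

package example

import (
	"os"

	"github.com/docker/docker/api/types/mount"
)

var exampleMounts = []mount.Mount{
	{
		Type:   mount.TypeBind,
		Source: "/var/log/app",
		Target: "/logs",
		BindOptions: &mount.BindOptions{
			Propagation:      mount.PropagationRPrivate,
			CreateMountpoint: true,
		},
	},
	{
		Type:   mount.TypeTmpfs,
		Target: "/scratch",
		TmpfsOptions: &mount.TmpfsOptions{
			SizeBytes: 64 * 1024 * 1024,
			Mode:      os.FileMode(0o1777),
			Options: [][]string{
				{"noexec"},            // flag option: 1-length array
				{"nr_inodes", "4096"}, // key/value option: 2-length array
			},
		},
	},
}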

View File

@ -0,0 +1,19 @@
package network
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// CreateResponse NetworkCreateResponse
//
// OK response to NetworkCreate operation
// swagger:model CreateResponse
type CreateResponse struct {
// The ID of the created network.
// Required: true
ID string `json:"Id"`
// Warnings encountered when creating the container
// Required: true
Warning string `json:"Warning"`
}

View File

@ -0,0 +1,147 @@
package network
import (
"errors"
"fmt"
"net"
"github.com/docker/docker/internal/multierror"
)
// EndpointSettings stores the network endpoint details
type EndpointSettings struct {
// Configurations
IPAMConfig *EndpointIPAMConfig
Links []string
Aliases []string // Aliases holds the list of extra, user-specified DNS names for this endpoint.
// MacAddress may be used to specify a MAC address when the container is created.
// Once the container is running, it becomes operational data (it may contain a
// generated address).
MacAddress string
DriverOpts map[string]string
// Operational data
NetworkID string
EndpointID string
Gateway string
IPAddress string
IPPrefixLen int
IPv6Gateway string
GlobalIPv6Address string
GlobalIPv6PrefixLen int
// DNSNames holds all the (non fully qualified) DNS names associated to this endpoint. First entry is used to
// generate PTR records.
DNSNames []string
}
// Copy makes a deep copy of `EndpointSettings`
func (es *EndpointSettings) Copy() *EndpointSettings {
epCopy := *es
if es.IPAMConfig != nil {
epCopy.IPAMConfig = es.IPAMConfig.Copy()
}
if es.Links != nil {
links := make([]string, 0, len(es.Links))
epCopy.Links = append(links, es.Links...)
}
if es.Aliases != nil {
aliases := make([]string, 0, len(es.Aliases))
epCopy.Aliases = append(aliases, es.Aliases...)
}
if len(es.DNSNames) > 0 {
epCopy.DNSNames = make([]string, len(es.DNSNames))
copy(epCopy.DNSNames, es.DNSNames)
}
return &epCopy
}
// EndpointIPAMConfig represents IPAM configurations for the endpoint
type EndpointIPAMConfig struct {
IPv4Address string `json:",omitempty"`
IPv6Address string `json:",omitempty"`
LinkLocalIPs []string `json:",omitempty"`
}
// Copy makes a copy of the endpoint ipam config
func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig {
cfgCopy := *cfg
cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs))
cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...)
return &cfgCopy
}
// NetworkSubnet describes a user-defined subnet for a specific network. It's only used to validate if an
// EndpointIPAMConfig is valid for a specific network.
type NetworkSubnet interface {
// Contains checks whether the NetworkSubnet contains [addr].
Contains(addr net.IP) bool
// IsStatic checks whether the subnet was statically allocated (ie. user-defined).
IsStatic() bool
}
// IsInRange checks whether static IP addresses are valid in a specific network.
func (cfg *EndpointIPAMConfig) IsInRange(v4Subnets []NetworkSubnet, v6Subnets []NetworkSubnet) error {
var errs []error
if err := validateEndpointIPAddress(cfg.IPv4Address, v4Subnets); err != nil {
errs = append(errs, err)
}
if err := validateEndpointIPAddress(cfg.IPv6Address, v6Subnets); err != nil {
errs = append(errs, err)
}
return multierror.Join(errs...)
}
func validateEndpointIPAddress(epAddr string, ipamSubnets []NetworkSubnet) error {
if epAddr == "" {
return nil
}
var staticSubnet bool
parsedAddr := net.ParseIP(epAddr)
for _, subnet := range ipamSubnets {
if subnet.IsStatic() {
staticSubnet = true
if subnet.Contains(parsedAddr) {
return nil
}
}
}
if staticSubnet {
return fmt.Errorf("no configured subnet or ip-range contain the IP address %s", epAddr)
}
return errors.New("user specified IP address is supported only when connecting to networks with user configured subnets")
}
// Validate checks whether cfg is valid.
func (cfg *EndpointIPAMConfig) Validate() error {
if cfg == nil {
return nil
}
var errs []error
if cfg.IPv4Address != "" {
if addr := net.ParseIP(cfg.IPv4Address); addr == nil || addr.To4() == nil || addr.IsUnspecified() {
errs = append(errs, fmt.Errorf("invalid IPv4 address: %s", cfg.IPv4Address))
}
}
if cfg.IPv6Address != "" {
if addr := net.ParseIP(cfg.IPv6Address); addr == nil || addr.To4() != nil || addr.IsUnspecified() {
errs = append(errs, fmt.Errorf("invalid IPv6 address: %s", cfg.IPv6Address))
}
}
for _, addr := range cfg.LinkLocalIPs {
if parsed := net.ParseIP(addr); parsed == nil || parsed.IsUnspecified() {
errs = append(errs, fmt.Errorf("invalid link-local IP address: %s", addr))
}
}
return multierror.Join(errs...)
}

View File

@ -0,0 +1,134 @@
package network
import (
"errors"
"fmt"
"net/netip"
"github.com/docker/docker/internal/multierror"
)
// IPAM represents IP Address Management
type IPAM struct {
Driver string
Options map[string]string // Per network IPAM driver options
Config []IPAMConfig
}
// IPAMConfig represents IPAM configurations
type IPAMConfig struct {
Subnet string `json:",omitempty"`
IPRange string `json:",omitempty"`
Gateway string `json:",omitempty"`
AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
}
type ipFamily string
const (
ip4 ipFamily = "IPv4"
ip6 ipFamily = "IPv6"
)
// ValidateIPAM checks whether the network's IPAM passed as argument is valid. It returns a joinError of the list of
// errors found.
func ValidateIPAM(ipam *IPAM, enableIPv6 bool) error {
if ipam == nil {
return nil
}
var errs []error
for _, cfg := range ipam.Config {
subnet, err := netip.ParsePrefix(cfg.Subnet)
if err != nil {
errs = append(errs, fmt.Errorf("invalid subnet %s: invalid CIDR block notation", cfg.Subnet))
continue
}
subnetFamily := ip4
if subnet.Addr().Is6() {
subnetFamily = ip6
}
if !enableIPv6 && subnetFamily == ip6 {
continue
}
if subnet != subnet.Masked() {
errs = append(errs, fmt.Errorf("invalid subnet %s: it should be %s", subnet, subnet.Masked()))
}
if ipRangeErrs := validateIPRange(cfg.IPRange, subnet, subnetFamily); len(ipRangeErrs) > 0 {
errs = append(errs, ipRangeErrs...)
}
if err := validateAddress(cfg.Gateway, subnet, subnetFamily); err != nil {
errs = append(errs, fmt.Errorf("invalid gateway %s: %w", cfg.Gateway, err))
}
for auxName, aux := range cfg.AuxAddress {
if err := validateAddress(aux, subnet, subnetFamily); err != nil {
errs = append(errs, fmt.Errorf("invalid auxiliary address %s: %w", auxName, err))
}
}
}
if err := multierror.Join(errs...); err != nil {
return fmt.Errorf("invalid network config:\n%w", err)
}
return nil
}
func validateIPRange(ipRange string, subnet netip.Prefix, subnetFamily ipFamily) []error {
if ipRange == "" {
return nil
}
prefix, err := netip.ParsePrefix(ipRange)
if err != nil {
return []error{fmt.Errorf("invalid ip-range %s: invalid CIDR block notation", ipRange)}
}
family := ip4
if prefix.Addr().Is6() {
family = ip6
}
if family != subnetFamily {
return []error{fmt.Errorf("invalid ip-range %s: parent subnet is an %s block", ipRange, subnetFamily)}
}
var errs []error
if prefix.Bits() < subnet.Bits() {
errs = append(errs, fmt.Errorf("invalid ip-range %s: CIDR block is bigger than its parent subnet %s", ipRange, subnet))
}
if prefix != prefix.Masked() {
errs = append(errs, fmt.Errorf("invalid ip-range %s: it should be %s", prefix, prefix.Masked()))
}
if !subnet.Overlaps(prefix) {
errs = append(errs, fmt.Errorf("invalid ip-range %s: parent subnet %s doesn't contain ip-range", ipRange, subnet))
}
return errs
}
func validateAddress(address string, subnet netip.Prefix, subnetFamily ipFamily) error {
if address == "" {
return nil
}
addr, err := netip.ParseAddr(address)
if err != nil {
return errors.New("invalid address")
}
family := ip4
if addr.Is6() {
family = ip6
}
if family != subnetFamily {
return fmt.Errorf("parent subnet is an %s block", subnetFamily)
}
if !subnet.Contains(addr) {
return fmt.Errorf("parent subnet %s doesn't contain this address", subnet)
}
return nil
}
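
A minimal sketch (not part of the vendored file) of validating an IPAM block with ValidateIPAM; the gateway below deliberately falls outside the subnet so an error is expected:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/network"
)

func main() {
	ipam := &network.IPAM{
		Driver: "default",
		Config: []network.IPAMConfig{
			{
				Subnet:  "172.28.0.0/16",
				IPRange: "172.28.5.0/24",
				Gateway: "10.0.0.1", // outside the subnet on purpose
			},
		},
	}
	if err := network.ValidateIPAM(ipam, false); err != nil {
		fmt.Println(err)
	}
}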

View File

@ -0,0 +1,166 @@
package network // import "github.com/docker/docker/api/types/network"
import (
"time"
"github.com/docker/docker/api/types/filters"
)
const (
// NetworkDefault is a platform-independent alias to choose the platform-specific default network stack.
NetworkDefault = "default"
// NetworkHost is the name of the predefined network used when the NetworkMode host is selected (only available on Linux)
NetworkHost = "host"
// NetworkNone is the name of the predefined network used when the NetworkMode none is selected (available on both Linux and Windows)
NetworkNone = "none"
// NetworkBridge is the name of the default network on Linux
NetworkBridge = "bridge"
// NetworkNat is the name of the default network on Windows
NetworkNat = "nat"
)
// CreateRequest is the request message sent to the server for network create call.
type CreateRequest struct {
CreateOptions
Name string // Name is the requested name of the network.
// Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client
// package to older daemons.
CheckDuplicate *bool `json:",omitempty"`
}
// CreateOptions holds options to create a network.
type CreateOptions struct {
Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`)
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level).
EnableIPv6 *bool `json:",omitempty"` // EnableIPv6 represents whether to enable IPv6.
IPAM *IPAM // IPAM is the network's IP Address Management.
Internal bool // Internal represents if the network is used internal only.
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
ConfigOnly bool // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
ConfigFrom *ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [CreateOptions.ConfigOnly].
Options map[string]string // Options specifies the network-specific options to use for when creating the network.
Labels map[string]string // Labels holds metadata specific to the network being created.
}
// ListOptions holds parameters to filter the list of networks with.
type ListOptions struct {
Filters filters.Args
}
// InspectOptions holds parameters to inspect network.
type InspectOptions struct {
Scope string
Verbose bool
}
// ConnectOptions represents the data to be used to connect a container to the
// network.
type ConnectOptions struct {
Container string
EndpointConfig *EndpointSettings `json:",omitempty"`
}
// DisconnectOptions represents the data to be used to disconnect a container
// from the network.
type DisconnectOptions struct {
Container string
Force bool
}
// Inspect is the body of the "get network" http response message.
type Inspect struct {
Name string // Name is the name of the network
ID string `json:"Id"` // ID uniquely identifies a network on a single machine
Created time.Time // Created is the time the network was created
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
IPAM IPAM // IPAM is the network's IP Address Management
Internal bool // Internal represents if the network is used internal only
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
ConfigFrom ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
Options map[string]string // Options holds the network specific options to use for when creating the network
Labels map[string]string // Labels holds metadata specific to the network being created
Peers []PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
Services map[string]ServiceInfo `json:",omitempty"`
}
// Summary is used as response when listing networks. It currently is an alias
// for [Inspect], but may diverge in the future, as not all information may
// be included when listing networks.
type Summary = Inspect
// Address represents an IP address
type Address struct {
Addr string
PrefixLen int
}
// PeerInfo represents one peer of an overlay network
type PeerInfo struct {
Name string
IP string
}
// Task carries the information about one backend task
type Task struct {
Name string
EndpointID string
EndpointIP string
Info map[string]string
}
// ServiceInfo represents service parameters with the list of service's tasks
type ServiceInfo struct {
VIP string
Ports []string
LocalLBIndex int
Tasks []Task
}
// EndpointResource contains network resources allocated and used for a
// container in a network.
type EndpointResource struct {
Name string
EndpointID string
MacAddress string
IPv4Address string
IPv6Address string
}
// NetworkingConfig represents the container's networking configuration for each of its interfaces
// Carries the networking configs specified in the `docker run` and `docker network connect` commands
type NetworkingConfig struct {
EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
}
// ConfigReference specifies the source which provides a network's configuration
type ConfigReference struct {
Network string
}
var acceptedFilters = map[string]bool{
"dangling": true,
"driver": true,
"id": true,
"label": true,
"name": true,
"scope": true,
"type": true,
}
// ValidateFilters validates the list of filter args with the available filters.
func ValidateFilters(filter filters.Args) error {
return filter.Validate(acceptedFilters)
}
// PruneReport contains the response for Engine API:
// POST "/networks/prune"
type PruneReport struct {
NetworksDeleted []string
}
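
A minimal sketch (not part of the vendored file) of assembling CreateOptions and checking list filters against the accepted set with ValidateFilters:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/network"
)

func main() {
	enableIPv6 := false
	opts := network.CreateOptions{
		Driver:     "bridge",
		EnableIPv6: &enableIPv6,
		Labels:     map[string]string{"env": "dev"},
	}
	fmt.Println("driver:", opts.Driver)

	// "driver" is one of the accepted filter keys, so this validates cleanly.
	listFilters := filters.NewArgs(filters.Arg("driver", "bridge"))
	if err := network.ValidateFilters(listFilters); err != nil {
		fmt.Println("invalid filters:", err)
	}
}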

203
vendor/github.com/docker/docker/api/types/plugin.go generated vendored Normal file
View File

@ -0,0 +1,203 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// Plugin A plugin for the Engine API
// swagger:model Plugin
type Plugin struct {
// config
// Required: true
Config PluginConfig `json:"Config"`
// True if the plugin is running. False if the plugin is not running, only installed.
// Required: true
Enabled bool `json:"Enabled"`
// Id
ID string `json:"Id,omitempty"`
// name
// Required: true
Name string `json:"Name"`
// plugin remote reference used to push/pull the plugin
PluginReference string `json:"PluginReference,omitempty"`
// settings
// Required: true
Settings PluginSettings `json:"Settings"`
}
// PluginConfig The config of a plugin.
// swagger:model PluginConfig
type PluginConfig struct {
// args
// Required: true
Args PluginConfigArgs `json:"Args"`
// description
// Required: true
Description string `json:"Description"`
// Docker Version used to create the plugin
DockerVersion string `json:"DockerVersion,omitempty"`
// documentation
// Required: true
Documentation string `json:"Documentation"`
// entrypoint
// Required: true
Entrypoint []string `json:"Entrypoint"`
// env
// Required: true
Env []PluginEnv `json:"Env"`
// interface
// Required: true
Interface PluginConfigInterface `json:"Interface"`
// ipc host
// Required: true
IpcHost bool `json:"IpcHost"`
// linux
// Required: true
Linux PluginConfigLinux `json:"Linux"`
// mounts
// Required: true
Mounts []PluginMount `json:"Mounts"`
// network
// Required: true
Network PluginConfigNetwork `json:"Network"`
// pid host
// Required: true
PidHost bool `json:"PidHost"`
// propagated mount
// Required: true
PropagatedMount string `json:"PropagatedMount"`
// user
User PluginConfigUser `json:"User,omitempty"`
// work dir
// Required: true
WorkDir string `json:"WorkDir"`
// rootfs
Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
}
// PluginConfigArgs plugin config args
// swagger:model PluginConfigArgs
type PluginConfigArgs struct {
// description
// Required: true
Description string `json:"Description"`
// name
// Required: true
Name string `json:"Name"`
// settable
// Required: true
Settable []string `json:"Settable"`
// value
// Required: true
Value []string `json:"Value"`
}
// PluginConfigInterface The interface between Docker and the plugin
// swagger:model PluginConfigInterface
type PluginConfigInterface struct {
// Protocol to use for clients connecting to the plugin.
ProtocolScheme string `json:"ProtocolScheme,omitempty"`
// socket
// Required: true
Socket string `json:"Socket"`
// types
// Required: true
Types []PluginInterfaceType `json:"Types"`
}
// PluginConfigLinux plugin config linux
// swagger:model PluginConfigLinux
type PluginConfigLinux struct {
// allow all devices
// Required: true
AllowAllDevices bool `json:"AllowAllDevices"`
// capabilities
// Required: true
Capabilities []string `json:"Capabilities"`
// devices
// Required: true
Devices []PluginDevice `json:"Devices"`
}
// PluginConfigNetwork plugin config network
// swagger:model PluginConfigNetwork
type PluginConfigNetwork struct {
// type
// Required: true
Type string `json:"Type"`
}
// PluginConfigRootfs plugin config rootfs
// swagger:model PluginConfigRootfs
type PluginConfigRootfs struct {
// diff ids
DiffIds []string `json:"diff_ids"`
// type
Type string `json:"type,omitempty"`
}
// PluginConfigUser plugin config user
// swagger:model PluginConfigUser
type PluginConfigUser struct {
// g ID
GID uint32 `json:"GID,omitempty"`
// UID
UID uint32 `json:"UID,omitempty"`
}
// PluginSettings Settings that can be modified by users.
// swagger:model PluginSettings
type PluginSettings struct {
// args
// Required: true
Args []string `json:"Args"`
// devices
// Required: true
Devices []PluginDevice `json:"Devices"`
// env
// Required: true
Env []string `json:"Env"`
// mounts
// Required: true
Mounts []PluginMount `json:"Mounts"`
}

View File

@ -0,0 +1,25 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginDevice plugin device
// swagger:model PluginDevice
type PluginDevice struct {
// description
// Required: true
Description string `json:"Description"`
// name
// Required: true
Name string `json:"Name"`
// path
// Required: true
Path *string `json:"Path"`
// settable
// Required: true
Settable []string `json:"Settable"`
}

View File

@ -0,0 +1,25 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginEnv plugin env
// swagger:model PluginEnv
type PluginEnv struct {
// description
// Required: true
Description string `json:"Description"`
// name
// Required: true
Name string `json:"Name"`
// settable
// Required: true
Settable []string `json:"Settable"`
// value
// Required: true
Value *string `json:"Value"`
}

View File

@ -0,0 +1,21 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginInterfaceType plugin interface type
// swagger:model PluginInterfaceType
type PluginInterfaceType struct {
// capability
// Required: true
Capability string `json:"Capability"`
// prefix
// Required: true
Prefix string `json:"Prefix"`
// version
// Required: true
Version string `json:"Version"`
}

View File

@ -0,0 +1,37 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginMount plugin mount
// swagger:model PluginMount
type PluginMount struct {
// description
// Required: true
Description string `json:"Description"`
// destination
// Required: true
Destination string `json:"Destination"`
// name
// Required: true
Name string `json:"Name"`
// options
// Required: true
Options []string `json:"Options"`
// settable
// Required: true
Settable []string `json:"Settable"`
// source
// Required: true
Source *string `json:"Source"`
// type
// Required: true
Type string `json:"Type"`
}

View File

@ -0,0 +1,71 @@
package types // import "github.com/docker/docker/api/types"
import (
"encoding/json"
"fmt"
"sort"
)
// PluginsListResponse contains the response for the Engine API
type PluginsListResponse []*Plugin
// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
versionIndex := len(p)
prefixIndex := 0
if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
return fmt.Errorf("%q is not a plugin interface type", p)
}
p = p[1 : len(p)-1]
loop:
for i, b := range p {
switch b {
case '.':
prefixIndex = i
case '/':
versionIndex = i
break loop
}
}
t.Prefix = string(p[:prefixIndex])
t.Capability = string(p[prefixIndex+1 : versionIndex])
if versionIndex < len(p) {
t.Version = string(p[versionIndex+1:])
}
return nil
}
// MarshalJSON implements json.Marshaler for PluginInterfaceType
func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
return json.Marshal(t.String())
}
// String implements fmt.Stringer for PluginInterfaceType
func (t PluginInterfaceType) String() string {
return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
}
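// Illustrative sketch (not part of the vendored file): the Unmarshal, Marshal
// and String methods above round-trip an interface type such as
// "docker.volumedriver/1.0" between its string form and its Prefix, Capability
// and Version fields. The plugin name used here is only an example.
//
//	var t PluginInterfaceType
//	_ = json.Unmarshal([]byte(`"docker.volumedriver/1.0"`), &t)
//	// t.Prefix == "docker", t.Capability == "volumedriver", t.Version == "1.0"
//	b, _ := json.Marshal(&t)
//	fmt.Println(t.String(), string(b)) // docker.volumedriver/1.0 "docker.volumedriver/1.0"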
// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
type PluginPrivilege struct {
Name string
Description string
Value []string
}
// PluginPrivileges is a list of PluginPrivilege
type PluginPrivileges []PluginPrivilege
func (s PluginPrivileges) Len() int {
return len(s)
}
func (s PluginPrivileges) Less(i, j int) bool {
return s[i].Name < s[j].Name
}
func (s PluginPrivileges) Swap(i, j int) {
sort.Strings(s[i].Value)
sort.Strings(s[j].Value)
s[i], s[j] = s[j], s[i]
}
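// Illustrative sketch (not part of the vendored file): PluginPrivileges
// implements sort.Interface, so a privilege list can be ordered by name; note
// that Swap also sorts each privilege's Value slice. The privilege names below
// are examples only.
//
//	privs := PluginPrivileges{
//		{Name: "network", Description: "host networking", Value: []string{"host"}},
//		{Name: "capabilities", Description: "kernel capabilities", Value: []string{"CAP_SYS_ADMIN"}},
//	}
//	sort.Sort(privs) // "capabilities" now precedes "network"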

23
vendor/github.com/docker/docker/api/types/port.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// Port An open port on a container
// swagger:model Port
type Port struct {
// Host IP address that the container's port is mapped to
IP string `json:"IP,omitempty"`
// Port on the container
// Required: true
PrivatePort uint16 `json:"PrivatePort"`
// Port exposed on the host
PublicPort uint16 `json:"PublicPort,omitempty"`
// type
// Required: true
Type string `json:"Type"`
}

View File

@ -0,0 +1,99 @@
package registry // import "github.com/docker/docker/api/types/registry"
import (
"encoding/base64"
"encoding/json"
"io"
"strings"
"github.com/pkg/errors"
)
// AuthHeader is the name of the header used to send encoded registry
// authorization credentials for registry operations (push/pull).
const AuthHeader = "X-Registry-Auth"
// AuthConfig contains authorization information for connecting to a Registry.
type AuthConfig struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Auth string `json:"auth,omitempty"`
// Email is an optional value associated with the username.
// This field is deprecated and will be removed in a later
// version of docker.
Email string `json:"email,omitempty"`
ServerAddress string `json:"serveraddress,omitempty"`
// IdentityToken is used to authenticate the user and get
// an access token for the registry.
IdentityToken string `json:"identitytoken,omitempty"`
// RegistryToken is a bearer token to be sent to a registry
RegistryToken string `json:"registrytoken,omitempty"`
}
// EncodeAuthConfig serializes the auth configuration as a base64url encoded
// (RFC4648, section 5) JSON string for sending through the X-Registry-Auth header.
//
// For details on base64url encoding, see:
// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5
func EncodeAuthConfig(authConfig AuthConfig) (string, error) {
buf, err := json.Marshal(authConfig)
if err != nil {
return "", errInvalidParameter{err}
}
return base64.URLEncoding.EncodeToString(buf), nil
}
// DecodeAuthConfig decodes base64url encoded (RFC4648, section 5) JSON
// authentication information as sent through the X-Registry-Auth header.
//
// This function always returns an AuthConfig, even if an error occurs. It is up
// to the caller to decide if authentication is required, and if the error can
// be ignored.
//
// For details on base64url encoding, see:
// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5
func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) {
if authEncoded == "" {
return &AuthConfig{}, nil
}
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
return decodeAuthConfigFromReader(authJSON)
}
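// Illustrative sketch (not part of the vendored file): encoding credentials for
// the X-Registry-Auth header and decoding them again. The username, password
// and request variable are placeholders supplied by the caller.
//
//	auth := AuthConfig{Username: "someuser", Password: "somepass"}
//	encoded, err := EncodeAuthConfig(auth)
//	if err != nil {
//		return err
//	}
//	req.Header.Set(AuthHeader, encoded) // req is an *http.Request built elsewhere
//
//	decoded, _ := DecodeAuthConfig(encoded)
//	fmt.Println(decoded.Username) // "someuser"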
// DecodeAuthConfigBody decodes authentication information as sent as JSON in the
// body of a request. This function is to provide backward compatibility with old
// clients and API versions. Current clients and API versions expect authentication
// to be provided through the X-Registry-Auth header.
//
// Like DecodeAuthConfig, this function always returns an AuthConfig, even if an
// error occurs. It is up to the caller to decide if authentication is required,
// and if the error can be ignored.
func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) {
return decodeAuthConfigFromReader(rdr)
}
func decodeAuthConfigFromReader(rdr io.Reader) (*AuthConfig, error) {
authConfig := &AuthConfig{}
if err := json.NewDecoder(rdr).Decode(authConfig); err != nil {
// always return an (empty) AuthConfig to increase compatibility with
// the existing API.
return &AuthConfig{}, invalid(err)
}
return authConfig, nil
}
func invalid(err error) error {
return errInvalidParameter{errors.Wrap(err, "invalid X-Registry-Auth header")}
}
type errInvalidParameter struct{ error }
func (errInvalidParameter) InvalidParameter() {}
func (e errInvalidParameter) Cause() error { return e.error }
func (e errInvalidParameter) Unwrap() error { return e.error }

View File

@ -0,0 +1,21 @@
package registry // import "github.com/docker/docker/api/types/registry"
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// AuthenticateOKBody authenticate o k body
// swagger:model AuthenticateOKBody
type AuthenticateOKBody struct {
// An opaque token used to authenticate a user after a successful login
// Required: true
IdentityToken string `json:"IdentityToken"`
// The status of the authentication
// Required: true
Status string `json:"Status"`
}

View File

@ -0,0 +1,96 @@
package registry // import "github.com/docker/docker/api/types/registry"
import (
"encoding/json"
"net"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// ServiceConfig stores daemon registry services configuration.
type ServiceConfig struct {
AllowNondistributableArtifactsCIDRs []*NetIPNet
AllowNondistributableArtifactsHostnames []string
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
Mirrors []string
}
// NetIPNet is the net.IPNet type, which can be marshalled and
// unmarshalled to JSON
type NetIPNet net.IPNet
// String returns the CIDR notation of ipnet
func (ipnet *NetIPNet) String() string {
return (*net.IPNet)(ipnet).String()
}
// MarshalJSON returns the JSON representation of the IPNet
func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
return json.Marshal((*net.IPNet)(ipnet).String())
}
// UnmarshalJSON sets the IPNet from a byte array of JSON
func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
var ipnetStr string
if err = json.Unmarshal(b, &ipnetStr); err == nil {
var cidr *net.IPNet
if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
*ipnet = NetIPNet(*cidr)
}
}
return
}
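// Illustrative sketch (not part of the vendored file): NetIPNet round-trips a
// CIDR through JSON as a plain string.
//
//	var n NetIPNet
//	_ = json.Unmarshal([]byte(`"10.0.0.0/8"`), &n)
//	b, _ := json.Marshal(&n)
//	fmt.Println(n.String(), string(b)) // 10.0.0.0/8 "10.0.0.0/8"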
// IndexInfo contains information about a registry
//
// RepositoryInfo Examples:
//
// {
// "Index" : {
// "Name" : "docker.io",
// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
// "Secure" : true,
// "Official" : true,
// },
// "RemoteName" : "library/debian",
// "LocalName" : "debian",
// "CanonicalName" : "docker.io/debian"
// "Official" : true,
// }
//
// {
// "Index" : {
// "Name" : "127.0.0.1:5000",
// "Mirrors" : [],
// "Secure" : false,
// "Official" : false,
// },
// "RemoteName" : "user/repo",
// "LocalName" : "127.0.0.1:5000/user/repo",
// "CanonicalName" : "127.0.0.1:5000/user/repo",
// "Official" : false,
// }
type IndexInfo struct {
// Name is the name of the registry, such as "docker.io"
Name string
// Mirrors is a list of mirrors, expressed as URIs
Mirrors []string
// Secure is set to false if the registry is part of the list of
// insecure registries. Insecure registries accept HTTP and/or accept
// HTTPS with certificates from unknown CAs.
Secure bool
// Official indicates whether this is an official registry
Official bool
}
// DistributionInspect describes the result obtained from contacting the
// registry to retrieve image metadata
type DistributionInspect struct {
// Descriptor contains information about the manifest, including
// the content addressable digest
Descriptor ocispec.Descriptor
// Platforms contains the list of platforms supported by the image,
// obtained by parsing the manifest
Platforms []ocispec.Platform
}

View File

@ -0,0 +1,47 @@
package registry
import (
"context"
"github.com/docker/docker/api/types/filters"
)
// SearchOptions holds parameters to search images with.
type SearchOptions struct {
RegistryAuth string
// PrivilegeFunc is a [types.RequestPrivilegeFunc] the client can
// supply to retry operations after getting an authorization error.
//
// It must return the registry authentication header value in base64
// format, or an error if the privilege request fails.
PrivilegeFunc func(context.Context) (string, error)
Filters filters.Args
Limit int
}
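// Illustrative sketch (not part of the vendored file): building SearchOptions
// with a PrivilegeFunc that supplies fresh credentials after an authorization
// error. getEncodedAuth is a hypothetical helper returning a base64-encoded
// X-Registry-Auth value.
//
//	opts := SearchOptions{
//		Limit: 10,
//		PrivilegeFunc: func(ctx context.Context) (string, error) {
//			return getEncodedAuth(ctx)
//		},
//	}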
// SearchResult describes a search result returned from a registry
type SearchResult struct {
// StarCount indicates the number of stars this repository has
StarCount int `json:"star_count"`
// IsOfficial is true if the result is from an official repository.
IsOfficial bool `json:"is_official"`
// Name is the name of the repository
Name string `json:"name"`
// IsAutomated indicates whether the result is automated.
//
// Deprecated: the "is_automated" field is deprecated and will always be "false".
IsAutomated bool `json:"is_automated"`
// Description is a textual description of the repository
Description string `json:"description"`
}
// SearchResults lists a collection of search results returned from a registry
type SearchResults struct {
// Query contains the query string that generated the search results
Query string `json:"query"`
// NumResults indicates the number of results the query returned
NumResults int `json:"num_results"`
// Results is a slice containing the actual results for the search
Results []SearchResult `json:"results"`
}

View File

@ -0,0 +1,30 @@
package strslice // import "github.com/docker/docker/api/types/strslice"
import "encoding/json"
// StrSlice represents a string or an array of strings.
// We need to override the json decoder to accept both options.
type StrSlice []string
// UnmarshalJSON decodes the byte slice whether it's a string or an array of
// strings. This method is needed to implement json.Unmarshaler.
func (e *StrSlice) UnmarshalJSON(b []byte) error {
if len(b) == 0 {
// With no input, we preserve the existing value by returning nil and
// leaving the target alone. This allows defining default values for
// the type.
return nil
}
p := make([]string, 0, 1)
if err := json.Unmarshal(b, &p); err != nil {
var s string
if err := json.Unmarshal(b, &s); err != nil {
return err
}
p = append(p, s)
}
*e = p
return nil
}
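// Illustrative sketch (not part of the vendored file): a bare JSON string and a
// JSON array both decode into the same StrSlice shape.
//
//	var a, b StrSlice
//	_ = json.Unmarshal([]byte(`"/bin/sh"`), &a)               // a == StrSlice{"/bin/sh"}
//	_ = json.Unmarshal([]byte(`["/bin/sh", "-c", "ls"]`), &b) // b == StrSlice{"/bin/sh", "-c", "ls"}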

View File

@ -0,0 +1,48 @@
package swarm // import "github.com/docker/docker/api/types/swarm"
import (
"strconv"
"time"
)
// Version represents the internal object version.
type Version struct {
Index uint64 `json:",omitempty"`
}
// String implements fmt.Stringer interface.
func (v Version) String() string {
return strconv.FormatUint(v.Index, 10)
}
// Meta is a base object inherited by most of the other ones.
type Meta struct {
Version Version `json:",omitempty"`
CreatedAt time.Time `json:",omitempty"`
UpdatedAt time.Time `json:",omitempty"`
}
// Annotations represents how to describe an object.
type Annotations struct {
Name string `json:",omitempty"`
Labels map[string]string `json:"Labels"`
}
// Driver represents a driver (network, logging, secrets backend).
type Driver struct {
Name string `json:",omitempty"`
Options map[string]string `json:",omitempty"`
}
// TLSInfo represents the TLS information about what CA certificate is trusted,
// and who the issuer for a TLS certificate is
type TLSInfo struct {
// TrustRoot is the trusted CA root certificate in PEM format
TrustRoot string `json:",omitempty"`
// CertIssuer is the raw subject bytes of the issuer
CertIssuerSubject []byte `json:",omitempty"`
// CertIssuerPublicKey is the raw public key bytes of the issuer
CertIssuerPublicKey []byte `json:",omitempty"`
}

View File

@ -0,0 +1,40 @@
package swarm // import "github.com/docker/docker/api/types/swarm"
import "os"
// Config represents a config.
type Config struct {
ID string
Meta
Spec ConfigSpec
}
// ConfigSpec represents a config specification from a config in swarm
type ConfigSpec struct {
Annotations
Data []byte `json:",omitempty"`
// Templating controls whether and how to evaluate the config payload as
// a template. If it is not set, no templating is used.
Templating *Driver `json:",omitempty"`
}
// ConfigReferenceFileTarget is a file target in a config reference
type ConfigReferenceFileTarget struct {
Name string
UID string
GID string
Mode os.FileMode
}
// ConfigReferenceRuntimeTarget is a target for a config specifying that it
// isn't mounted into the container but instead has some other purpose.
type ConfigReferenceRuntimeTarget struct{}
// ConfigReference is a reference to a config in swarm
type ConfigReference struct {
File *ConfigReferenceFileTarget `json:",omitempty"`
Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"`
ConfigID string
ConfigName string
}

View File

@ -0,0 +1,119 @@
package swarm // import "github.com/docker/docker/api/types/swarm"
import (
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
)
// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
// Detailed documentation is available in:
// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
// `nameserver`, `search`, `options` are currently supported.
// TODO: `domain` is not supported yet.
type DNSConfig struct {
// Nameservers specifies the IP addresses of the name servers
Nameservers []string `json:",omitempty"`
// Search specifies the search list for host-name lookup
Search []string `json:",omitempty"`
// Options allows certain internal resolver variables to be modified
Options []string `json:",omitempty"`
}
// SELinuxContext contains the SELinux labels of the container.
type SELinuxContext struct {
Disable bool
User string
Role string
Type string
Level string
}
// SeccompMode is the type used for the enumeration of possible seccomp modes
// in SeccompOpts
type SeccompMode string
const (
SeccompModeDefault SeccompMode = "default"
SeccompModeUnconfined SeccompMode = "unconfined"
SeccompModeCustom SeccompMode = "custom"
)
// SeccompOpts defines the options for configuring seccomp on a swarm-managed
// container.
type SeccompOpts struct {
// Mode is the SeccompMode used for the container.
Mode SeccompMode `json:",omitempty"`
// Profile is the custom seccomp profile as a json object to be used with
// the container. Mode should be set to SeccompModeCustom when using a
// custom profile in this manner.
Profile []byte `json:",omitempty"`
}
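// Illustrative sketch (not part of the vendored file): selecting a custom
// seccomp profile for a swarm-managed container. profileJSON stands in for a
// profile loaded by the caller.
//
//	profileJSON := []byte(`{"defaultAction": "SCMP_ACT_ERRNO"}`)
//	seccomp := &SeccompOpts{Mode: SeccompModeCustom, Profile: profileJSON}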
// AppArmorMode is the type used for the enumeration of possible AppArmor modes in
// AppArmorOpts
type AppArmorMode string
const (
AppArmorModeDefault AppArmorMode = "default"
AppArmorModeDisabled AppArmorMode = "disabled"
)
// AppArmorOpts defines the options for configuring AppArmor on a swarm-managed
// container. Currently, custom AppArmor profiles are not supported.
type AppArmorOpts struct {
Mode AppArmorMode `json:",omitempty"`
}
// CredentialSpec for managed service account (Windows only)
type CredentialSpec struct {
Config string
File string
Registry string
}
// Privileges defines the security options for the container.
type Privileges struct {
CredentialSpec *CredentialSpec
SELinuxContext *SELinuxContext
Seccomp *SeccompOpts `json:",omitempty"`
AppArmor *AppArmorOpts `json:",omitempty"`
NoNewPrivileges bool
}
// ContainerSpec represents the spec of a container.
type ContainerSpec struct {
Image string `json:",omitempty"`
Labels map[string]string `json:",omitempty"`
Command []string `json:",omitempty"`
Args []string `json:",omitempty"`
Hostname string `json:",omitempty"`
Env []string `json:",omitempty"`
Dir string `json:",omitempty"`
User string `json:",omitempty"`
Groups []string `json:",omitempty"`
Privileges *Privileges `json:",omitempty"`
Init *bool `json:",omitempty"`
StopSignal string `json:",omitempty"`
TTY bool `json:",omitempty"`
OpenStdin bool `json:",omitempty"`
ReadOnly bool `json:",omitempty"`
Mounts []mount.Mount `json:",omitempty"`
StopGracePeriod *time.Duration `json:",omitempty"`
Healthcheck *container.HealthConfig `json:",omitempty"`
// The format of extra hosts on swarmkit is specified in:
// http://man7.org/linux/man-pages/man5/hosts.5.html
// IP_address canonical_hostname [aliases...]
Hosts []string `json:",omitempty"`
DNSConfig *DNSConfig `json:",omitempty"`
Secrets []*SecretReference `json:",omitempty"`
Configs []*ConfigReference `json:",omitempty"`
Isolation container.Isolation `json:",omitempty"`
Sysctls map[string]string `json:",omitempty"`
CapabilityAdd []string `json:",omitempty"`
CapabilityDrop []string `json:",omitempty"`
Ulimits []*container.Ulimit `json:",omitempty"`
OomScoreAdj int64 `json:",omitempty"`
}
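// Illustrative sketch (not part of the vendored file): a minimal ContainerSpec
// for a service task. The image, environment and mount values are placeholders.
//
//	spec := ContainerSpec{
//		Image:  "nginx:alpine",
//		Env:    []string{"FOO=bar"},
//		Mounts: []mount.Mount{{Type: mount.TypeVolume, Source: "data", Target: "/data"}},
//	}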

View File

@ -0,0 +1,121 @@
package swarm // import "github.com/docker/docker/api/types/swarm"
import (
"github.com/docker/docker/api/types/network"
)
// Endpoint represents an endpoint.
type Endpoint struct {
Spec EndpointSpec `json:",omitempty"`
Ports []PortConfig `json:",omitempty"`
VirtualIPs []EndpointVirtualIP `json:",omitempty"`
}
// EndpointSpec represents the spec of an endpoint.
type EndpointSpec struct {
Mode ResolutionMode `json:",omitempty"`
Ports []PortConfig `json:",omitempty"`
}
// ResolutionMode represents a resolution mode.
type ResolutionMode string
const (
// ResolutionModeVIP VIP
ResolutionModeVIP ResolutionMode = "vip"
// ResolutionModeDNSRR DNSRR
ResolutionModeDNSRR ResolutionMode = "dnsrr"
)
// PortConfig represents the config of a port.
type PortConfig struct {
Name string `json:",omitempty"`
Protocol PortConfigProtocol `json:",omitempty"`
// TargetPort is the port inside the container
TargetPort uint32 `json:",omitempty"`
// PublishedPort is the port on the swarm hosts
PublishedPort uint32 `json:",omitempty"`
// PublishMode is the mode in which port is published
PublishMode PortConfigPublishMode `json:",omitempty"`
}
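// Illustrative sketch (not part of the vendored file): publishing container
// port 80 as port 8080 through the routing mesh.
//
//	pc := PortConfig{
//		Name:          "web",
//		Protocol:      PortConfigProtocolTCP,
//		TargetPort:    80,
//		PublishedPort: 8080,
//		PublishMode:   PortConfigPublishModeIngress,
//	}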
// PortConfigPublishMode represents the mode in which the port is to
// be published.
type PortConfigPublishMode string
const (
// PortConfigPublishModeIngress is used for ports published
// for ingress load balancing using routing mesh.
PortConfigPublishModeIngress PortConfigPublishMode = "ingress"
// PortConfigPublishModeHost is used for ports published
// for direct host level access on the host where the task is running.
PortConfigPublishModeHost PortConfigPublishMode = "host"
)
// PortConfigProtocol represents the protocol of a port.
type PortConfigProtocol string
const (
// TODO(stevvooe): These should be used generally, not just for PortConfig.
// PortConfigProtocolTCP TCP
PortConfigProtocolTCP PortConfigProtocol = "tcp"
// PortConfigProtocolUDP UDP
PortConfigProtocolUDP PortConfigProtocol = "udp"
// PortConfigProtocolSCTP SCTP
PortConfigProtocolSCTP PortConfigProtocol = "sctp"
)
// EndpointVirtualIP represents the virtual IP of an endpoint on a network.
type EndpointVirtualIP struct {
NetworkID string `json:",omitempty"`
Addr string `json:",omitempty"`
}
// Network represents a network.
type Network struct {
ID string
Meta
Spec NetworkSpec `json:",omitempty"`
DriverState Driver `json:",omitempty"`
IPAMOptions *IPAMOptions `json:",omitempty"`
}
// NetworkSpec represents the spec of a network.
type NetworkSpec struct {
Annotations
DriverConfiguration *Driver `json:",omitempty"`
IPv6Enabled bool `json:",omitempty"`
Internal bool `json:",omitempty"`
Attachable bool `json:",omitempty"`
Ingress bool `json:",omitempty"`
IPAMOptions *IPAMOptions `json:",omitempty"`
ConfigFrom *network.ConfigReference `json:",omitempty"`
Scope string `json:",omitempty"`
}
// NetworkAttachmentConfig represents the configuration of a network attachment.
type NetworkAttachmentConfig struct {
Target string `json:",omitempty"`
Aliases []string `json:",omitempty"`
DriverOpts map[string]string `json:",omitempty"`
}
// NetworkAttachment represents a network attachment.
type NetworkAttachment struct {
Network Network `json:",omitempty"`
Addresses []string `json:",omitempty"`
}
// IPAMOptions represents ipam options.
type IPAMOptions struct {
Driver Driver `json:",omitempty"`
Configs []IPAMConfig `json:",omitempty"`
}
// IPAMConfig represents ipam configuration.
type IPAMConfig struct {
Subnet string `json:",omitempty"`
Range string `json:",omitempty"`
Gateway string `json:",omitempty"`
}
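// Illustrative sketch (not part of the vendored file): an attachable overlay
// network spec with a fixed subnet. The network name, driver and addresses are
// examples only.
//
//	spec := NetworkSpec{
//		Annotations:         Annotations{Name: "app-net"},
//		DriverConfiguration: &Driver{Name: "overlay"},
//		Attachable:          true,
//		IPAMOptions: &IPAMOptions{
//			Configs: []IPAMConfig{{Subnet: "10.10.0.0/24", Gateway: "10.10.0.1"}},
//		},
//	}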

139
vendor/github.com/docker/docker/api/types/swarm/node.go generated vendored Normal file
View File

@ -0,0 +1,139 @@
package swarm // import "github.com/docker/docker/api/types/swarm"
// Node represents a node.
type Node struct {
ID string
Meta
// Spec defines the desired state of the node as specified by the user.
// The system will honor this and will *never* modify it.
Spec NodeSpec `json:",omitempty"`
// Description encapsulates the properties of the Node as reported by the
// agent.
Description NodeDescription `json:",omitempty"`
// Status provides the current status of the node, as seen by the manager.
Status NodeStatus `json:",omitempty"`
// ManagerStatus provides the current status of the node's manager
// component, if the node is a manager.
ManagerStatus *ManagerStatus `json:",omitempty"`
}
// NodeSpec represents the spec of a node.
type NodeSpec struct {
Annotations
Role NodeRole `json:",omitempty"`
Availability NodeAvailability `json:",omitempty"`
}
// NodeRole represents the role of a node.
type NodeRole string
const (
// NodeRoleWorker WORKER
NodeRoleWorker NodeRole = "worker"
// NodeRoleManager MANAGER
NodeRoleManager NodeRole = "manager"
)
// NodeAvailability represents the availability of a node.
type NodeAvailability string
const (
// NodeAvailabilityActive ACTIVE
NodeAvailabilityActive NodeAvailability = "active"
// NodeAvailabilityPause PAUSE
NodeAvailabilityPause NodeAvailability = "pause"
// NodeAvailabilityDrain DRAIN
NodeAvailabilityDrain NodeAvailability = "drain"
)
// NodeDescription represents the description of a node.
type NodeDescription struct {
Hostname string `json:",omitempty"`
Platform Platform `json:",omitempty"`
Resources Resources `json:",omitempty"`
Engine EngineDescription `json:",omitempty"`
TLSInfo TLSInfo `json:",omitempty"`
CSIInfo []NodeCSIInfo `json:",omitempty"`
}
// Platform represents the platform (Arch/OS).
type Platform struct {
Architecture string `json:",omitempty"`
OS string `json:",omitempty"`
}
// EngineDescription represents the description of an engine.
type EngineDescription struct {
EngineVersion string `json:",omitempty"`
Labels map[string]string `json:",omitempty"`
Plugins []PluginDescription `json:",omitempty"`
}
// NodeCSIInfo represents information about a CSI plugin available on the node
type NodeCSIInfo struct {
// PluginName is the name of the CSI plugin.
PluginName string `json:",omitempty"`
// NodeID is the ID of the node as reported by the CSI plugin. This is
// different from the swarm node ID.
NodeID string `json:",omitempty"`
// MaxVolumesPerNode is the maximum number of volumes that may be published
// to this node
MaxVolumesPerNode int64 `json:",omitempty"`
// AccessibleTopology indicates the location of this node in the CSI
// plugin's topology
AccessibleTopology *Topology `json:",omitempty"`
}
// PluginDescription represents the description of an engine plugin.
type PluginDescription struct {
Type string `json:",omitempty"`
Name string `json:",omitempty"`
}
// NodeStatus represents the status of a node.
type NodeStatus struct {
State NodeState `json:",omitempty"`
Message string `json:",omitempty"`
Addr string `json:",omitempty"`
}
// Reachability represents the reachability of a node.
type Reachability string
const (
// ReachabilityUnknown UNKNOWN
ReachabilityUnknown Reachability = "unknown"
// ReachabilityUnreachable UNREACHABLE
ReachabilityUnreachable Reachability = "unreachable"
// ReachabilityReachable REACHABLE
ReachabilityReachable Reachability = "reachable"
)
// ManagerStatus represents the status of a manager.
type ManagerStatus struct {
Leader bool `json:",omitempty"`
Reachability Reachability `json:",omitempty"`
Addr string `json:",omitempty"`
}
// NodeState represents the state of a node.
type NodeState string
const (
// NodeStateUnknown UNKNOWN
NodeStateUnknown NodeState = "unknown"
// NodeStateDown DOWN
NodeStateDown NodeState = "down"
// NodeStateReady READY
NodeStateReady NodeState = "ready"
// NodeStateDisconnected DISCONNECTED
NodeStateDisconnected NodeState = "disconnected"
)
// Topology defines the CSI topology of this node. This type is a duplicate of
// github.com/docker/docker/api/types.Topology. Because the type definition
// is so simple and to avoid complicated structure or circular imports, we just
// duplicate it here. See that type for full documentation
type Topology struct {
Segments map[string]string `json:",omitempty"`
}

View File

@ -0,0 +1,27 @@
package swarm // import "github.com/docker/docker/api/types/swarm"
// RuntimeType is the type of runtime used for the TaskSpec
type RuntimeType string
// RuntimeURL is the proto type url
type RuntimeURL string
const (
// RuntimeContainer is the container based runtime
RuntimeContainer RuntimeType = "container"
// RuntimePlugin is the plugin based runtime
RuntimePlugin RuntimeType = "plugin"
// RuntimeNetworkAttachment is the network attachment runtime
RuntimeNetworkAttachment RuntimeType = "attachment"
// RuntimeURLContainer is the proto url for the container type
RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer"
// RuntimeURLPlugin is the proto url for the plugin type
RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin"
)
// NetworkAttachmentSpec represents the runtime spec type for network
// attachment tasks
type NetworkAttachmentSpec struct {
ContainerID string
}

View File

@ -0,0 +1,3 @@
//go:generate protoc --gogofaster_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto
package runtime // import "github.com/docker/docker/api/types/swarm/runtime"

View File

@ -0,0 +1,808 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: plugin.proto
package runtime
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// PluginSpec defines the base payload which clients can specify for creating
// a service with the plugin runtime.
type PluginSpec struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"`
Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges,proto3" json:"privileges,omitempty"`
Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"`
Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env,omitempty"`
}
func (m *PluginSpec) Reset() { *m = PluginSpec{} }
func (m *PluginSpec) String() string { return proto.CompactTextString(m) }
func (*PluginSpec) ProtoMessage() {}
func (*PluginSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_22a625af4bc1cc87, []int{0}
}
func (m *PluginSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PluginSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_PluginSpec.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *PluginSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_PluginSpec.Merge(m, src)
}
func (m *PluginSpec) XXX_Size() int {
return m.Size()
}
func (m *PluginSpec) XXX_DiscardUnknown() {
xxx_messageInfo_PluginSpec.DiscardUnknown(m)
}
var xxx_messageInfo_PluginSpec proto.InternalMessageInfo
func (m *PluginSpec) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *PluginSpec) GetRemote() string {
if m != nil {
return m.Remote
}
return ""
}
func (m *PluginSpec) GetPrivileges() []*PluginPrivilege {
if m != nil {
return m.Privileges
}
return nil
}
func (m *PluginSpec) GetDisabled() bool {
if m != nil {
return m.Disabled
}
return false
}
func (m *PluginSpec) GetEnv() []string {
if m != nil {
return m.Env
}
return nil
}
// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
type PluginPrivilege struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
Value []string `protobuf:"bytes,3,rep,name=value,proto3" json:"value,omitempty"`
}
func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} }
func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) }
func (*PluginPrivilege) ProtoMessage() {}
func (*PluginPrivilege) Descriptor() ([]byte, []int) {
return fileDescriptor_22a625af4bc1cc87, []int{1}
}
func (m *PluginPrivilege) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PluginPrivilege) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_PluginPrivilege.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *PluginPrivilege) XXX_Merge(src proto.Message) {
xxx_messageInfo_PluginPrivilege.Merge(m, src)
}
func (m *PluginPrivilege) XXX_Size() int {
return m.Size()
}
func (m *PluginPrivilege) XXX_DiscardUnknown() {
xxx_messageInfo_PluginPrivilege.DiscardUnknown(m)
}
var xxx_messageInfo_PluginPrivilege proto.InternalMessageInfo
func (m *PluginPrivilege) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *PluginPrivilege) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func (m *PluginPrivilege) GetValue() []string {
if m != nil {
return m.Value
}
return nil
}
func init() {
proto.RegisterType((*PluginSpec)(nil), "PluginSpec")
proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege")
}
func init() { proto.RegisterFile("plugin.proto", fileDescriptor_22a625af4bc1cc87) }
var fileDescriptor_22a625af4bc1cc87 = []byte{
// 225 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,
0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x9a, 0xc1, 0xc8, 0xc5, 0x15, 0x00, 0x16,
0x08, 0x2e, 0x48, 0x4d, 0x16, 0x12, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60,
0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x85, 0xc4, 0xb8, 0xd8, 0x8a, 0x52, 0x73, 0xf3, 0x4b, 0x52, 0x25,
0x98, 0xc0, 0xa2, 0x50, 0x9e, 0x90, 0x01, 0x17, 0x57, 0x41, 0x51, 0x66, 0x59, 0x66, 0x4e, 0x6a,
0x7a, 0x6a, 0xb1, 0x04, 0xb3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x80, 0x1e, 0xc4, 0xb0, 0x00, 0x98,
0x44, 0x10, 0x92, 0x1a, 0x21, 0x29, 0x2e, 0x8e, 0x94, 0xcc, 0xe2, 0xc4, 0xa4, 0x9c, 0xd4, 0x14,
0x09, 0x16, 0x05, 0x46, 0x0d, 0x8e, 0x20, 0x38, 0x5f, 0x48, 0x80, 0x8b, 0x39, 0x35, 0xaf, 0x4c,
0x82, 0x55, 0x81, 0x59, 0x83, 0x33, 0x08, 0xc4, 0x54, 0x8a, 0xe5, 0xe2, 0x47, 0x33, 0x0c, 0xab,
0xf3, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0,
0x6e, 0x44, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x91, 0x33,
0x08, 0xc2, 0x71, 0x92, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36,
0x70, 0xd0, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x37, 0xea, 0xe2, 0xca, 0x2a, 0x01, 0x00,
0x00,
}
func (m *PluginSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *PluginSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Env) > 0 {
for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.Env[iNdEx])
copy(dAtA[i:], m.Env[iNdEx])
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Env[iNdEx])))
i--
dAtA[i] = 0x2a
}
}
if m.Disabled {
i--
if m.Disabled {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x20
}
if len(m.Privileges) > 0 {
for iNdEx := len(m.Privileges) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Privileges[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPlugin(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
if len(m.Remote) > 0 {
i -= len(m.Remote)
copy(dAtA[i:], m.Remote)
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote)))
i--
dAtA[i] = 0x12
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *PluginPrivilege) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Value) > 0 {
for iNdEx := len(m.Value) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.Value[iNdEx])
copy(dAtA[i:], m.Value[iNdEx])
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Value[iNdEx])))
i--
dAtA[i] = 0x1a
}
}
if len(m.Description) > 0 {
i -= len(m.Description)
copy(dAtA[i:], m.Description)
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description)))
i--
dAtA[i] = 0x12
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int {
offset -= sovPlugin(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *PluginSpec) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovPlugin(uint64(l))
}
l = len(m.Remote)
if l > 0 {
n += 1 + l + sovPlugin(uint64(l))
}
if len(m.Privileges) > 0 {
for _, e := range m.Privileges {
l = e.Size()
n += 1 + l + sovPlugin(uint64(l))
}
}
if m.Disabled {
n += 2
}
if len(m.Env) > 0 {
for _, s := range m.Env {
l = len(s)
n += 1 + l + sovPlugin(uint64(l))
}
}
return n
}
func (m *PluginPrivilege) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovPlugin(uint64(l))
}
l = len(m.Description)
if l > 0 {
n += 1 + l + sovPlugin(uint64(l))
}
if len(m.Value) > 0 {
for _, s := range m.Value {
l = len(s)
n += 1 + l + sovPlugin(uint64(l))
}
}
return n
}
func sovPlugin(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozPlugin(x uint64) (n int) {
return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *PluginSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPlugin
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPlugin
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPlugin
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPlugin
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPlugin
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPlugin
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPlugin
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Remote = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPlugin
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPlugin
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthPlugin
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Privileges = append(m.Privileges, &PluginPrivilege{})
if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPlugin
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.Disabled = bool(v != 0)
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPlugin
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPlugin
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPlugin
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Env = append(m.Env, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipPlugin(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthPlugin
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *PluginPrivilege) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPlugin
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPlugin
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPlugin
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPlugin
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPlugin
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPlugin
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPlugin
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Description = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPlugin
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPlugin
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthPlugin
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Value = append(m.Value, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipPlugin(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthPlugin
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipPlugin(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowPlugin
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowPlugin
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowPlugin
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthPlugin
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupPlugin
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthPlugin
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupPlugin = fmt.Errorf("proto: unexpected end of group")
)

View File

@ -0,0 +1,19 @@
syntax = "proto3";
// PluginSpec defines the base payload which clients can specify for creating
// a service with the plugin runtime.
message PluginSpec {
string name = 1;
string remote = 2;
repeated PluginPrivilege privileges = 3;
bool disabled = 4;
repeated string env = 5;
}
// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
message PluginPrivilege {
string name = 1;
string description = 2;
repeated string value = 3;
}

View File

@ -0,0 +1,36 @@
package swarm // import "github.com/docker/docker/api/types/swarm"
import "os"
// Secret represents a secret.
type Secret struct {
ID string
Meta
Spec SecretSpec
}
// SecretSpec represents a secret specification from a secret in swarm
type SecretSpec struct {
Annotations
Data []byte `json:",omitempty"`
Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store
// Templating controls whether and how to evaluate the secret payload as
// a template. If it is not set, no templating is used.
Templating *Driver `json:",omitempty"`
}
// SecretReferenceFileTarget is a file target in a secret reference
type SecretReferenceFileTarget struct {
Name string
UID string
GID string
Mode os.FileMode
}
// SecretReference is a reference to a secret in swarm
type SecretReference struct {
File *SecretReferenceFileTarget
SecretID string
SecretName string
}

View File

@ -0,0 +1,202 @@
package swarm // import "github.com/docker/docker/api/types/swarm"
import "time"
// Service represents a service.
type Service struct {
ID string
Meta
Spec ServiceSpec `json:",omitempty"`
PreviousSpec *ServiceSpec `json:",omitempty"`
Endpoint Endpoint `json:",omitempty"`
UpdateStatus *UpdateStatus `json:",omitempty"`
// ServiceStatus is an optional, extra field indicating the number of
// desired and running tasks. It is provided primarily as a shortcut to
// calculating these values client-side, which otherwise would require
// listing all tasks for a service, an operation that could be
// computationally and network expensive.
ServiceStatus *ServiceStatus `json:",omitempty"`
// JobStatus is the status of a Service which is in one of ReplicatedJob or
// GlobalJob modes. It is absent on Replicated and Global services.
JobStatus *JobStatus `json:",omitempty"`
}
// ServiceSpec represents the spec of a service.
type ServiceSpec struct {
Annotations
// TaskTemplate defines how the service should construct new tasks when
// orchestrating this service.
TaskTemplate TaskSpec `json:",omitempty"`
Mode ServiceMode `json:",omitempty"`
UpdateConfig *UpdateConfig `json:",omitempty"`
RollbackConfig *UpdateConfig `json:",omitempty"`
// Networks specifies which networks the service should attach to.
//
// Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead.
Networks []NetworkAttachmentConfig `json:",omitempty"`
EndpointSpec *EndpointSpec `json:",omitempty"`
}
// ServiceMode represents the mode of a service.
type ServiceMode struct {
Replicated *ReplicatedService `json:",omitempty"`
Global *GlobalService `json:",omitempty"`
ReplicatedJob *ReplicatedJob `json:",omitempty"`
GlobalJob *GlobalJob `json:",omitempty"`
}
// UpdateState is the state of a service update.
type UpdateState string
const (
// UpdateStateUpdating is the updating state.
UpdateStateUpdating UpdateState = "updating"
// UpdateStatePaused is the paused state.
UpdateStatePaused UpdateState = "paused"
// UpdateStateCompleted is the completed state.
UpdateStateCompleted UpdateState = "completed"
// UpdateStateRollbackStarted is the state with a rollback in progress.
UpdateStateRollbackStarted UpdateState = "rollback_started"
// UpdateStateRollbackPaused is the state with a rollback in progress.
UpdateStateRollbackPaused UpdateState = "rollback_paused"
// UpdateStateRollbackCompleted is the state with a rollback in progress.
UpdateStateRollbackCompleted UpdateState = "rollback_completed"
)
// UpdateStatus reports the status of a service update.
type UpdateStatus struct {
State UpdateState `json:",omitempty"`
StartedAt *time.Time `json:",omitempty"`
CompletedAt *time.Time `json:",omitempty"`
Message string `json:",omitempty"`
}
// ReplicatedService is a kind of ServiceMode.
type ReplicatedService struct {
Replicas *uint64 `json:",omitempty"`
}
// GlobalService is a kind of ServiceMode.
type GlobalService struct{}
// ReplicatedJob is a type of Service which executes its defined Tasks
// in parallel until the specified number of Tasks have succeeded.
type ReplicatedJob struct {
// MaxConcurrent indicates the maximum number of Tasks that should be
// executing simultaneously for this job at any given time. There may be
// fewer Tasks than MaxConcurrent executing simultaneously; for example, if
// there are fewer than MaxConcurrent tasks needed to reach
// TotalCompletions.
//
// If this field is empty, it will default to a max concurrency of 1.
MaxConcurrent *uint64 `json:",omitempty"`
// TotalCompletions is the total number of Tasks desired to run to
// completion.
//
// If this field is empty, the value of MaxConcurrent will be used.
TotalCompletions *uint64 `json:",omitempty"`
}
// GlobalJob is the type of a Service which executes a Task on every Node
// matching the Service's placement constraints. These tasks run to completion
// and then exit.
//
// This type is deliberately empty.
type GlobalJob struct{}
const (
// UpdateFailureActionPause PAUSE
UpdateFailureActionPause = "pause"
// UpdateFailureActionContinue CONTINUE
UpdateFailureActionContinue = "continue"
// UpdateFailureActionRollback ROLLBACK
UpdateFailureActionRollback = "rollback"
// UpdateOrderStopFirst STOP_FIRST
UpdateOrderStopFirst = "stop-first"
// UpdateOrderStartFirst START_FIRST
UpdateOrderStartFirst = "start-first"
)
// UpdateConfig represents the update configuration.
type UpdateConfig struct {
// Maximum number of tasks to be updated in one iteration.
// 0 means unlimited parallelism.
Parallelism uint64
// Amount of time between updates.
Delay time.Duration `json:",omitempty"`
// FailureAction is the action to take when an update fails.
FailureAction string `json:",omitempty"`
// Monitor indicates how long to monitor a task for failure after it is
// created. If the task fails by ending up in one of the states
// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
// this counts as a failure. If it fails after Monitor, it does not
// count as a failure. If Monitor is unspecified, a default value will
// be used.
Monitor time.Duration `json:",omitempty"`
// MaxFailureRatio is the fraction of tasks that may fail during
// an update before the failure action is invoked. Any task created by
// the current update which ends up in one of the states REJECTED,
// COMPLETED or FAILED within Monitor from its creation counts as a
// failure. The number of failures is divided by the number of tasks
// being updated, and if this fraction is greater than
// MaxFailureRatio, the failure action is invoked.
//
// If the failure action is CONTINUE, there is no effect.
// If the failure action is PAUSE, no more tasks will be updated until
// another update is started.
MaxFailureRatio float32
// Order indicates the order of operations when rolling out an updated
// task. Either the old task is shut down before the new task is
// started, or the new task is started before the old task is shut down.
Order string
}
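// Illustrative sketch (not part of the vendored file): a rolling-update policy
// that updates two tasks at a time, waits ten seconds between batches, watches
// each new task for thirty seconds, and rolls back if more than 20% of updated
// tasks fail.
//
//	uc := UpdateConfig{
//		Parallelism:     2,
//		Delay:           10 * time.Second,
//		FailureAction:   UpdateFailureActionRollback,
//		Monitor:         30 * time.Second,
//		MaxFailureRatio: 0.2,
//		Order:           UpdateOrderStartFirst,
//	}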
// ServiceStatus represents the number of running tasks in a service and the
// number of tasks desired to be running.
type ServiceStatus struct {
// RunningTasks is the number of tasks for the service actually in the
// Running state
RunningTasks uint64
// DesiredTasks is the number of tasks desired to be running by the
// service. For replicated services, this is the replica count. For global
// services, this is computed by taking the number of tasks with desired
// state of not-Shutdown.
DesiredTasks uint64
// CompletedTasks is the number of tasks in the state Completed, if this
// service is in ReplicatedJob or GlobalJob mode. This field must be
// cross-referenced with the service type, because the default value of 0
// may mean that a service is not in a job mode, or it may mean that the
// job has yet to complete any tasks.
CompletedTasks uint64
}
// JobStatus is the status of a job-type service.
type JobStatus struct {
// JobIteration is a value increased each time a Job is executed,
// successfully or otherwise. "Executed", in this case, means the job as a
// whole has been started, not that an individual Task has been launched. A
// job is "Executed" when its ServiceSpec is updated. JobIteration can be
// used to disambiguate Tasks belonging to different executions of a job.
//
// Though JobIteration will increase with each subsequent execution, it may
// not necessarily increase by 1, and so JobIteration should not be used to
// keep track of the number of times a job has been executed.
JobIteration Version
// LastExecution is the time that the job was last executed, as observed by
// Swarm manager.
LastExecution time.Time `json:",omitempty"`
}

View File

@ -0,0 +1,20 @@
package swarm
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ServiceCreateResponse contains the information returned to a client on the
// creation of a new service.
//
// swagger:model ServiceCreateResponse
type ServiceCreateResponse struct {
// The ID of the created service.
ID string `json:"ID,omitempty"`
// Optional warning message.
//
// FIXME(thaJeztah): this should have "omitempty" in the generated type.
//
Warnings []string `json:"Warnings"`
}

View File

@ -0,0 +1,12 @@
package swarm
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ServiceUpdateResponse service update response
// swagger:model ServiceUpdateResponse
type ServiceUpdateResponse struct {
// Optional warning messages
Warnings []string `json:"Warnings"`
}

View File

@ -0,0 +1,237 @@
package swarm // import "github.com/docker/docker/api/types/swarm"
import (
"time"
)
// ClusterInfo represents info about the cluster for outputting in "info".
// It contains the same information as "Swarm", but without the JoinTokens.
type ClusterInfo struct {
ID string
Meta
Spec Spec
TLSInfo TLSInfo
RootRotationInProgress bool
DefaultAddrPool []string
SubnetSize uint32
DataPathPort uint32
}
// Swarm represents a swarm.
type Swarm struct {
ClusterInfo
JoinTokens JoinTokens
}
// JoinTokens contains the tokens workers and managers need to join the swarm.
type JoinTokens struct {
// Worker is the join token workers may use to join the swarm.
Worker string
// Manager is the join token managers may use to join the swarm.
Manager string
}
// Spec represents the spec of a swarm.
type Spec struct {
Annotations
Orchestration OrchestrationConfig `json:",omitempty"`
Raft RaftConfig `json:",omitempty"`
Dispatcher DispatcherConfig `json:",omitempty"`
CAConfig CAConfig `json:",omitempty"`
TaskDefaults TaskDefaults `json:",omitempty"`
EncryptionConfig EncryptionConfig `json:",omitempty"`
}
// OrchestrationConfig represents orchestration configuration.
type OrchestrationConfig struct {
// TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
// node. If negative, never remove completed or failed tasks.
TaskHistoryRetentionLimit *int64 `json:",omitempty"`
}
// TaskDefaults parameterizes cluster-level task creation with default values.
type TaskDefaults struct {
// LogDriver selects the log driver to use for tasks created in the
// orchestrator if unspecified by a service.
//
// Updating this value will only have an effect on new tasks. Old tasks
// will continue to use their previously configured log driver until
// recreated.
LogDriver *Driver `json:",omitempty"`
}
// EncryptionConfig controls at-rest encryption of data and keys.
type EncryptionConfig struct {
// AutoLockManagers specifies whether or not managers' TLS keys and raft data
// should be encrypted at rest in such a way that they must be unlocked
// before the manager node starts up again.
AutoLockManagers bool
}
// RaftConfig represents raft configuration.
type RaftConfig struct {
// SnapshotInterval is the number of log entries between snapshots.
SnapshotInterval uint64 `json:",omitempty"`
// KeepOldSnapshots is the number of snapshots to keep beyond the
// current snapshot.
KeepOldSnapshots *uint64 `json:",omitempty"`
// LogEntriesForSlowFollowers is the number of log entries to keep
// around to sync up slow followers after a snapshot is created.
LogEntriesForSlowFollowers uint64 `json:",omitempty"`
// ElectionTick is the number of ticks that a follower will wait for a message
// from the leader before becoming a candidate and starting an election.
// ElectionTick must be greater than HeartbeatTick.
//
// A tick currently defaults to one second, so these translate directly to
// seconds currently, but this is NOT guaranteed.
ElectionTick int
// HeartbeatTick is the number of ticks between heartbeats. Every
// HeartbeatTick ticks, the leader will send a heartbeat to the
// followers.
//
// A tick currently defaults to one second, so these translate directly to
// seconds currently, but this is NOT guaranteed.
HeartbeatTick int
}
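// Illustrative sketch (example function and values, not part of the upstream
// file): a Spec with explicit Raft tuning. The only hard requirement called
// out above is that ElectionTick must be greater than HeartbeatTick; the
// concrete numbers here are arbitrary examples.
func exampleRaftSpec() Spec {
	return Spec{
		Raft: RaftConfig{
			SnapshotInterval: 10000,
			ElectionTick:     10, // must be greater than HeartbeatTick
			HeartbeatTick:    1,
		},
	}
}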
// DispatcherConfig represents dispatcher configuration.
type DispatcherConfig struct {
// HeartbeatPeriod defines how often agent should send heartbeats to
// dispatcher.
HeartbeatPeriod time.Duration `json:",omitempty"`
}
// CAConfig represents CA configuration.
type CAConfig struct {
// NodeCertExpiry is the duration certificates should be issued for
NodeCertExpiry time.Duration `json:",omitempty"`
// ExternalCAs is a list of CAs to which a manager node will make
// certificate signing requests for node certificates.
ExternalCAs []*ExternalCA `json:",omitempty"`
// SigningCACert and SigningCAKey specify the desired signing root CA and
// root CA key for the swarm. When inspecting the cluster, the key will
// be redacted.
SigningCACert string `json:",omitempty"`
SigningCAKey string `json:",omitempty"`
// If this value changes, and there is no specified signing cert and key,
// then the swarm is forced to generate a new root certificate and key.
ForceRotate uint64 `json:",omitempty"`
}
// ExternalCAProtocol represents type of external CA.
type ExternalCAProtocol string
// ExternalCAProtocolCFSSL CFSSL
const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
// ExternalCA defines external CA to be used by the cluster.
type ExternalCA struct {
// Protocol is the protocol used by this external CA.
Protocol ExternalCAProtocol
// URL is the URL where the external CA can be reached.
URL string
// Options is a set of additional key/value pairs whose interpretation
// depends on the specified CA type.
Options map[string]string `json:",omitempty"`
// CACert specifies which root CA is used by this external CA. This certificate must
// be in PEM format.
CACert string
}
// InitRequest is the request used to init a swarm.
type InitRequest struct {
ListenAddr string
AdvertiseAddr string
DataPathAddr string
DataPathPort uint32
ForceNewCluster bool
Spec Spec
AutoLockManagers bool
Availability NodeAvailability
DefaultAddrPool []string
SubnetSize uint32
}
// JoinRequest is the request used to join a swarm.
type JoinRequest struct {
ListenAddr string
AdvertiseAddr string
DataPathAddr string
RemoteAddrs []string
JoinToken string // accept by secret
Availability NodeAvailability
}
// UnlockRequest is the request used to unlock a swarm.
type UnlockRequest struct {
// UnlockKey is the unlock key in ASCII-armored format.
UnlockKey string
}
// LocalNodeState represents the state of the local node.
type LocalNodeState string
const (
// LocalNodeStateInactive INACTIVE
LocalNodeStateInactive LocalNodeState = "inactive"
// LocalNodeStatePending PENDING
LocalNodeStatePending LocalNodeState = "pending"
// LocalNodeStateActive ACTIVE
LocalNodeStateActive LocalNodeState = "active"
// LocalNodeStateError ERROR
LocalNodeStateError LocalNodeState = "error"
// LocalNodeStateLocked LOCKED
LocalNodeStateLocked LocalNodeState = "locked"
)
// Info represents generic information about swarm.
type Info struct {
NodeID string
NodeAddr string
LocalNodeState LocalNodeState
ControlAvailable bool
Error string
RemoteManagers []Peer
Nodes int `json:",omitempty"`
Managers int `json:",omitempty"`
Cluster *ClusterInfo `json:",omitempty"`
Warnings []string `json:",omitempty"`
}
// Status provides information about the current swarm status and role,
// obtained from the "Swarm" header in the API response.
type Status struct {
// NodeState represents the state of the node.
NodeState LocalNodeState
// ControlAvailable indicates if the node is a swarm manager.
ControlAvailable bool
}
// Peer represents a peer.
type Peer struct {
NodeID string
Addr string
}
// UpdateFlags contains flags for SwarmUpdate.
type UpdateFlags struct {
RotateWorkerToken bool
RotateManagerToken bool
RotateManagerUnlockKey bool
}
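// Illustrative sketch (example function, not part of the upstream file):
// rotating the worker join token and the manager unlock key. A value like
// this is typically passed, together with the current Spec and its Version,
// to the engine's swarm-update endpoint; only the flag construction is shown.
func rotateTokensFlags() UpdateFlags {
	return UpdateFlags{
		RotateWorkerToken:      true,
		RotateManagerUnlockKey: true,
	}
}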

225
vendor/github.com/docker/docker/api/types/swarm/task.go generated vendored Normal file
View File

@ -0,0 +1,225 @@
package swarm // import "github.com/docker/docker/api/types/swarm"
import (
"time"
"github.com/docker/docker/api/types/swarm/runtime"
)
// TaskState represents the state of a task.
type TaskState string
const (
// TaskStateNew NEW
TaskStateNew TaskState = "new"
// TaskStateAllocated ALLOCATED
TaskStateAllocated TaskState = "allocated"
// TaskStatePending PENDING
TaskStatePending TaskState = "pending"
// TaskStateAssigned ASSIGNED
TaskStateAssigned TaskState = "assigned"
// TaskStateAccepted ACCEPTED
TaskStateAccepted TaskState = "accepted"
// TaskStatePreparing PREPARING
TaskStatePreparing TaskState = "preparing"
// TaskStateReady READY
TaskStateReady TaskState = "ready"
// TaskStateStarting STARTING
TaskStateStarting TaskState = "starting"
// TaskStateRunning RUNNING
TaskStateRunning TaskState = "running"
// TaskStateComplete COMPLETE
TaskStateComplete TaskState = "complete"
// TaskStateShutdown SHUTDOWN
TaskStateShutdown TaskState = "shutdown"
// TaskStateFailed FAILED
TaskStateFailed TaskState = "failed"
// TaskStateRejected REJECTED
TaskStateRejected TaskState = "rejected"
// TaskStateRemove REMOVE
TaskStateRemove TaskState = "remove"
// TaskStateOrphaned ORPHANED
TaskStateOrphaned TaskState = "orphaned"
)
// Task represents a task.
type Task struct {
ID string
Meta
Annotations
Spec TaskSpec `json:",omitempty"`
ServiceID string `json:",omitempty"`
Slot int `json:",omitempty"`
NodeID string `json:",omitempty"`
Status TaskStatus `json:",omitempty"`
DesiredState TaskState `json:",omitempty"`
NetworksAttachments []NetworkAttachment `json:",omitempty"`
GenericResources []GenericResource `json:",omitempty"`
// JobIteration is the JobIteration of the Service that this Task was
// spawned from, if the Service is a ReplicatedJob or GlobalJob. This is
// used to determine which Tasks belong to which run of the job. This field
// is absent if the Service mode is Replicated or Global.
JobIteration *Version `json:",omitempty"`
// Volumes is the list of VolumeAttachments for this task. It specifies
// which particular volumes are to be used by this particular task, and
// which mounts in the spec they fulfill.
Volumes []VolumeAttachment
}
// TaskSpec represents the spec of a task.
type TaskSpec struct {
// ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive.
// PluginSpec is only used when the `Runtime` field is set to `plugin`
// NetworkAttachmentSpec is used if the `Runtime` field is set to
// `attachment`.
ContainerSpec *ContainerSpec `json:",omitempty"`
PluginSpec *runtime.PluginSpec `json:",omitempty"`
NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"`
Resources *ResourceRequirements `json:",omitempty"`
RestartPolicy *RestartPolicy `json:",omitempty"`
Placement *Placement `json:",omitempty"`
Networks []NetworkAttachmentConfig `json:",omitempty"`
// LogDriver specifies the LogDriver to use for tasks created from this
// spec. If not present, the cluster default from swarm.Spec will be
// used, finally falling back to the engine default if not specified.
LogDriver *Driver `json:",omitempty"`
// ForceUpdate is a counter that triggers an update even if no relevant
// parameters have been changed.
ForceUpdate uint64
Runtime RuntimeType `json:",omitempty"`
}
// Resources represents resources (CPU/Memory) which can be advertised by a
// node and requested to be reserved for a task.
type Resources struct {
NanoCPUs int64 `json:",omitempty"`
MemoryBytes int64 `json:",omitempty"`
GenericResources []GenericResource `json:",omitempty"`
}
// Limit describes limits on resources which can be requested by a task.
type Limit struct {
NanoCPUs int64 `json:",omitempty"`
MemoryBytes int64 `json:",omitempty"`
Pids int64 `json:",omitempty"`
}
// GenericResource represents a "user defined" resource which can
// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1)
type GenericResource struct {
NamedResourceSpec *NamedGenericResource `json:",omitempty"`
DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"`
}
// NamedGenericResource represents a "user defined" resource which is defined
// as a string.
// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...)
type NamedGenericResource struct {
Kind string `json:",omitempty"`
Value string `json:",omitempty"`
}
// DiscreteGenericResource represents a "user defined" resource which is defined
// as an integer
// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
// Value is used to count the resource (SSD=5, HDD=3, ...)
type DiscreteGenericResource struct {
Kind string `json:",omitempty"`
Value int64 `json:",omitempty"`
}
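// Illustrative sketch (example function and values, not part of the upstream
// file): the two flavours of GenericResource. Exactly one of the two spec
// fields is expected to be set per entry; the kinds and values are made up.
func exampleGenericResources() []GenericResource {
	return []GenericResource{
		// A named resource identifies a specific unit, e.g. a particular GPU.
		{NamedResourceSpec: &NamedGenericResource{Kind: "GPU", Value: "UUID-1"}},
		// A discrete resource only counts units of a kind, e.g. three SSDs.
		{DiscreteResourceSpec: &DiscreteGenericResource{Kind: "SSD", Value: 3}},
	}
}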
// ResourceRequirements represents resources requirements.
type ResourceRequirements struct {
Limits *Limit `json:",omitempty"`
Reservations *Resources `json:",omitempty"`
}
// Placement represents orchestration parameters.
type Placement struct {
Constraints []string `json:",omitempty"`
Preferences []PlacementPreference `json:",omitempty"`
MaxReplicas uint64 `json:",omitempty"`
// Platforms stores all the platforms that the image can run on.
// This field is used in the platform filter for scheduling. If empty,
// then the platform filter is off, meaning there are no scheduling restrictions.
Platforms []Platform `json:",omitempty"`
}
// PlacementPreference provides a way to make the scheduler aware of factors
// such as topology.
type PlacementPreference struct {
Spread *SpreadOver
}
// SpreadOver is a scheduling preference that instructs the scheduler to spread
// tasks evenly over groups of nodes identified by labels.
type SpreadOver struct {
// label descriptor, such as engine.labels.az
SpreadDescriptor string
}
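// Illustrative sketch (example function and labels, not part of the upstream
// file): a Placement combining a node constraint, a per-node replica cap,
// and a spread preference over an availability-zone label.
func examplePlacement() Placement {
	return Placement{
		Constraints: []string{"node.labels.storage==ssd"},
		MaxReplicas: 2,
		Preferences: []PlacementPreference{
			{Spread: &SpreadOver{SpreadDescriptor: "node.labels.az"}},
		},
	}
}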
// RestartPolicy represents the restart policy.
type RestartPolicy struct {
Condition RestartPolicyCondition `json:",omitempty"`
Delay *time.Duration `json:",omitempty"`
MaxAttempts *uint64 `json:",omitempty"`
Window *time.Duration `json:",omitempty"`
}
// RestartPolicyCondition represents when to restart.
type RestartPolicyCondition string
const (
// RestartPolicyConditionNone NONE
RestartPolicyConditionNone RestartPolicyCondition = "none"
// RestartPolicyConditionOnFailure ON_FAILURE
RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
// RestartPolicyConditionAny ANY
RestartPolicyConditionAny RestartPolicyCondition = "any"
)
// TaskStatus represents the status of a task.
type TaskStatus struct {
Timestamp time.Time `json:",omitempty"`
State TaskState `json:",omitempty"`
Message string `json:",omitempty"`
Err string `json:",omitempty"`
ContainerStatus *ContainerStatus `json:",omitempty"`
PortStatus PortStatus `json:",omitempty"`
}
// ContainerStatus represents the status of a container.
type ContainerStatus struct {
ContainerID string
PID int
ExitCode int
}
// PortStatus represents the port status of a task's host ports, for tasks
// whose service has published host ports.
type PortStatus struct {
Ports []PortConfig `json:",omitempty"`
}
// VolumeAttachment contains the data associating a Volume with a Task.
type VolumeAttachment struct {
// ID is the Swarmkit ID of the Volume. This is not the CSI VolumeId.
ID string `json:",omitempty"`
// Source, together with Target, indicates the Mount, as specified in the
// ContainerSpec, that this volume fulfills.
Source string `json:",omitempty"`
// Target, together with Source, indicates the Mount, as specified
// in the ContainerSpec, that this volume fulfills.
Target string `json:",omitempty"`
}

View File

@ -0,0 +1,155 @@
package system
import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm"
)
// Info contains response of Engine API:
// GET "/info"
type Info struct {
ID string
Containers int
ContainersRunning int
ContainersPaused int
ContainersStopped int
Images int
Driver string
DriverStatus [][2]string
SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API
Plugins PluginsInfo
MemoryLimit bool
SwapLimit bool
KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes
KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2.
CPUCfsPeriod bool `json:"CpuCfsPeriod"`
CPUCfsQuota bool `json:"CpuCfsQuota"`
CPUShares bool
CPUSet bool
PidsLimit bool
IPv4Forwarding bool
BridgeNfIptables bool
BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
Debug bool
NFd int
OomKillDisable bool
NGoroutines int
SystemTime string
LoggingDriver string
CgroupDriver string
CgroupVersion string `json:",omitempty"`
NEventsListener int
KernelVersion string
OperatingSystem string
OSVersion string
OSType string
Architecture string
IndexServerAddress string
RegistryConfig *registry.ServiceConfig
NCPU int
MemTotal int64
GenericResources []swarm.GenericResource
DockerRootDir string
HTTPProxy string `json:"HttpProxy"`
HTTPSProxy string `json:"HttpsProxy"`
NoProxy string
Name string
Labels []string
ExperimentalBuild bool
ServerVersion string
Runtimes map[string]RuntimeWithStatus
DefaultRuntime string
Swarm swarm.Info
// LiveRestoreEnabled determines whether containers should be kept
// running when the daemon is shut down, or upon daemon start if
// running containers are detected
LiveRestoreEnabled bool
Isolation container.Isolation
InitBinary string
ContainerdCommit Commit
RuncCommit Commit
InitCommit Commit
SecurityOptions []string
ProductLicense string `json:",omitempty"`
DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
CDISpecDirs []string
Containerd *ContainerdInfo `json:",omitempty"`
// Legacy API fields for older API versions.
legacyFields
// Warnings contains a slice of warnings that occurred while collecting
// system information. These warnings are intended to be informational
// messages for the user, and are not intended to be parsed / used for
// other purposes, as they do not have a fixed format.
Warnings []string
}
// ContainerdInfo holds information about the containerd instance used by the daemon.
type ContainerdInfo struct {
// Address is the path to the containerd socket.
Address string `json:",omitempty"`
// Namespaces is the containerd namespaces used by the daemon.
Namespaces ContainerdNamespaces
}
// ContainerdNamespaces reflects the containerd namespaces used by the daemon.
//
// These namespaces can be configured in the daemon configuration, and are
// considered to be used exclusively by the daemon.
//
// As these namespaces are considered to be exclusively accessed
// by the daemon, it is not recommended to change these values,
// or to change them to a value that is used by other systems,
// such as cri-containerd.
type ContainerdNamespaces struct {
// Containers holds the default containerd namespace used for
// containers managed by the daemon.
//
// The default namespace for containers is "moby", but will be
// suffixed with the `<uid>.<gid>` of the remapped `root` if
// user-namespaces are enabled and the containerd image-store
// is used.
Containers string
// Plugins holds the default containerd namespace used for
// plugins managed by the daemon.
//
// The default namespace for plugins is "moby", but will be
// suffixed with the `<uid>.<gid>` of the remapped `root` if
// user-namespaces are enabled and the containerd image-store
// is used.
Plugins string
}
type legacyFields struct {
ExecutionDriver string `json:",omitempty"` // Deprecated: deprecated since API v1.25, but returned for older versions.
}
// PluginsInfo is a temp struct holding the plugin names
// registered with the docker daemon. It is used by the [Info] struct.
type PluginsInfo struct {
// List of Volume plugins registered
Volume []string
// List of Network plugins registered
Network []string
// List of Authorization plugins registered
Authorization []string
// List of Log plugins registered
Log []string
}
// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
// in the version-string of external tools, such as containerd, or runC.
type Commit struct {
ID string // ID is the actual commit ID of external tool.
Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time.
}
// NetworkAddressPool is a temp struct used by [Info] struct.
type NetworkAddressPool struct {
Base string
Size int
}

View File

@ -0,0 +1,20 @@
package system
// Runtime describes an OCI runtime
type Runtime struct {
// "Legacy" runtime configuration for runc-compatible runtimes.
Path string `json:"path,omitempty"`
Args []string `json:"runtimeArgs,omitempty"`
// Shimv2 runtime configuration. Mutually exclusive with the legacy config above.
Type string `json:"runtimeType,omitempty"`
Options map[string]interface{} `json:"options,omitempty"`
}
// RuntimeWithStatus extends [Runtime] to hold [RuntimeStatus].
type RuntimeWithStatus struct {
Runtime
Status map[string]string `json:"status,omitempty"`
}

View File

@ -0,0 +1,48 @@
package system
import (
"errors"
"fmt"
"strings"
)
// SecurityOpt contains the name and options of a security option
type SecurityOpt struct {
Name string
Options []KeyValue
}
// DecodeSecurityOptions decodes a security options string slice to a
// type-safe [SecurityOpt].
func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
so := []SecurityOpt{}
for _, opt := range opts {
// support output from a < 1.13 docker daemon
if !strings.Contains(opt, "=") {
so = append(so, SecurityOpt{Name: opt})
continue
}
secopt := SecurityOpt{}
for _, s := range strings.Split(opt, ",") {
k, v, ok := strings.Cut(s, "=")
if !ok {
return nil, fmt.Errorf("invalid security option %q", s)
}
if k == "" || v == "" {
return nil, errors.New("invalid empty security option")
}
if k == "name" {
secopt.Name = v
continue
}
secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v})
}
so = append(so, secopt)
}
return so, nil
}
// KeyValue holds a key/value pair.
type KeyValue struct {
Key, Value string
}
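// Illustrative sketch (example function and input strings, not part of the
// upstream file): decoding the SecurityOptions slice as returned in the
// engine's Info response. The strings below mimic the daemon's format; real
// values come from Info.SecurityOptions.
func exampleDecode() ([]SecurityOpt, error) {
	raw := []string{
		"name=seccomp,profile=default",
		"name=apparmor",
	}
	return DecodeSecurityOptions(raw)
}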

View File

@ -0,0 +1,131 @@
package time // import "github.com/docker/docker/api/types/time"
import (
"fmt"
"math"
"strconv"
"strings"
"time"
)
// These are additional predefined layouts for use in Time.Format and Time.Parse
// with --since and --until parameters for `docker logs` and `docker events`
const (
rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
)
// GetTimestamp tries to parse the given string as a Go duration,
// then as an RFC3339 time, and finally as a Unix timestamp. If
// any of these succeed, it returns a Unix timestamp as a string;
// otherwise it returns the given value back.
// In case of duration input, the returned timestamp is computed
// as the given reference time minus the amount of the duration.
func GetTimestamp(value string, reference time.Time) (string, error) {
if d, err := time.ParseDuration(value); value != "0" && err == nil {
return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
}
var format string
// if the string has a Z or a + or three dashes, use Parse; otherwise use ParseInLocation
parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
if strings.Contains(value, ".") {
if parseInLocation {
format = rFC3339NanoLocal
} else {
format = time.RFC3339Nano
}
} else if strings.Contains(value, "T") {
// we want the number of colons in the T portion of the timestamp
tcolons := strings.Count(value, ":")
// if parseInLocation is off and we have a +/- zone offset (not Z), then
// there will be an extra colon in the input for the tz offset; subtract
// that colon from the tcolons count
if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
tcolons--
}
if parseInLocation {
switch tcolons {
case 0:
format = "2006-01-02T15"
case 1:
format = "2006-01-02T15:04"
default:
format = rFC3339Local
}
} else {
switch tcolons {
case 0:
format = "2006-01-02T15Z07:00"
case 1:
format = "2006-01-02T15:04Z07:00"
default:
format = time.RFC3339
}
}
} else if parseInLocation {
format = dateLocal
} else {
format = dateWithZone
}
var t time.Time
var err error
if parseInLocation {
t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
} else {
t, err = time.Parse(format, value)
}
if err != nil {
// if there is a `-` then it's an RFC3339 like timestamp
if strings.Contains(value, "-") {
return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
}
if _, _, err := parseTimestamp(value); err != nil {
return "", fmt.Errorf("failed to parse value as time or duration: %q", value)
}
return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
}
return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
}
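// Illustrative sketch (example function, not part of the upstream file): the
// three input shapes GetTimestamp accepts. A duration is interpreted relative
// to the reference time, an RFC3339-like string is parsed as a wall-clock
// time, and an already-numeric Unix timestamp is passed through unchanged.
func exampleGetTimestamp(ref time.Time) {
	_, _ = GetTimestamp("2h30m", ref)               // reference time minus the duration
	_, _ = GetTimestamp("2006-01-02T15:04:05", ref) // local-time RFC3339 layout
	_, _ = GetTimestamp("1136073600", ref)          // returned as-is
}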
// ParseTimestamps returns seconds and nanoseconds from a timestamp that has
// the format ("%d.%09d", time.Unix(), int64(time.Nanosecond())).
// If the incoming nanosecond portion is longer than 9 digits it is truncated.
// The expectation is that the seconds and nanoseconds will be used to create a
// time variable. For example:
//
// seconds, nanoseconds, _ := ParseTimestamps("1136073600.000000001", 0)
// since := time.Unix(seconds, nanoseconds)
//
// returns seconds as defaultSeconds if value == ""
func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, err error) {
if value == "" {
return defaultSeconds, 0, nil
}
return parseTimestamp(value)
}
func parseTimestamp(value string) (sec int64, nsec int64, err error) {
s, n, ok := strings.Cut(value, ".")
sec, err = strconv.ParseInt(s, 10, 64)
if err != nil {
return sec, 0, err
}
if !ok {
return sec, 0, nil
}
nsec, err = strconv.ParseInt(n, 10, 64)
if err != nil {
return sec, nsec, err
}
// should already be in nanoseconds but just in case convert n to nanoseconds
nsec = int64(float64(nsec) * math.Pow(float64(10), float64(9-len(n))))
return sec, nsec, nil
}

487
vendor/github.com/docker/docker/api/types/types.go generated vendored Normal file
View File

@ -0,0 +1,487 @@
package types // import "github.com/docker/docker/api/types"
import (
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/volume"
"github.com/docker/go-connections/nat"
)
const (
// MediaTypeRawStream is vendor specific MIME-Type set for raw TTY streams
MediaTypeRawStream = "application/vnd.docker.raw-stream"
// MediaTypeMultiplexedStream is vendor specific MIME-Type set for stdin/stdout/stderr multiplexed streams
MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream"
)
// RootFS returns Image's RootFS description including the layer IDs.
type RootFS struct {
Type string `json:",omitempty"`
Layers []string `json:",omitempty"`
}
// ImageInspect contains response of Engine API:
// GET "/images/{name:.*}/json"
type ImageInspect struct {
// ID is the content-addressable ID of an image.
//
// This identifier is a content-addressable digest calculated from the
// image's configuration (which includes the digests of layers used by
// the image).
//
// Note that this digest differs from the `RepoDigests` below, which
// holds digests of image manifests that reference the image.
ID string `json:"Id"`
// RepoTags is a list of image names/tags in the local image cache that
// reference this image.
//
// Multiple image tags can refer to the same image, and this list may be
// empty if no tags reference the image, in which case the image is
// "untagged", in which case it can still be referenced by its ID.
RepoTags []string
// RepoDigests is a list of content-addressable digests of locally available
// image manifests that the image is referenced from. Multiple manifests can
// refer to the same image.
//
// These digests are usually only available if the image was either pulled
// from a registry, or if the image was pushed to a registry, which is when
// the manifest is generated and its digest calculated.
RepoDigests []string
// Parent is the ID of the parent image.
//
// Depending on how the image was created, this field may be empty and
// is only set for images that were built/created locally. This field
// is empty if the image was pulled from an image registry.
Parent string
// Comment is an optional message that can be set when committing or
// importing the image.
Comment string
// Created is the date and time at which the image was created, formatted in
// RFC 3339 nano-seconds (time.RFC3339Nano).
//
// This information is only available if present in the image,
// and omitted otherwise.
Created string `json:",omitempty"`
// Container is the ID of the container that was used to create the image.
//
// Depending on how the image was created, this field may be empty.
//
// Deprecated: this field is omitted in API v1.45, but kept for backward compatibility.
Container string `json:",omitempty"`
// ContainerConfig is an optional field containing the configuration of the
// container that was last committed when creating the image.
//
// Previous versions of Docker builder used this field to store build cache,
// and it is not in active use anymore.
//
// Deprecated: this field is omitted in API v1.45, but kept for backward compatibility.
ContainerConfig *container.Config `json:",omitempty"`
// DockerVersion is the version of Docker that was used to build the image.
//
// Depending on how the image was created, this field may be empty.
DockerVersion string
// Author is the name of the author that was specified when committing the
// image, or as specified through MAINTAINER (deprecated) in the Dockerfile.
Author string
Config *container.Config
// Architecture is the hardware CPU architecture that the image runs on.
Architecture string
// Variant is the CPU architecture variant (presently ARM-only).
Variant string `json:",omitempty"`
// OS is the Operating System the image is built to run on.
Os string
// OsVersion is the version of the Operating System the image is built to
// run on (especially for Windows).
OsVersion string `json:",omitempty"`
// Size is the total size of the image including all layers it is composed of.
Size int64
// VirtualSize is the total size of the image including all layers it is
// composed of.
//
// Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead.
VirtualSize int64 `json:"VirtualSize,omitempty"`
// GraphDriver holds information about the storage driver used to store the
// container's and image's filesystem.
GraphDriver GraphDriverData
// RootFS contains information about the image's RootFS, including the
// layer IDs.
RootFS RootFS
// Metadata of the image in the local cache.
//
// This information is local to the daemon, and not part of the image itself.
Metadata image.Metadata
}
// Container contains response of Engine API:
// GET "/containers/json"
type Container struct {
ID string `json:"Id"`
Names []string
Image string
ImageID string
Command string
Created int64
Ports []Port
SizeRw int64 `json:",omitempty"`
SizeRootFs int64 `json:",omitempty"`
Labels map[string]string
State string
Status string
HostConfig struct {
NetworkMode string `json:",omitempty"`
Annotations map[string]string `json:",omitempty"`
}
NetworkSettings *SummaryNetworkSettings
Mounts []MountPoint
}
// Ping contains response of Engine API:
// GET "/_ping"
type Ping struct {
APIVersion string
OSType string
Experimental bool
BuilderVersion BuilderVersion
// SwarmStatus provides information about the current swarm status of the
// engine, obtained from the "Swarm" header in the API response.
//
// It can be a nil struct if the API version does not provide this header
// in the ping response, or if an error occurred, in which case the client
// should use other ways to get the current swarm status, such as the /swarm
// endpoint.
SwarmStatus *swarm.Status
}
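// Illustrative sketch (example function, not part of the upstream file):
// reading the optional swarm status out of a Ping result. SwarmStatus may be
// nil on older API versions or on error, in which case the caller has to fall
// back to the /swarm or /info endpoints.
func isSwarmManager(p Ping) bool {
	return p.SwarmStatus != nil &&
		p.SwarmStatus.NodeState == swarm.LocalNodeStateActive &&
		p.SwarmStatus.ControlAvailable
}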
// ComponentVersion describes the version information for a specific component.
type ComponentVersion struct {
Name string
Version string
Details map[string]string `json:",omitempty"`
}
// Version contains response of Engine API:
// GET "/version"
type Version struct {
Platform struct{ Name string } `json:",omitempty"`
Components []ComponentVersion `json:",omitempty"`
// The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility
Version string
APIVersion string `json:"ApiVersion"`
MinAPIVersion string `json:"MinAPIVersion,omitempty"`
GitCommit string
GoVersion string
Os string
Arch string
KernelVersion string `json:",omitempty"`
Experimental bool `json:",omitempty"`
BuildTime string `json:",omitempty"`
}
// HealthcheckResult stores information about a single run of a healthcheck probe
type HealthcheckResult struct {
Start time.Time // Start is the time this check started
End time.Time // End is the time this check ended
ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
Output string // Output from last check
}
// Health states
const (
NoHealthcheck = "none" // Indicates there is no healthcheck
Starting = "starting" // Starting indicates that the container is not yet ready
Healthy = "healthy" // Healthy indicates that the container is running correctly
Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
)
// Health stores information about the container's healthcheck results
type Health struct {
Status string // Status is one of Starting, Healthy or Unhealthy
FailingStreak int // FailingStreak is the number of consecutive failures
Log []*HealthcheckResult // Log contains the last few results (oldest first)
}
// ContainerState stores the container's running state.
// It's part of ContainerJSONBase and is returned by the "inspect" command.
type ContainerState struct {
Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
Running bool
Paused bool
Restarting bool
OOMKilled bool
Dead bool
Pid int
ExitCode int
Error string
StartedAt string
FinishedAt string
Health *Health `json:",omitempty"`
}
// ContainerJSONBase contains response of Engine API:
// GET "/containers/{name:.*}/json"
type ContainerJSONBase struct {
ID string `json:"Id"`
Created string
Path string
Args []string
State *ContainerState
Image string
ResolvConfPath string
HostnamePath string
HostsPath string
LogPath string
Node *ContainerNode `json:",omitempty"` // Deprecated: Node was only propagated by Docker Swarm standalone API. It will be removed in the next release.
Name string
RestartCount int
Driver string
Platform string
MountLabel string
ProcessLabel string
AppArmorProfile string
ExecIDs []string
HostConfig *container.HostConfig
GraphDriver GraphDriverData
SizeRw *int64 `json:",omitempty"`
SizeRootFs *int64 `json:",omitempty"`
}
// ContainerJSON is the newer response struct used along with MountPoint.
type ContainerJSON struct {
*ContainerJSONBase
Mounts []MountPoint
Config *container.Config
NetworkSettings *NetworkSettings
}
// NetworkSettings exposes the network settings in the API.
type NetworkSettings struct {
NetworkSettingsBase
DefaultNetworkSettings
Networks map[string]*network.EndpointSettings
}
// SummaryNetworkSettings provides a summary of container's networks
// in /containers/json
type SummaryNetworkSettings struct {
Networks map[string]*network.EndpointSettings
}
// NetworkSettingsBase holds networking state for a container when inspecting it.
type NetworkSettingsBase struct {
Bridge string // Bridge contains the name of the default bridge interface iff it was set through the daemon --bridge flag.
SandboxID string // SandboxID uniquely represents a container's network stack
SandboxKey string // SandboxKey identifies the sandbox
Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
// HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
//
// Deprecated: This field is never set and will be removed in a future release.
HairpinMode bool
// LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
//
// Deprecated: This field is never set and will be removed in a future release.
LinkLocalIPv6Address string
// LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
//
// Deprecated: This field is never set and will be removed in a future release.
LinkLocalIPv6PrefixLen int
SecondaryIPAddresses []network.Address // Deprecated: This field is never set and will be removed in a future release.
SecondaryIPv6Addresses []network.Address // Deprecated: This field is never set and will be removed in a future release.
}
// DefaultNetworkSettings holds network information
// during the 2 release deprecation period.
// It will be removed in Docker 1.11.
type DefaultNetworkSettings struct {
EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
Gateway string // Gateway holds the gateway address for the network
GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
IPAddress string // IPAddress holds the IPv4 address for the network
IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
MacAddress string // MacAddress holds the MAC address for the network
}
// MountPoint represents a mount point configuration inside the container.
// This is used for reporting the mountpoints in use by a container.
type MountPoint struct {
// Type is the type of mount, see `Type<foo>` definitions in
// github.com/docker/docker/api/types/mount.Type
Type mount.Type `json:",omitempty"`
// Name is the name reference to the underlying data defined by `Source`
// e.g., the volume name.
Name string `json:",omitempty"`
// Source is the source location of the mount.
//
// For volumes, this contains the storage location of the volume (within
// `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains
// the source (host) part of the bind-mount. For `tmpfs` mount points, this
// field is empty.
Source string
// Destination is the path relative to the container root (`/`) where the
// Source is mounted inside the container.
Destination string
// Driver is the volume driver used to create the volume (if it is a volume).
Driver string `json:",omitempty"`
// Mode is a comma separated list of options supplied by the user when
// creating the bind/volume mount.
//
// The default is platform-specific (`"z"` on Linux, empty on Windows).
Mode string
// RW indicates whether the mount is mounted writable (read-write).
RW bool
// Propagation describes how mounts are propagated from the host into the
// mount point, and vice-versa. Refer to the Linux kernel documentation
// for details:
// https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
//
// This field is not used on Windows.
Propagation mount.Propagation
}
// DiskUsageObject represents an object type used for disk usage query filtering.
type DiskUsageObject string
const (
// ContainerObject represents a container DiskUsageObject.
ContainerObject DiskUsageObject = "container"
// ImageObject represents an image DiskUsageObject.
ImageObject DiskUsageObject = "image"
// VolumeObject represents a volume DiskUsageObject.
VolumeObject DiskUsageObject = "volume"
// BuildCacheObject represents a build-cache DiskUsageObject.
BuildCacheObject DiskUsageObject = "build-cache"
)
// DiskUsageOptions holds parameters for system disk usage query.
type DiskUsageOptions struct {
// Types specifies what object types to include in the response. If empty,
// all object types are returned.
Types []DiskUsageObject
}
// DiskUsage contains response of Engine API:
// GET "/system/df"
type DiskUsage struct {
LayersSize int64
Images []*image.Summary
Containers []*Container
Volumes []*volume.Volume
BuildCache []*BuildCache
BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40.
}
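// Illustrative sketch (example function, not part of the upstream file): a
// disk-usage query restricted to containers and volumes. An options value
// like this is what the engine's "/system/df" endpoint accepts via the Go
// client; leaving Types empty asks for every object type instead.
func exampleDiskUsageOptions() DiskUsageOptions {
	return DiskUsageOptions{
		Types: []DiskUsageObject{ContainerObject, VolumeObject},
	}
}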
// BuildCachePruneReport contains the response for Engine API:
// POST "/build/prune"
type BuildCachePruneReport struct {
CachesDeleted []string
SpaceReclaimed uint64
}
// SecretCreateResponse contains the information returned to a client
// on the creation of a new secret.
type SecretCreateResponse struct {
// ID is the id of the created secret.
ID string
}
// SecretListOptions holds parameters to list secrets
type SecretListOptions struct {
Filters filters.Args
}
// ConfigCreateResponse contains the information returned to a client
// on the creation of a new config.
type ConfigCreateResponse struct {
// ID is the id of the created config.
ID string
}
// ConfigListOptions holds parameters to list configs
type ConfigListOptions struct {
Filters filters.Args
}
// PushResult contains the tag, manifest digest, and manifest size from the
// push. It's used to signal this information to the trust code in the client
// so it can sign the manifest if necessary.
type PushResult struct {
Tag string
Digest string
Size int
}
// BuildResult contains the image id of a successful build
type BuildResult struct {
ID string
}
// BuildCache contains information about a build cache record.
type BuildCache struct {
// ID is the unique ID of the build cache record.
ID string
// Parent is the ID of the parent build cache record.
//
// Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead.
Parent string `json:"Parent,omitempty"`
// Parents is the list of parent build cache record IDs.
Parents []string `json:"Parents,omitempty"`
// Type is the cache record type.
Type string
// Description is a description of the build-step that produced the build cache.
Description string
// InUse indicates if the build cache is in use.
InUse bool
// Shared indicates if the build cache is shared.
Shared bool
// Size is the amount of disk space used by the build cache (in bytes).
Size int64
// CreatedAt is the date and time at which the build cache was created.
CreatedAt time.Time
// LastUsedAt is the date and time at which the build cache was last used.
LastUsedAt *time.Time
UsageCount int
}
// BuildCachePruneOptions hold parameters to prune the build cache
type BuildCachePruneOptions struct {
All bool
KeepStorage int64
Filters filters.Args
}

View File

@ -0,0 +1,210 @@
package types
import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/volume"
)
// ImagesPruneReport contains the response for Engine API:
// POST "/images/prune"
//
// Deprecated: use [image.PruneReport].
type ImagesPruneReport = image.PruneReport
// VolumesPruneReport contains the response for Engine API:
// POST "/volumes/prune".
//
// Deprecated: use [volume.PruneReport].
type VolumesPruneReport = volume.PruneReport
// NetworkCreateRequest is the request message sent to the server for network create call.
//
// Deprecated: use [network.CreateRequest].
type NetworkCreateRequest = network.CreateRequest
// NetworkCreate is the expected body of the "create network" http request message
//
// Deprecated: use [network.CreateOptions].
type NetworkCreate = network.CreateOptions
// NetworkListOptions holds parameters to filter the list of networks with.
//
// Deprecated: use [network.ListOptions].
type NetworkListOptions = network.ListOptions
// NetworkCreateResponse is the response message sent by the server for network create call.
//
// Deprecated: use [network.CreateResponse].
type NetworkCreateResponse = network.CreateResponse
// NetworkInspectOptions holds parameters to inspect network.
//
// Deprecated: use [network.InspectOptions].
type NetworkInspectOptions = network.InspectOptions
// NetworkConnect represents the data to be used to connect a container to the network
//
// Deprecated: use [network.ConnectOptions].
type NetworkConnect = network.ConnectOptions
// NetworkDisconnect represents the data to be used to disconnect a container from the network
//
// Deprecated: use [network.DisconnectOptions].
type NetworkDisconnect = network.DisconnectOptions
// EndpointResource contains network resources allocated and used for a container in a network.
//
// Deprecated: use [network.EndpointResource].
type EndpointResource = network.EndpointResource
// NetworkResource is the body of the "get network" http response message.
//
// Deprecated: use [network.Inspect] or [network.Summary] (for list operations).
type NetworkResource = network.Inspect
// NetworksPruneReport contains the response for Engine API:
// POST "/networks/prune"
//
// Deprecated: use [network.PruneReport].
type NetworksPruneReport = network.PruneReport
// ExecConfig is a small subset of the Config struct that holds the configuration
// for the exec feature of docker.
//
// Deprecated: use [container.ExecOptions].
type ExecConfig = container.ExecOptions
// ExecStartCheck is a temp struct used by execStart
// Config fields is part of ExecConfig in runconfig package
//
// Deprecated: use [container.ExecStartOptions] or [container.ExecAttachOptions].
type ExecStartCheck = container.ExecStartOptions
// ContainerExecInspect holds information returned by exec inspect.
//
// Deprecated: use [container.ExecInspect].
type ContainerExecInspect = container.ExecInspect
// ContainersPruneReport contains the response for Engine API:
// POST "/containers/prune"
//
// Deprecated: use [container.PruneReport].
type ContainersPruneReport = container.PruneReport
// ContainerPathStat is used to encode the header from
// GET "/containers/{name:.*}/archive"
// "Name" is the file or directory name.
//
// Deprecated: use [container.PathStat].
type ContainerPathStat = container.PathStat
// CopyToContainerOptions holds information
// about files to copy into a container.
//
// Deprecated: use [container.CopyToContainerOptions].
type CopyToContainerOptions = container.CopyToContainerOptions
// ContainerStats contains response of Engine API:
// GET "/stats"
//
// Deprecated: use [container.StatsResponseReader].
type ContainerStats = container.StatsResponseReader
// ThrottlingData stores CPU throttling stats of one running container.
// Not used on Windows.
//
// Deprecated: use [container.ThrottlingData].
type ThrottlingData = container.ThrottlingData
// CPUUsage stores all CPU stats aggregated since container inception.
//
// Deprecated: use [container.CPUUsage].
type CPUUsage = container.CPUUsage
// CPUStats aggregates and wraps all CPU related info of container
//
// Deprecated: use [container.CPUStats].
type CPUStats = container.CPUStats
// MemoryStats aggregates all memory stats since container inception on Linux.
// Windows returns stats for commit and private working set only.
//
// Deprecated: use [container.MemoryStats].
type MemoryStats = container.MemoryStats
// BlkioStatEntry is one small entity to store a piece of Blkio stats
// Not used on Windows.
//
// Deprecated: use [container.BlkioStatEntry].
type BlkioStatEntry = container.BlkioStatEntry
// BlkioStats stores all IO service stats for data read and write.
// This is a Linux specific structure as the differences between expressing
// block I/O on Windows and Linux are sufficiently significant to make
// little sense attempting to morph into a combined structure.
//
// Deprecated: use [container.BlkioStats].
type BlkioStats = container.BlkioStats
// StorageStats is the disk I/O stats for read/write on Windows.
//
// Deprecated: use [container.StorageStats].
type StorageStats = container.StorageStats
// NetworkStats aggregates the network stats of one container
//
// Deprecated: use [container.NetworkStats].
type NetworkStats = container.NetworkStats
// PidsStats contains the stats of a container's pids
//
// Deprecated: use [container.PidsStats].
type PidsStats = container.PidsStats
// Stats is the ultimate struct aggregating all types of stats of one container
//
// Deprecated: use [container.Stats].
type Stats = container.Stats
// StatsJSON is the newer stats struct that additionally includes Networks
//
// Deprecated: use [container.StatsResponse].
type StatsJSON = container.StatsResponse
// EventsOptions holds parameters to filter events with.
//
// Deprecated: use [events.ListOptions].
type EventsOptions = events.ListOptions
// ImageSearchOptions holds parameters to search images with.
//
// Deprecated: use [registry.SearchOptions].
type ImageSearchOptions = registry.SearchOptions
// ImageImportSource holds source information for ImageImport
//
// Deprecated: use [image.ImportSource].
type ImageImportSource = image.ImportSource
// ImageLoadResponse returns information to the client about a load process.
//
// Deprecated: use [image.LoadResponse].
type ImageLoadResponse = image.LoadResponse
// ContainerNode stores information about the node that a container
// is running on. It's only used by the Docker Swarm standalone API.
//
// Deprecated: ContainerNode was used for the classic Docker Swarm standalone API. It will be removed in the next release.
type ContainerNode struct {
ID string
IPAddress string `json:"IP"`
Addr string
Name string
Cpus int
Memory int64
Labels map[string]string
}

View File

@ -0,0 +1,65 @@
package versions // import "github.com/docker/docker/api/types/versions"
import (
"strconv"
"strings"
)
// compare compares two version strings
// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
func compare(v1, v2 string) int {
if v1 == v2 {
return 0
}
var (
currTab = strings.Split(v1, ".")
otherTab = strings.Split(v2, ".")
)
maxVer := len(currTab)
if len(otherTab) > maxVer {
maxVer = len(otherTab)
}
for i := 0; i < maxVer; i++ {
var currInt, otherInt int
if len(currTab) > i {
currInt, _ = strconv.Atoi(currTab[i])
}
if len(otherTab) > i {
otherInt, _ = strconv.Atoi(otherTab[i])
}
if currInt > otherInt {
return 1
}
if otherInt > currInt {
return -1
}
}
return 0
}
// LessThan checks if a version is less than another
func LessThan(v, other string) bool {
return compare(v, other) == -1
}
// LessThanOrEqualTo checks if a version is less than or equal to another
func LessThanOrEqualTo(v, other string) bool {
return compare(v, other) <= 0
}
// GreaterThan checks if a version is greater than another
func GreaterThan(v, other string) bool {
return compare(v, other) == 1
}
// GreaterThanOrEqualTo checks if a version is greater than or equal to another
func GreaterThanOrEqualTo(v, other string) bool {
return compare(v, other) >= 0
}
// Equal checks if a version is equal to another
func Equal(v, other string) bool {
return compare(v, other) == 0
}
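// Illustrative sketch (example function and version strings, not part of the
// upstream file): gating a feature on the negotiated API version. The
// comparison is purely numeric and component-wise, so "1.45" is greater than
// "1.9" even though it is lexicographically smaller.
func supportsFeature(apiVersion string) bool {
	return GreaterThanOrEqualTo(apiVersion, "1.44")
}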

View File

@ -0,0 +1,420 @@
package volume
import (
"github.com/docker/docker/api/types/swarm"
)
// ClusterVolume contains options and information specific to, and only present
// on, Swarm CSI cluster volumes.
type ClusterVolume struct {
// ID is the Swarm ID of the volume. Because cluster volumes are Swarm
// objects, they have an ID, unlike non-cluster volumes, which only have a
// Name. This ID can be used to refer to the cluster volume.
ID string
// Meta is the swarm metadata about this volume.
swarm.Meta
// Spec is the cluster-specific options from which this volume is derived.
Spec ClusterVolumeSpec
// PublishStatus contains the status of the volume as it pertains to its
// publishing on Nodes.
PublishStatus []*PublishStatus `json:",omitempty"`
// Info is information about the global status of the volume.
Info *Info `json:",omitempty"`
}
// ClusterVolumeSpec contains the spec used to create this volume.
type ClusterVolumeSpec struct {
// Group defines the volume group of this volume. Volumes belonging to the
// same group can be referred to by group name when creating Services.
// Referring to a volume by group instructs swarm to treat volumes in that
// group interchangeably for the purpose of scheduling. Volumes with an
// empty string for a group technically all belong to the same, empty-string
// group.
Group string `json:",omitempty"`
// AccessMode defines how the volume is used by tasks.
AccessMode *AccessMode `json:",omitempty"`
// AccessibilityRequirements specifies where in the cluster a volume must
// be accessible from.
//
// This field must be empty if the plugin does not support
// VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the
// plugin does not support it, the volume will not be created.
//
// If AccessibilityRequirements is empty, but the plugin does support
// VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire
// cluster is a valid target for the volume.
AccessibilityRequirements *TopologyRequirement `json:",omitempty"`
// CapacityRange defines the desired capacity that the volume should be
// created with. If nil, the plugin will decide the capacity.
CapacityRange *CapacityRange `json:",omitempty"`
// Secrets defines Swarm Secrets that are passed to the CSI storage plugin
// when operating on this volume.
Secrets []Secret `json:",omitempty"`
// Availability is the Volume's desired availability. Analogous to Node
// Availability, this allows the user to take volumes offline in order to
// update or delete them.
Availability Availability `json:",omitempty"`
}
// Availability specifies the availability of the volume.
type Availability string
const (
// AvailabilityActive indicates that the volume is active and fully
// schedulable on the cluster.
AvailabilityActive Availability = "active"
// AvailabilityPause indicates that no new workloads should use the
// volume, but existing workloads can continue to use it.
AvailabilityPause Availability = "pause"
// AvailabilityDrain indicates that all workloads using this volume
// should be rescheduled, and the volume unpublished from all nodes.
AvailabilityDrain Availability = "drain"
)
// AccessMode defines the access mode of a volume.
type AccessMode struct {
// Scope defines the set of nodes this volume can be used on at one time.
Scope Scope `json:",omitempty"`
// Sharing defines the number and way that different tasks can use this
// volume at one time.
Sharing SharingMode `json:",omitempty"`
// MountVolume defines options for using this volume as a Mount-type
// volume.
//
// Either BlockVolume or MountVolume, but not both, must be present.
MountVolume *TypeMount `json:",omitempty"`
// BlockVolume defines options for using this volume as a Block-type
// volume.
//
// Either BlockVolume or MountVolume, but not both, must be present.
BlockVolume *TypeBlock `json:",omitempty"`
}
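// Illustrative sketch (example function and values, not part of the upstream
// file): an AccessMode for a single-node, single-writer volume mounted as an
// ext4 filesystem. Exactly one of MountVolume or BlockVolume is expected to
// be set; the filesystem type and mount flags are made-up examples.
func exampleAccessMode() AccessMode {
	return AccessMode{
		Scope:   ScopeSingleNode,
		Sharing: SharingNone,
		MountVolume: &TypeMount{
			FsType:     "ext4",
			MountFlags: []string{"noatime"},
		},
	}
}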
// Scope defines the Scope of a Cluster Volume. This is how many nodes a
// Volume can be accessed simultaneously on.
type Scope string
const (
// ScopeSingleNode indicates the volume can be used on one node at a
// time.
ScopeSingleNode Scope = "single"
// ScopeMultiNode indicates the volume can be used on many nodes at
// the same time.
ScopeMultiNode Scope = "multi"
)
// SharingMode defines the Sharing of a Cluster Volume. This is how Tasks using a
// Volume at the same time can use it.
type SharingMode string
const (
// SharingNone indicates that only one Task may use the Volume at a
// time.
SharingNone SharingMode = "none"
// SharingReadOnly indicates that the Volume may be shared by any
// number of Tasks, but they must be read-only.
SharingReadOnly SharingMode = "readonly"
// SharingOneWriter indicates that the Volume may be shared by any
// number of Tasks, but all after the first must be read-only.
SharingOneWriter SharingMode = "onewriter"
// SharingAll means that the Volume may be shared by any number of
// Tasks, as readers or writers.
SharingAll SharingMode = "all"
)
// TypeBlock defines options for using a volume as a block-type volume.
//
// Intentionally empty.
type TypeBlock struct{}
// TypeMount contains options for using a volume as a Mount-type
// volume.
type TypeMount struct {
// FsType specifies the filesystem type for the mount volume. Optional.
FsType string `json:",omitempty"`
// MountFlags defines flags to pass when mounting the volume. Optional.
MountFlags []string `json:",omitempty"`
}
// TopologyRequirement expresses the user's requirements for a volume's
// accessible topology.
type TopologyRequirement struct {
// Requisite specifies a list of Topologies, at least one of which the
// volume must be accessible from.
//
// Taken verbatim from the CSI Spec:
//
// Specifies the list of topologies the provisioned volume MUST be
// accessible from.
// This field is OPTIONAL. If TopologyRequirement is specified either
// requisite or preferred or both MUST be specified.
//
// If requisite is specified, the provisioned volume MUST be
// accessible from at least one of the requisite topologies.
//
// Given
// x = number of topologies provisioned volume is accessible from
// n = number of requisite topologies
// The CO MUST ensure n >= 1. The SP MUST ensure x >= 1
// If x==n, then the SP MUST make the provisioned volume available to
// all topologies from the list of requisite topologies. If it is
// unable to do so, the SP MUST fail the CreateVolume call.
// For example, if a volume should be accessible from a single zone,
// and requisite =
// {"region": "R1", "zone": "Z2"}
// then the provisioned volume MUST be accessible from the "region"
// "R1" and the "zone" "Z2".
// Similarly, if a volume should be accessible from two zones, and
// requisite =
// {"region": "R1", "zone": "Z2"},
// {"region": "R1", "zone": "Z3"}
// then the provisioned volume MUST be accessible from the "region"
// "R1" and both "zone" "Z2" and "zone" "Z3".
//
// If x<n, then the SP SHALL choose x unique topologies from the list
// of requisite topologies. If it is unable to do so, the SP MUST fail
// the CreateVolume call.
// For example, if a volume should be accessible from a single zone,
// and requisite =
// {"region": "R1", "zone": "Z2"},
// {"region": "R1", "zone": "Z3"}
// then the SP may choose to make the provisioned volume available in
// either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1".
// Similarly, if a volume should be accessible from two zones, and
// requisite =
// {"region": "R1", "zone": "Z2"},
// {"region": "R1", "zone": "Z3"},
// {"region": "R1", "zone": "Z4"}
// then the provisioned volume MUST be accessible from any combination
// of two unique topologies: e.g. "R1/Z2" and "R1/Z3", or "R1/Z2" and
// "R1/Z4", or "R1/Z3" and "R1/Z4".
//
// If x>n, then the SP MUST make the provisioned volume available from
// all topologies from the list of requisite topologies and MAY choose
// the remaining x-n unique topologies from the list of all possible
// topologies. If it is unable to do so, the SP MUST fail the
// CreateVolume call.
// For example, if a volume should be accessible from two zones, and
// requisite =
// {"region": "R1", "zone": "Z2"}
// then the provisioned volume MUST be accessible from the "region"
// "R1" and the "zone" "Z2" and the SP may select the second zone
// independently, e.g. "R1/Z4".
Requisite []Topology `json:",omitempty"`
// Preferred is a list of Topologies that the volume should attempt to be
// provisioned in.
//
// Taken from the CSI spec:
//
// Specifies the list of topologies the CO would prefer the volume to
// be provisioned in.
//
// This field is OPTIONAL. If TopologyRequirement is specified either
// requisite or preferred or both MUST be specified.
//
// An SP MUST attempt to make the provisioned volume available using
// the preferred topologies in order from first to last.
//
// If requisite is specified, all topologies in preferred list MUST
// also be present in the list of requisite topologies.
//
// If the SP is unable to make the provisioned volume available
// from any of the preferred topologies, the SP MAY choose a topology
// from the list of requisite topologies.
// If the list of requisite topologies is not specified, then the SP
// MAY choose from the list of all possible topologies.
// If the list of requisite topologies is specified and the SP is
// unable to make the provisioned volume available from any of the
// requisite topologies it MUST fail the CreateVolume call.
//
// Example 1:
// Given a volume should be accessible from a single zone, and
// requisite =
// {"region": "R1", "zone": "Z2"},
// {"region": "R1", "zone": "Z3"}
// preferred =
// {"region": "R1", "zone": "Z3"}
// then the SP SHOULD first attempt to make the provisioned volume
// available from "zone" "Z3" in the "region" "R1" and fall back to
// "zone" "Z2" in the "region" "R1" if that is not possible.
//
// Example 2:
// Given a volume should be accessible from a single zone, and
// requisite =
// {"region": "R1", "zone": "Z2"},
// {"region": "R1", "zone": "Z3"},
// {"region": "R1", "zone": "Z4"},
// {"region": "R1", "zone": "Z5"}
// preferred =
// {"region": "R1", "zone": "Z4"},
// {"region": "R1", "zone": "Z2"}
// then the SP SHOULD first attempt to make the provisioned volume
// accessible from "zone" "Z4" in the "region" "R1" and fall back to
// "zone" "Z2" in the "region" "R1" if that is not possible. If that
// is not possible, the SP may choose between either the "zone"
// "Z3" or "Z5" in the "region" "R1".
//
// Example 3:
// Given a volume should be accessible from TWO zones (because an
// opaque parameter in CreateVolumeRequest, for example, specifies
// the volume is accessible from two zones, aka synchronously
// replicated), and
// requisite =
// {"region": "R1", "zone": "Z2"},
// {"region": "R1", "zone": "Z3"},
// {"region": "R1", "zone": "Z4"},
// {"region": "R1", "zone": "Z5"}
// preferred =
// {"region": "R1", "zone": "Z5"},
// {"region": "R1", "zone": "Z3"}
// then the SP SHOULD first attempt to make the provisioned volume
// accessible from the combination of the two "zones" "Z5" and "Z3" in
// the "region" "R1". If that's not possible, it should fall back to
// a combination of "Z5" and other possibilities from the list of
// requisite. If that's not possible, it should fall back to a
// combination of "Z3" and other possibilities from the list of
// requisite. If that's not possible, it should fall back to a
// combination of other possibilities from the list of requisite.
Preferred []Topology `json:",omitempty"`
}
// Topology is a map of topological domains to topological segments.
//
// This description is taken verbatim from the CSI Spec:
//
// A topological domain is a sub-division of a cluster, like "region",
// "zone", "rack", etc.
// A topological segment is a specific instance of a topological domain,
// like "zone3", "rack3", etc.
// For example {"com.company/zone": "Z1", "com.company/rack": "R3"}
// Valid keys have two segments: an OPTIONAL prefix and name, separated
// by a slash (/), for example: "com.company.example/zone".
// The key name segment is REQUIRED. The prefix is OPTIONAL.
// The key name MUST be 63 characters or less, begin and end with an
// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
// underscores (_), dots (.), or alphanumerics in between, for example
// "zone".
// The key prefix MUST be 63 characters or less, begin and end with a
// lower-case alphanumeric character ([a-z0-9]), contain only
// dashes (-), dots (.), or lower-case alphanumerics in between, and
// follow domain name notation format
// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
// The key prefix SHOULD include the plugin's host company name and/or
// the plugin name, to minimize the possibility of collisions with keys
// from other plugins.
// If a key prefix is specified, it MUST be identical across all
// topology keys returned by the SP (across all RPCs).
// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone"
// MUST not both exist.
// Each value (topological segment) MUST contain 1 or more strings.
// Each string MUST be 63 characters or less and begin and end with an
// alphanumeric character with '-', '_', '.', or alphanumerics in
// between.
type Topology struct {
Segments map[string]string `json:",omitempty"`
}
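// The snippet below is an illustrative sketch, not part of the vendored
// source: it shows how the TopologyRequirement from Example 1 above could be
// expressed with these types. The region/zone segment values are hypothetical.
//
//	req := TopologyRequirement{
//		Requisite: []Topology{
//			{Segments: map[string]string{"region": "R1", "zone": "Z2"}},
//			{Segments: map[string]string{"region": "R1", "zone": "Z3"}},
//		},
//		Preferred: []Topology{
//			{Segments: map[string]string{"region": "R1", "zone": "Z3"}},
//		},
//	}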
// CapacityRange describes the minimum and maximum capacity a volume should be
// created with.
type CapacityRange struct {
// RequiredBytes specifies that a volume must be at least this big. The
// value of 0 indicates an unspecified minimum.
RequiredBytes int64
// LimitBytes specifies that a volume must not be bigger than this. The
// value of 0 indicates an unspecified maximum.
LimitBytes int64
}
// Secret represents a Swarm Secret value that must be passed to the CSI
// storage plugin when operating on this Volume. It represents one key-value
// pair of possibly many.
type Secret struct {
// Key is the name of the key of the key-value pair passed to the plugin.
Key string
// Secret is the swarm Secret object from which to read data. This can be a
// Secret name or ID. The Secret data is retrieved by Swarm and used as the
// value of the key-value pair passed to the plugin.
Secret string
}
// PublishState represents the state of a Volume as it pertains to its
// use on a particular Node.
type PublishState string
const (
// StatePending indicates that the volume should be published on
// this node, but the call to ControllerPublishVolume has not been
// successfully completed yet and the result recorded by swarmkit.
StatePending PublishState = "pending-publish"
// StatePublished means the volume is published successfully to the node.
StatePublished PublishState = "published"
// StatePendingNodeUnpublish indicates that the Volume should be
// unpublished on the Node, and we're waiting for confirmation that it has
// done so. After the Node has confirmed that the Volume has been
// unpublished, the state will move to StatePendingUnpublish.
StatePendingNodeUnpublish PublishState = "pending-node-unpublish"
// StatePendingUnpublish means the volume is still published to the node
// by the controller, awaiting the operation to unpublish it.
StatePendingUnpublish PublishState = "pending-controller-unpublish"
)
// PublishStatus represents the status of the volume as published to an
// individual node
type PublishStatus struct {
// NodeID is the ID of the swarm node this Volume is published to.
NodeID string `json:",omitempty"`
// State is the publish state of the volume.
State PublishState `json:",omitempty"`
// PublishContext is the PublishContext returned by the CSI plugin when
// a volume is published.
PublishContext map[string]string `json:",omitempty"`
}
// Info contains information about the Volume as a whole as provided by
// the CSI storage plugin.
type Info struct {
// CapacityBytes is the capacity of the volume in bytes. A value of 0
// indicates that the capacity is unknown.
CapacityBytes int64 `json:",omitempty"`
// VolumeContext is the context originating from the CSI storage plugin
// when the Volume is created.
VolumeContext map[string]string `json:",omitempty"`
// VolumeID is the ID of the Volume as seen by the CSI storage plugin. This
// is distinct from the Volume's Swarm ID, which is the ID the Docker Engine
// uses to refer to the Volume. If this field is blank, the Volume has not
// been successfully created yet.
VolumeID string `json:",omitempty"`
// AccessibleTopology is the topology this volume is actually accessible
// from.
AccessibleTopology []Topology `json:",omitempty"`
}


@ -0,0 +1,29 @@
package volume
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// CreateOptions VolumeConfig
//
// Volume configuration
// swagger:model CreateOptions
type CreateOptions struct {
// cluster volume spec
ClusterVolumeSpec *ClusterVolumeSpec `json:"ClusterVolumeSpec,omitempty"`
// Name of the volume driver to use.
Driver string `json:"Driver,omitempty"`
// A mapping of driver options and values. These options are
// passed directly to the driver and are driver specific.
//
DriverOpts map[string]string `json:"DriverOpts,omitempty"`
// User-defined key/value metadata.
Labels map[string]string `json:"Labels,omitempty"`
// The new volume's name. If not specified, Docker generates a name.
//
Name string `json:"Name,omitempty"`
}
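// The snippet below is an illustrative sketch, not part of the vendored
// source: the driver name and labels are hypothetical, and the VolumeCreate
// client method it alludes to is defined in the client package, not here.
//
//	opts := CreateOptions{
//		Name:   "data",
//		Driver: "my-csi-plugin",
//		Labels: map[string]string{"env": "dev"},
//	}
//	// vol, err := apiClient.VolumeCreate(ctx, opts)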


@ -0,0 +1,18 @@
package volume
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ListResponse VolumeListResponse
//
// Volume list response
// swagger:model ListResponse
type ListResponse struct {
// List of volumes
Volumes []*Volume `json:"Volumes"`
// Warnings that occurred when fetching the list of volumes.
//
Warnings []string `json:"Warnings"`
}


@ -0,0 +1,15 @@
package volume // import "github.com/docker/docker/api/types/volume"
import "github.com/docker/docker/api/types/filters"
// ListOptions holds parameters to list volumes.
type ListOptions struct {
Filters filters.Args
}
// PruneReport contains the response for Engine API:
// POST "/volumes/prune"
type PruneReport struct {
VolumesDeleted []string
SpaceReclaimed uint64
}
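// Illustrative sketch, not part of the vendored source: ListOptions carries
// filters for listing volumes, for example only dangling ones. The VolumeList
// client method referenced in the trailing comment lives in the client
// package, not here, and the filter key is an assumption.
//
//	opts := ListOptions{
//		Filters: filters.NewArgs(filters.Arg("dangling", "true")),
//	}
//	// resp, err := apiClient.VolumeList(ctx, opts)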


@ -0,0 +1,75 @@
package volume
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// Volume volume
// swagger:model Volume
type Volume struct {
// cluster volume
ClusterVolume *ClusterVolume `json:"ClusterVolume,omitempty"`
// Date/Time the volume was created.
CreatedAt string `json:"CreatedAt,omitempty"`
// Name of the volume driver used by the volume.
// Required: true
Driver string `json:"Driver"`
// User-defined key/value metadata.
// Required: true
Labels map[string]string `json:"Labels"`
// Mount path of the volume on the host.
// Required: true
Mountpoint string `json:"Mountpoint"`
// Name of the volume.
// Required: true
Name string `json:"Name"`
// The driver specific options used when creating the volume.
//
// Required: true
Options map[string]string `json:"Options"`
// The level at which the volume exists. Either `global` for cluster-wide,
// or `local` for machine level.
//
// Required: true
Scope string `json:"Scope"`
// Low-level details about the volume, provided by the volume driver.
// Details are returned as a map with key/value pairs:
// `{"key":"value","key2":"value2"}`.
//
// The `Status` field is optional, and is omitted if the volume driver
// does not support this feature.
//
Status map[string]interface{} `json:"Status,omitempty"`
// usage data
UsageData *UsageData `json:"UsageData,omitempty"`
}
// UsageData Usage details about the volume. This information is used by the
// `GET /system/df` endpoint, and omitted in other endpoints.
//
// swagger:model UsageData
type UsageData struct {
// The number of containers referencing this volume. This field
// is set to `-1` if the reference-count is not available.
//
// Required: true
RefCount int64 `json:"RefCount"`
// Amount of disk space used by the volume (in bytes). This information
// is only available for volumes created with the `"local"` volume
// driver. For volumes created with other volume drivers, this field
// is set to `-1` ("not available")
//
// Required: true
Size int64 `json:"Size"`
}


@ -0,0 +1,7 @@
package volume // import "github.com/docker/docker/api/types/volume"
// UpdateOptions is configuration to update a Volume with.
type UpdateOptions struct {
// Spec is the ClusterVolumeSpec to update the volume to.
Spec *ClusterVolumeSpec `json:"Spec,omitempty"`
}

38
vendor/github.com/docker/docker/client/README.md generated vendored Normal file

@ -0,0 +1,38 @@
# Go client for the Docker Engine API
The `docker` command uses this package to communicate with the daemon. It can
also be used by your own Go applications to do anything the command-line
interface does: running containers, pulling images, managing swarms, etc.
For example, to list all containers (the equivalent of `docker ps --all`):
```go
package main
import (
"context"
"fmt"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
)
func main() {
apiClient, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
panic(err)
}
defer apiClient.Close()
containers, err := apiClient.ContainerList(context.Background(), container.ListOptions{All: true})
if err != nil {
panic(err)
}
for _, ctr := range containers {
fmt.Printf("%s %s (status: %s)\n", ctr.ID, ctr.Image, ctr.Status)
}
}
```
[Full documentation is available on pkg.go.dev.](https://pkg.go.dev/github.com/docker/docker/client)
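For daemons that may run a different Engine version, it is usually a good idea
to enable API version negotiation when constructing the client. The following
is a minimal sketch, assuming the daemon is reachable through the standard
`DOCKER_HOST`/`DOCKER_*` environment variables:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Negotiate the API version with the daemon on first use, so the client
	// also works against older Engine versions.
	apiClient, err := client.NewClientWithOpts(
		client.FromEnv,
		client.WithAPIVersionNegotiation(),
	)
	if err != nil {
		panic(err)
	}
	defer apiClient.Close()

	ping, err := apiClient.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("daemon API version:", ping.APIVersion)
}
```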

16
vendor/github.com/docker/docker/client/build_cancel.go generated vendored Normal file

@ -0,0 +1,16 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"net/url"
)
// BuildCancel requests the daemon to cancel the ongoing build request.
func (cli *Client) BuildCancel(ctx context.Context, id string) error {
query := url.Values{}
query.Set("id", id)
serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil)
ensureReaderClosed(serverResp)
return err
}

45
vendor/github.com/docker/docker/client/build_prune.go generated vendored Normal file

@ -0,0 +1,45 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
"net/url"
"strconv"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/pkg/errors"
)
// BuildCachePrune requests the daemon to delete unused cache data
func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
if err := cli.NewVersionError(ctx, "1.31", "build prune"); err != nil {
return nil, err
}
report := types.BuildCachePruneReport{}
query := url.Values{}
if opts.All {
query.Set("all", "1")
}
query.Set("keep-storage", strconv.Itoa(int(opts.KeepStorage)))
f, err := filters.ToJSON(opts.Filters)
if err != nil {
return nil, errors.Wrap(err, "prune could not marshal filters option")
}
query.Set("filters", f)
serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil)
defer ensureReaderClosed(serverResp)
if err != nil {
return nil, err
}
if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
return nil, errors.Wrap(err, "error retrieving disk usage")
}
return &report, nil
}
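// Illustrative usage sketch, not part of the vendored source; the option
// values are hypothetical:
//
//	report, err := cli.BuildCachePrune(ctx, types.BuildCachePruneOptions{
//		All:         true,
//		KeepStorage: 512 * 1024 * 1024, // keep up to 512 MiB of build cache
//	})
//	if err == nil {
//		fmt.Println("space reclaimed:", report.SpaceReclaimed)
//	}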


@ -0,0 +1,14 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"github.com/docker/docker/api/types/checkpoint"
)
// CheckpointCreate creates a checkpoint from the given container with the given name
func (cli *Client) CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error {
resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil)
ensureReaderClosed(resp)
return err
}


@ -0,0 +1,20 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"net/url"
"github.com/docker/docker/api/types/checkpoint"
)
// CheckpointDelete deletes the checkpoint with the given name from the given container
func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options checkpoint.DeleteOptions) error {
query := url.Values{}
if options.CheckpointDir != "" {
query.Set("dir", options.CheckpointDir)
}
resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil)
ensureReaderClosed(resp)
return err
}


@ -0,0 +1,28 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
"net/url"
"github.com/docker/docker/api/types/checkpoint"
)
// CheckpointList returns the checkpoints of the given container in the docker host
func (cli *Client) CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error) {
var checkpoints []checkpoint.Summary
query := url.Values{}
if options.CheckpointDir != "" {
query.Set("dir", options.CheckpointDir)
}
resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
defer ensureReaderClosed(resp)
if err != nil {
return checkpoints, err
}
err = json.NewDecoder(resp.body).Decode(&checkpoints)
return checkpoints, err
}
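// Illustrative usage sketch, not part of the vendored source; the container
// name and checkpoint directory are hypothetical:
//
//	summaries, err := cli.CheckpointList(ctx, "my-container", checkpoint.ListOptions{
//		CheckpointDir: "/var/lib/my-checkpoints",
//	})
//	if err == nil {
//		for _, s := range summaries {
//			fmt.Println(s.Name)
//		}
//	}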

461
vendor/github.com/docker/docker/client/client.go generated vendored Normal file

@ -0,0 +1,461 @@
/*
Package client is a Go client for the Docker Engine API.
For more information about the Engine API, see the documentation:
https://docs.docker.com/engine/api/
# Usage
You use the library by constructing a client object using [NewClientWithOpts]
and calling methods on it. The client can be configured from environment
variables by passing the [FromEnv] option, or configured manually by passing any
of the other available [Opts].
For example, to list running containers (the equivalent of "docker ps"):
package main
import (
"context"
"fmt"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
)
func main() {
cli, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
panic(err)
}
containers, err := cli.ContainerList(context.Background(), container.ListOptions{})
if err != nil {
panic(err)
}
for _, ctr := range containers {
fmt.Printf("%s %s\n", ctr.ID, ctr.Image)
}
}
*/
package client // import "github.com/docker/docker/client"
import (
"context"
"crypto/tls"
"net"
"net/http"
"net/url"
"path"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"github.com/docker/go-connections/sockets"
"github.com/pkg/errors"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel/trace"
)
// DummyHost is a hostname used for local communication.
//
// It acts as a valid formatted hostname for local connections (such as "unix://"
// or "npipe://") which do not require a hostname. It should never be resolved,
// but uses the special-purpose ".localhost" TLD (as defined in [RFC 2606, Section 2]
// and [RFC 6761, Section 6.3]).
//
// [RFC 7230, Section 5.4] defines that an empty header must be used for such
// cases:
//
// If the authority component is missing or undefined for the target URI,
// then a client MUST send a Host header field with an empty field-value.
//
// However, [Go stdlib] enforces the semantics of HTTP(S) over TCP, does not
// allow an empty header to be used, and requires req.URL.Scheme to be either
// "http" or "https".
//
// For further details, refer to:
//
// - https://github.com/docker/engine-api/issues/189
// - https://github.com/golang/go/issues/13624
// - https://github.com/golang/go/issues/61076
// - https://github.com/moby/moby/issues/45935
//
// [RFC 2606, Section 2]: https://www.rfc-editor.org/rfc/rfc2606.html#section-2
// [RFC 6761, Section 6.3]: https://www.rfc-editor.org/rfc/rfc6761#section-6.3
// [RFC 7230, Section 5.4]: https://datatracker.ietf.org/doc/html/rfc7230#section-5.4
// [Go stdlib]: https://github.com/golang/go/blob/6244b1946bc2101b01955468f1be502dbadd6807/src/net/http/transport.go#L558-L569
const DummyHost = "api.moby.localhost"
// fallbackAPIVersion is the version to fallback to if API-version negotiation
// fails. This version is the highest version of the API before API-version
// negotiation was introduced. If negotiation fails (or no API version was
// included in the API response), we assume the API server uses the most
// recent version before negotiation was introduced.
const fallbackAPIVersion = "1.24"
// Client is the API client that performs all operations
// against a docker server.
type Client struct {
// scheme sets the scheme for the client
scheme string
// host holds the server address to connect to
host string
// proto holds the client protocol i.e. unix.
proto string
// addr holds the client address.
addr string
// basePath holds the path to prepend to the requests.
basePath string
// client used to send and receive http requests.
client *http.Client
// version of the server to talk to.
version string
// userAgent is the User-Agent header to use for HTTP requests. It takes
// precedence over User-Agent headers set in customHTTPHeaders, and other
// header variables. When set to an empty string, the User-Agent header
// is removed, and no header is sent.
userAgent *string
// custom HTTP headers configured by users.
customHTTPHeaders map[string]string
// manualOverride is set to true when the version was set by users.
manualOverride bool
// negotiateVersion indicates if the client should automatically negotiate
// the API version to use when making requests. API version negotiation is
// performed on the first request, after which negotiated is set to "true"
// so that subsequent requests do not re-negotiate.
negotiateVersion bool
// negotiated indicates that API version negotiation took place
negotiated atomic.Bool
// negotiateLock is used to single-flight the version negotiation process
negotiateLock sync.Mutex
tp trace.TracerProvider
// When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections).
// Store the original transport as the http.Client transport will be wrapped with tracing libs.
baseTransport *http.Transport
}
// ErrRedirect is the error returned by checkRedirect when the request is non-GET.
var ErrRedirect = errors.New("unexpected redirect in response")
// CheckRedirect specifies the policy for dealing with redirect responses. It
// can be set on [http.Client.CheckRedirect] to prevent HTTP redirects for
// non-GET requests. It returns an [ErrRedirect] for non-GET request, otherwise
// returns a [http.ErrUseLastResponse], which is special-cased by http.Client
// to use the last response.
//
// Go 1.8 changed behavior for HTTP redirects (specifically 301, 307, and 308)
// in the client. The client (and by extension API client) can be made to send
// a request like "POST /containers//start" where what would normally be in the
// name section of the URL is empty. This triggers an HTTP 301 from the daemon.
//
// In go 1.8 this 301 is converted to a GET request, and ends up getting
// a 404 from the daemon. This behavior change manifests in the client in that
// before, the 301 was not followed and the client did not generate an error,
// but now results in a message like "Error response from daemon: page not found".
func CheckRedirect(_ *http.Request, via []*http.Request) error {
if via[0].Method == http.MethodGet {
return http.ErrUseLastResponse
}
return ErrRedirect
}
// NewClientWithOpts initializes a new API client with a default HTTPClient, and
// default API host and version. It also initializes the custom HTTP headers to
// add to each request.
//
// It takes an optional list of [Opt] functional arguments, which are applied in
// the order they're provided, which allows modifying the defaults when creating
// the client. For example, the following initializes a client that configures
// itself with values from environment variables ([FromEnv]), and has automatic
// API version negotiation enabled ([WithAPIVersionNegotiation]).
//
// cli, err := client.NewClientWithOpts(
// client.FromEnv,
// client.WithAPIVersionNegotiation(),
// )
func NewClientWithOpts(ops ...Opt) (*Client, error) {
hostURL, err := ParseHostURL(DefaultDockerHost)
if err != nil {
return nil, err
}
client, err := defaultHTTPClient(hostURL)
if err != nil {
return nil, err
}
c := &Client{
host: DefaultDockerHost,
version: api.DefaultVersion,
client: client,
proto: hostURL.Scheme,
addr: hostURL.Host,
}
for _, op := range ops {
if err := op(c); err != nil {
return nil, err
}
}
if tr, ok := c.client.Transport.(*http.Transport); ok {
// Store the base transport before we wrap it in tracing libs below
// This is used, as an example, to close idle connections when the client is closed
c.baseTransport = tr
}
if c.scheme == "" {
// TODO(stevvooe): This isn't really the right way to write clients in Go.
// `NewClient` should probably only take an `*http.Client` and work from there.
// Unfortunately, the model of having a host-ish/url-thingy as the connection
// string has us confusing protocol and transport layers. We continue doing
// this to avoid breaking existing clients but this should be addressed.
if c.tlsConfig() != nil {
c.scheme = "https"
} else {
c.scheme = "http"
}
}
c.client.Transport = otelhttp.NewTransport(
c.client.Transport,
otelhttp.WithTracerProvider(c.tp),
otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string {
return req.Method + " " + req.URL.Path
}),
)
return c, nil
}
func (cli *Client) tlsConfig() *tls.Config {
if cli.baseTransport == nil {
return nil
}
return cli.baseTransport.TLSClientConfig
}
func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) {
transport := &http.Transport{}
err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host)
if err != nil {
return nil, err
}
return &http.Client{
Transport: transport,
CheckRedirect: CheckRedirect,
}, nil
}
// Close the transport used by the client
func (cli *Client) Close() error {
if cli.baseTransport != nil {
cli.baseTransport.CloseIdleConnections()
return nil
}
return nil
}
// checkVersion manually triggers API version negotiation (if configured).
// This allows for version-dependent code to use the same version as will
// be negotiated when making the actual requests, and for which cases
// we cannot do the negotiation lazily.
func (cli *Client) checkVersion(ctx context.Context) error {
if !cli.manualOverride && cli.negotiateVersion && !cli.negotiated.Load() {
// Ensure exclusive write access to version and negotiated fields
cli.negotiateLock.Lock()
defer cli.negotiateLock.Unlock()
// May have been set during the last execution of the critical section
if cli.negotiated.Load() {
return nil
}
ping, err := cli.Ping(ctx)
if err != nil {
return err
}
cli.negotiateAPIVersionPing(ping)
}
return nil
}
// getAPIPath returns the versioned request path to call the API.
// It appends the query parameters to the path if they are not empty.
func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string {
var apiPath string
_ = cli.checkVersion(ctx)
if cli.version != "" {
v := strings.TrimPrefix(cli.version, "v")
apiPath = path.Join(cli.basePath, "/v"+v, p)
} else {
apiPath = path.Join(cli.basePath, p)
}
return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String()
}
// ClientVersion returns the API version used by this client.
func (cli *Client) ClientVersion() string {
return cli.version
}
// NegotiateAPIVersion queries the API and updates the version to match the API
// version. NegotiateAPIVersion downgrades the client's API version to match the
// APIVersion if the ping version is lower than the default version. If the API
// version reported by the server is higher than the maximum version supported
// by the client, it uses the client's maximum version.
//
// If a manual override is in place, either through the "DOCKER_API_VERSION"
// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized
// with a fixed version ([WithVersion]), no negotiation is performed.
//
// If the API server's ping response does not contain an API version, or if the
// client did not get a successful ping response, it assumes it is connected with
// an old daemon that does not support API version negotiation, in which case it
// downgrades to the latest version of the API before version negotiation was
// added (1.24).
func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
if !cli.manualOverride {
// Avoid concurrent modification of version-related fields
cli.negotiateLock.Lock()
defer cli.negotiateLock.Unlock()
ping, err := cli.Ping(ctx)
if err != nil {
// FIXME(thaJeztah): Ping returns an error when failing to connect to the API; we should not swallow the error here, and instead return it.
return
}
cli.negotiateAPIVersionPing(ping)
}
}
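// Illustrative usage sketch, not part of the vendored source: when the client
// is constructed without the WithAPIVersionNegotiation option, negotiation can
// be triggered explicitly before making version-dependent calls (ctx is
// assumed to be an existing context.Context):
//
//	cli, err := NewClientWithOpts(FromEnv)
//	if err != nil {
//		panic(err)
//	}
//	cli.NegotiateAPIVersion(ctx)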
// NegotiateAPIVersionPing downgrades the client's API version to match the
// APIVersion in the ping response. If the API version in pingResponse is higher
// than the maximum version supported by the client, it uses the client's maximum
// version.
//
// If a manual override is in place, either through the "DOCKER_API_VERSION"
// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized
// with a fixed version ([WithVersion]), no negotiation is performed.
//
// If the API server's ping response does not contain an API version, we assume
// we are connected with an old daemon without API version negotiation support,
// and downgrade to the latest version of the API before version negotiation was
// added (1.24).
func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) {
if !cli.manualOverride {
// Avoid concurrent modification of version-related fields
cli.negotiateLock.Lock()
defer cli.negotiateLock.Unlock()
cli.negotiateAPIVersionPing(pingResponse)
}
}
// negotiateAPIVersionPing queries the API and updates the version to match the
// API version from the ping response.
func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) {
// default to the latest version before versioning headers existed
if pingResponse.APIVersion == "" {
pingResponse.APIVersion = fallbackAPIVersion
}
// if the client is not initialized with a version, start with the latest supported version
if cli.version == "" {
cli.version = api.DefaultVersion
}
// if server version is lower than the client version, downgrade
if versions.LessThan(pingResponse.APIVersion, cli.version) {
cli.version = pingResponse.APIVersion
}
// Store the results, so that automatic API version negotiation (if enabled)
// won't be performed on the next request.
if cli.negotiateVersion {
cli.negotiated.Store(true)
}
}
// DaemonHost returns the host address used by the client
func (cli *Client) DaemonHost() string {
return cli.host
}
// HTTPClient returns a copy of the HTTP client bound to the server
func (cli *Client) HTTPClient() *http.Client {
c := *cli.client
return &c
}
// ParseHostURL parses a url string, validates the string is a host url, and
// returns the parsed URL
func ParseHostURL(host string) (*url.URL, error) {
proto, addr, ok := strings.Cut(host, "://")
if !ok || addr == "" {
return nil, errors.Errorf("unable to parse docker host `%s`", host)
}
var basePath string
if proto == "tcp" {
parsed, err := url.Parse("tcp://" + addr)
if err != nil {
return nil, err
}
addr = parsed.Host
basePath = parsed.Path
}
return &url.URL{
Scheme: proto,
Host: addr,
Path: basePath,
}, nil
}
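// Illustrative usage sketch, not part of the vendored source; the address is
// hypothetical:
//
//	u, err := ParseHostURL("tcp://127.0.0.1:2376")
//	// on success: u.Scheme == "tcp", u.Host == "127.0.0.1:2376"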
func (cli *Client) dialerFromTransport() func(context.Context, string, string) (net.Conn, error) {
if cli.baseTransport == nil || cli.baseTransport.DialContext == nil {
return nil
}
if cli.baseTransport.TLSClientConfig != nil {
// When using a tls config we don't use the configured dialer but instead a fallback dialer...
// Note: It seems like this should use the normal dialer and wrap the returned net.Conn in a tls.Conn
// I honestly don't know why it doesn't do that, but it doesn't and such a change is entirely unrelated to the change in this commit.
return nil
}
return cli.baseTransport.DialContext
}
// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header,
// that can be used for proxying the daemon connection. It is used by
// ["docker dial-stdio"].
//
// ["docker dial-stdio"]: https://github.com/docker/cli/pull/1014
func (cli *Client) Dialer() func(context.Context) (net.Conn, error) {
return func(ctx context.Context) (net.Conn, error) {
if dialFn := cli.dialerFromTransport(); dialFn != nil {
return dialFn(ctx, cli.proto, cli.addr)
}
switch cli.proto {
case "unix":
return net.Dial(cli.proto, cli.addr)
case "npipe":
return sockets.DialPipe(cli.addr, 32*time.Second)
default:
if tlsConfig := cli.tlsConfig(); tlsConfig != nil {
return tls.Dial(cli.proto, cli.addr, tlsConfig)
}
return net.Dial(cli.proto, cli.addr)
}
}
}
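// Illustrative usage sketch, not part of the vendored source: obtaining a raw
// connection to the daemon, for example to proxy its API socket:
//
//	dial := cli.Dialer()
//	conn, err := dial(ctx)
//	if err == nil {
//		defer conn.Close()
//		// conn is a raw stream connection to the daemon
//	}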


@ -0,0 +1,27 @@
package client
import "net/http"
// NewClient initializes a new API client for the given host and API version.
// It uses the given http client as transport.
// It also initializes the custom http headers to add to each request.
//
// It won't send any version information if the version number is empty. It is
// highly recommended that you set a version or your client may break if the
// server is upgraded.
//
// Deprecated: use [NewClientWithOpts] passing the [WithHost], [WithVersion],
// [WithHTTPClient] and [WithHTTPHeaders] options. We recommend enabling API
// version negotiation by passing the [WithAPIVersionNegotiation] option instead
// of WithVersion.
func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders))
}
// NewEnvClient initializes a new API client based on environment variables.
// See FromEnv for a list of supported environment variables.
//
// Deprecated: use [NewClientWithOpts] passing the [FromEnv] option.
func NewEnvClient() (*Client, error) {
return NewClientWithOpts(FromEnv)
}


@ -0,0 +1,7 @@
//go:build !windows
package client // import "github.com/docker/docker/client"
// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST
// (EnvOverrideHost) environment variable is unset or empty.
const DefaultDockerHost = "unix:///var/run/docker.sock"


@ -0,0 +1,5 @@
package client // import "github.com/docker/docker/client"
// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST
// (EnvOverrideHost) environment variable is unset or empty.
const DefaultDockerHost = "npipe:////./pipe/docker_engine"


@ -0,0 +1,25 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
)
// ConfigCreate creates a new config.
func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
var response types.ConfigCreateResponse
if err := cli.NewVersionError(ctx, "1.30", "config create"); err != nil {
return response, err
}
resp, err := cli.post(ctx, "/configs/create", nil, config, nil)
defer ensureReaderClosed(resp)
if err != nil {
return response, err
}
err = json.NewDecoder(resp.body).Decode(&response)
return response, err
}
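// Illustrative usage sketch, not part of the vendored source; the config name
// and payload are hypothetical, and the daemon must be a swarm manager:
//
//	resp, err := cli.ConfigCreate(ctx, swarm.ConfigSpec{
//		Annotations: swarm.Annotations{Name: "app-config"},
//		Data:        []byte("key=value"),
//	})
//	if err == nil {
//		fmt.Println("created config with ID", resp.ID)
//	}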


@ -0,0 +1,36 @@
package client // import "github.com/docker/docker/client"
import (
"bytes"
"context"
"encoding/json"
"io"
"github.com/docker/docker/api/types/swarm"
)
// ConfigInspectWithRaw returns the config information with raw data
func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) {
if id == "" {
return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id}
}
if err := cli.NewVersionError(ctx, "1.30", "config inspect"); err != nil {
return swarm.Config{}, nil, err
}
resp, err := cli.get(ctx, "/configs/"+id, nil, nil)
defer ensureReaderClosed(resp)
if err != nil {
return swarm.Config{}, nil, err
}
body, err := io.ReadAll(resp.body)
if err != nil {
return swarm.Config{}, nil, err
}
var config swarm.Config
rdr := bytes.NewReader(body)
err = json.NewDecoder(rdr).Decode(&config)
return config, body, err
}

38
vendor/github.com/docker/docker/client/config_list.go generated vendored Normal file

@ -0,0 +1,38 @@
package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
"net/url"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
)
// ConfigList returns the list of configs.
func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) {
if err := cli.NewVersionError(ctx, "1.30", "config list"); err != nil {
return nil, err
}
query := url.Values{}
if options.Filters.Len() > 0 {
filterJSON, err := filters.ToJSON(options.Filters)
if err != nil {
return nil, err
}
query.Set("filters", filterJSON)
}
resp, err := cli.get(ctx, "/configs", query, nil)
defer ensureReaderClosed(resp)
if err != nil {
return nil, err
}
var configs []swarm.Config
err = json.NewDecoder(resp.body).Decode(&configs)
return configs, err
}

Some files were not shown because too many files have changed in this diff.