vendor: github.com/moby/swarmkit/v2 v2.0.0-20220420172245-6068d1894d46

full diff: 616e8db4c3...6068d1894d

a replace rule was needed (similar to the one in github.com/docker/docker) to fix
some dependency issues;

    github.com/docker/cli/cli/trust imports
        github.com/theupdateframework/notary/trustpinning tested by
        github.com/theupdateframework/notary/trustpinning.test imports
        github.com/cloudflare/cfssl/helpers imports
        github.com/google/certificate-transparency-go imports
        go.etcd.io/etcd/v3 imports
        go.etcd.io/etcd/tests/v3/integration imports
        go.etcd.io/etcd/server/v3/embed imports
        go.opentelemetry.io/otel/semconv: module go.opentelemetry.io/otel@latest found (v1.7.0), but does not contain package go.opentelemetry.io/otel/semconv
    github.com/docker/cli/cli/trust imports
        github.com/theupdateframework/notary/trustpinning tested by
        github.com/theupdateframework/notary/trustpinning.test imports
        github.com/cloudflare/cfssl/helpers imports
        github.com/google/certificate-transparency-go imports
        go.etcd.io/etcd/v3 imports
        go.etcd.io/etcd/tests/v3/integration imports
        go.etcd.io/etcd/server/v3/embed imports
        go.opentelemetry.io/otel/exporters/otlp imports
        go.opentelemetry.io/otel/sdk/metric/controller/basic imports
        go.opentelemetry.io/otel/metric/registry: module go.opentelemetry.io/otel/metric@latest found (v0.30.0), but does not contain package go.opentelemetry.io/otel/metric/registry

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
This commit is contained in:
Sebastiaan van Stijn
2022-04-29 20:10:55 +02:00
parent 7aa0b273e5
commit 11924f498b
45 changed files with 385 additions and 378 deletions

View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016-2018 Docker Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,24 +0,0 @@
### Notice
Do not change .pb.go files directly. You need to change the corresponding .proto files and run the following command to regenerate the .pb.go files.
```
$ make generate
```
Click [here](https://github.com/google/protobuf) for more information about protobuf.
The `api.pb.txt` file contains merged descriptors of all defined services and messages.
Definitions present here are considered frozen after the release.
At release time, the current `api.pb.txt` file will be moved into place to
freeze the API changes for the minor version. For example, when 1.0.0 is
released, `api.pb.txt` should be moved to `1.0.txt`. Notice that we leave off
the patch number, since the API will be completely locked down for a given
patch series.
We may find that by default, protobuf descriptors are too noisy to lock down
API changes. In that case, we may filter out certain fields in the descriptors,
possibly regenerating for old versions.
This process is similar to the [process used to ensure backwards compatibility
in Go](https://github.com/golang/go/tree/master/api).

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,72 +0,0 @@
syntax = "proto3";
package docker.swarmkit.v1;
import "github.com/docker/swarmkit/api/types.proto";
import "github.com/docker/swarmkit/api/specs.proto";
import "gogoproto/gogo.proto";
import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
// CA defines the RPC methods for requesting certificates from a CA.
service CA {
// GetRootCACertificate returns the cluster's root CA certificate. It can be
// called without TLS client authorization (insecure: true), since callers
// may not yet hold a certificate issued by this CA.
rpc GetRootCACertificate(GetRootCACertificateRequest) returns (GetRootCACertificateResponse) {
option (docker.protobuf.plugin.tls_authorization) = { insecure: true };
};
// GetUnlockKey returns the current unlock key for the cluster for the role of the client
// asking. Restricted to callers with the "swarm-manager" role.
rpc GetUnlockKey(GetUnlockKeyRequest) returns (GetUnlockKeyResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: ["swarm-manager"] };
};
}
// NodeCA defines the RPC methods for issuing node certificates and checking
// the status of an issuance. Both methods can be called without TLS client
// authorization (insecure: true), since nodes call them before they hold a
// valid certificate.
service NodeCA {
// IssueNodeCertificate submits a certificate signing request (and,
// for new nodes, a join token); see IssueNodeCertificateRequest.
rpc IssueNodeCertificate(IssueNodeCertificateRequest) returns (IssueNodeCertificateResponse) {
option (docker.protobuf.plugin.tls_authorization) = { insecure: true };
};
// NodeCertificateStatus returns the IssuanceStatus and Certificate for the
// node identified by NodeCertificateStatusRequest.node_id.
rpc NodeCertificateStatus(NodeCertificateStatusRequest) returns (NodeCertificateStatusResponse) {
option (docker.protobuf.plugin.tls_authorization) = { insecure: true };
};
}
// NodeCertificateStatusRequest identifies the node whose certificate
// issuance status is being queried.
message NodeCertificateStatusRequest {
string node_id = 1;
}
// NodeCertificateStatusResponse carries the current issuance status and the
// certificate for the queried node.
message NodeCertificateStatusResponse {
IssuanceStatus status = 1;
Certificate certificate = 2;
}
// IssueNodeCertificateRequest asks the CA to sign the included CSR.
message IssueNodeCertificateRequest {
// DEPRECATED: Role is now selected based on which secret is matched.
NodeRole role = 1 [deprecated=true];
// CSR is the certificate signing request.
// (gogoproto.customname makes the generated Go field name "CSR".)
bytes csr = 2 [(gogoproto.customname) = "CSR"];
// Token represents a user-provided string that is necessary for new
// nodes to join the cluster
string token = 3;
// Availability allows a user to control the current scheduling status of a node
NodeSpec.Availability availability = 4;
}
// IssueNodeCertificateResponse reports the node's ID and its cluster
// membership state after the issuance request.
message IssueNodeCertificateResponse {
string node_id = 1;
NodeSpec.Membership node_membership = 2;
}
// GetRootCACertificateRequest is intentionally empty; the root CA
// certificate requires no parameters to fetch.
message GetRootCACertificateRequest {}
// GetRootCACertificateResponse carries the root CA certificate bytes.
message GetRootCACertificateResponse {
bytes certificate = 1;
}
// GetUnlockKeyRequest is intentionally empty; the unlock key is selected by
// the role of the authenticated caller.
message GetUnlockKeyRequest {}
// GetUnlockKeyResponse carries the current unlock key and the cluster
// Version it was read at.
message GetUnlockKeyResponse {
bytes unlock_key = 1;
Version version = 2 [(gogoproto.nullable) = false];
}

File diff suppressed because it is too large Load Diff

View File

@ -1,871 +0,0 @@
syntax = "proto3";
package docker.swarmkit.v1;
import "github.com/docker/swarmkit/api/specs.proto";
import "github.com/docker/swarmkit/api/objects.proto";
import "github.com/docker/swarmkit/api/types.proto";
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
// Control defines the RPC methods for controlling a cluster.
// Every method is restricted to callers with the "swarm-manager" role.
service Control {
// GetNode returns the node identified by GetNodeRequest.node_id.
rpc GetNode(GetNodeRequest) returns (GetNodeResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// ListNodes returns the nodes selected by ListNodesRequest.Filters.
rpc ListNodes(ListNodesRequest) returns (ListNodesResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// UpdateNode applies UpdateNodeRequest.Spec to the node identified by
// UpdateNodeRequest.node_id (see UpdateNodeRequest for semantics).
rpc UpdateNode(UpdateNodeRequest) returns (UpdateNodeResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// RemoveNode removes the node identified by RemoveNodeRequest.node_id.
rpc RemoveNode(RemoveNodeRequest) returns (RemoveNodeResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// GetTask returns the task identified by GetTaskRequest.task_id.
rpc GetTask(GetTaskRequest) returns (GetTaskResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// ListTasks returns the tasks selected by ListTasksRequest.Filters.
rpc ListTasks(ListTasksRequest) returns (ListTasksResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// RemoveTask removes the task identified by RemoveTaskRequest.task_id.
rpc RemoveTask(RemoveTaskRequest) returns (RemoveTaskResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// GetService returns the service identified by GetServiceRequest.service_id.
rpc GetService(GetServiceRequest) returns (GetServiceResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// ListServices returns the services selected by ListServicesRequest.Filters.
rpc ListServices(ListServicesRequest) returns (ListServicesResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// CreateService creates a service from CreateServiceRequest.Spec.
rpc CreateService(CreateServiceRequest) returns (CreateServiceResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// UpdateService updates (or rolls back) the service identified by
// UpdateServiceRequest.service_id; see UpdateServiceRequest for semantics.
rpc UpdateService(UpdateServiceRequest) returns (UpdateServiceResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// RemoveService removes the service identified by RemoveServiceRequest.service_id.
rpc RemoveService(RemoveServiceRequest) returns (RemoveServiceResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// ListServiceStatuses returns a `ListServiceStatusesResponse` with the
// status of the requested services, formed by computing the number of
// running vs desired tasks. It is provided as a shortcut or helper method,
// which allows a client to avoid having to calculate this value by listing
// all Tasks. If any service requested does not exist, it will be returned
// but with empty status values.
rpc ListServiceStatuses(ListServiceStatusesRequest) returns (ListServiceStatusesResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// GetNetwork returns the network identified by GetNetworkRequest.
rpc GetNetwork(GetNetworkRequest) returns (GetNetworkResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// ListNetworks returns the networks selected by ListNetworksRequest.
rpc ListNetworks(ListNetworksRequest) returns (ListNetworksResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// CreateNetwork creates a network from CreateNetworkRequest.
rpc CreateNetwork(CreateNetworkRequest) returns (CreateNetworkResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// RemoveNetwork removes the network identified by RemoveNetworkRequest.
rpc RemoveNetwork(RemoveNetworkRequest) returns (RemoveNetworkResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// GetCluster returns the cluster identified by GetClusterRequest.
rpc GetCluster(GetClusterRequest) returns (GetClusterResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// ListClusters returns the clusters selected by ListClustersRequest.
rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// UpdateCluster updates the cluster identified by UpdateClusterRequest.
rpc UpdateCluster(UpdateClusterRequest) returns (UpdateClusterResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// --- secret APIs ---
// GetSecret returns a `GetSecretResponse` with a `Secret` with the same
// id as `GetSecretRequest.SecretID`
// - Returns `NotFound` if the Secret with the given id is not found.
// - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty.
// - Returns an error if getting fails.
rpc GetSecret(GetSecretRequest) returns (GetSecretResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// UpdateSecret returns a `UpdateSecretResponse` with a `Secret` with the same
// id as `GetSecretRequest.SecretID`
// - Returns `NotFound` if the Secret with the given id is not found.
// - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty.
// - Returns an error if updating fails.
rpc UpdateSecret(UpdateSecretRequest) returns (UpdateSecretResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// ListSecrets returns a `ListSecretResponse` with a list of all non-internal `Secret`s being
// managed, or all secrets matching any name in `ListSecretsRequest.Names`, any
// name prefix in `ListSecretsRequest.NamePrefixes`, any id in
// `ListSecretsRequest.SecretIDs`, or any id prefix in `ListSecretsRequest.IDPrefixes`.
// - Returns an error if listing fails.
rpc ListSecrets(ListSecretsRequest) returns (ListSecretsResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// CreateSecret creates and returns a `CreateSecretResponse` with a `Secret` based
// on the provided `CreateSecretRequest.SecretSpec`.
// - Returns `InvalidArgument` if the `CreateSecretRequest.SecretSpec` is malformed,
// or if the secret data is too long or contains invalid characters.
// - Returns an error if the creation fails.
rpc CreateSecret(CreateSecretRequest) returns (CreateSecretResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// RemoveSecret removes the secret referenced by `RemoveSecretRequest.ID`.
// - Returns `InvalidArgument` if `RemoveSecretRequest.ID` is empty.
// - Returns `NotFound` if a secret named `RemoveSecretRequest.ID` is not found.
// - Returns an error if the deletion fails.
rpc RemoveSecret(RemoveSecretRequest) returns (RemoveSecretResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// --- config APIs ---
// GetConfig returns a `GetConfigResponse` with a `Config` with the same
// id as `GetConfigRequest.ConfigID`
// - Returns `NotFound` if the Config with the given id is not found.
// - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty.
// - Returns an error if getting fails.
rpc GetConfig(GetConfigRequest) returns (GetConfigResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// UpdateConfig returns a `UpdateConfigResponse` with a `Config` with the same
// id as `GetConfigRequest.ConfigID`
// - Returns `NotFound` if the Config with the given id is not found.
// - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty.
// - Returns an error if updating fails.
rpc UpdateConfig(UpdateConfigRequest) returns (UpdateConfigResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// ListConfigs returns a `ListConfigResponse` with a list of `Config`s being
// managed, or all configs matching any name in `ListConfigsRequest.Names`, any
// name prefix in `ListConfigsRequest.NamePrefixes`, any id in
// `ListConfigsRequest.ConfigIDs`, or any id prefix in `ListConfigsRequest.IDPrefixes`.
// - Returns an error if listing fails.
rpc ListConfigs(ListConfigsRequest) returns (ListConfigsResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// CreateConfig creates and returns a `CreateConfigResponse` with a `Config` based
// on the provided `CreateConfigRequest.ConfigSpec`.
// - Returns `InvalidArgument` if the `CreateConfigRequest.ConfigSpec` is malformed,
// or if the config data is too long or contains invalid characters.
// - Returns an error if the creation fails.
rpc CreateConfig(CreateConfigRequest) returns (CreateConfigResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// RemoveConfig removes the config referenced by `RemoveConfigRequest.ID`.
// - Returns `InvalidArgument` if `RemoveConfigRequest.ID` is empty.
// - Returns `NotFound` if a config named `RemoveConfigRequest.ID` is not found.
// - Returns an error if the deletion fails.
rpc RemoveConfig(RemoveConfigRequest) returns (RemoveConfigResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// --- extension APIs ---
// GetExtension returns a `GetExtensionResponse` with an `Extension` with the same
// id as `GetExtensionRequest.ExtensionId`
// - Returns `NotFound` if the Extension with the given id is not found.
// - Returns `InvalidArgument` if the `GetExtensionRequest.ExtensionId` is empty.
// - Returns an error if the get fails.
rpc GetExtension(GetExtensionRequest) returns (GetExtensionResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// CreateExtension creates an `Extension` based on the provided `CreateExtensionRequest.Extension`
// and returns a `CreateExtensionResponse`.
// - Returns `InvalidArgument` if the `CreateExtensionRequest.Extension` is malformed,
// or fails validation.
// - Returns an error if the creation fails.
rpc CreateExtension(CreateExtensionRequest) returns (CreateExtensionResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// RemoveExtension removes the extension referenced by `RemoveExtensionRequest.ID`.
// - Returns `InvalidArgument` if `RemoveExtensionRequest.ExtensionId` is empty.
// - Returns `NotFound` if an extension named `RemoveExtensionRequest.ExtensionId` is not found.
// - Returns an error if the deletion fails.
rpc RemoveExtension(RemoveExtensionRequest) returns (RemoveExtensionResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// --- resource APIs ---
// GetResource returns a `GetResourceResponse` with a `Resource` with the same
// id as `GetResourceRequest.Resource`
// - Returns `NotFound` if the Resource with the given id is not found.
// - Returns `InvalidArgument` if the `GetResourceRequest.Resource` is empty.
// - Returns an error if getting fails.
rpc GetResource(GetResourceRequest) returns (GetResourceResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// UpdateResource updates the resource with the given `UpdateResourceRequest.Resource.Id` using the given `UpdateResourceRequest.Resource` and returns a `UpdateResourceResponse`.
// - Returns `NotFound` if the Resource with the given `UpdateResourceRequest.Resource.Id` is not found.
// - Returns `InvalidArgument` if the `UpdateResourceRequest.Resource.Id` is empty.
// - Returns an error if updating fails.
rpc UpdateResource(UpdateResourceRequest) returns (UpdateResourceResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// ListResources returns a `ListResourcesResponse` with a list of `Resource`s stored in the raft store,
// or all resources matching any name in `ListConfigsRequest.Names`, any
// name prefix in `ListResourcesRequest.NamePrefixes`, any id in
// `ListResourcesRequest.ResourceIDs`, or any id prefix in `ListResourcesRequest.IDPrefixes`,
// extension name equal to `ListResourcesRequest.Extension`.
// - Returns an error if listing fails.
rpc ListResources(ListResourcesRequest) returns (ListResourcesResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// CreateResource returns a `CreateResourceResponse` after creating a `Resource` based
// on the provided `CreateResourceRequest.Resource`.
// - Returns `InvalidArgument` if the `CreateResourceRequest.Resource` is malformed,
// or if the config data is too long or contains invalid characters.
// - Returns an error if the creation fails.
rpc CreateResource(CreateResourceRequest) returns (CreateResourceResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// RemoveResource removes the `Resource` referenced by `RemoveResourceRequest.ResourceID`.
// - Returns `InvalidArgument` if `RemoveResourceRequest.ResourceID` is empty.
// - Returns `NotFound` if a resource named `RemoveResourceRequest.ResourceID` is not found.
// - Returns an error if the deletion fails.
rpc RemoveResource(RemoveResourceRequest) returns (RemoveResourceResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// --- volumes APIs ---
// CreateVolume returns a `CreateVolumeResponse` with a `Volume` based on the
// provided `CreateVolumeRequest.VolumeSpec`.
// - Returns `InvalidArgument` if the `CreateVolumeRequest.VolumeSpec` is
// malformed.
rpc CreateVolume(CreateVolumeRequest) returns (CreateVolumeResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// GetVolume returns a `GetVolumeResponse` with a Volume with the same ID
// as `GetVolumeRequest.ID`
rpc GetVolume(GetVolumeRequest) returns (GetVolumeResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// UpdateVolume updates the volume identified by UpdateVolumeRequest.
rpc UpdateVolume(UpdateVolumeRequest) returns (UpdateVolumeResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// ListVolumes returns the volumes selected by ListVolumesRequest.
rpc ListVolumes(ListVolumesRequest) returns (ListVolumesResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// RemoveVolume removes the volume identified by RemoveVolumeRequest.
rpc RemoveVolume(RemoveVolumeRequest) returns (RemoveVolumeResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
}
// GetNodeRequest identifies a single node by ID.
message GetNodeRequest {
string node_id = 1;
}
// GetNodeResponse carries the requested node.
message GetNodeResponse {
Node node = 1;
}
// ListNodesRequest carries the filters used to select nodes.
message ListNodesRequest {
// Filters describes the criteria used to select nodes.
message Filters {
repeated string names = 1;
repeated string id_prefixes = 2;
// Labels refers to engine labels, which are labels set by the user on the
// node and reported back to the managers
map<string, string> labels = 3;
// NodeLabels are labels set on the node object on the managers.
map<string, string> node_labels = 7;
repeated NodeSpec.Membership memberships = 4 [packed=false];
repeated NodeRole roles = 5 [packed=false];
// NamePrefixes matches all objects with the given prefixes
repeated string name_prefixes = 6;
}
Filters filters = 1;
}
// ListNodesResponse carries the nodes selected by the request filters.
message ListNodesResponse {
repeated Node nodes = 1;
}
// UpdateNodeRequest requests an update to the specified node. This may be used
// to request a new availability for a node, such as PAUSE. Invalid updates
// will be denied and cause an error.
message UpdateNodeRequest {
string node_id = 1;
// node_version is the Version of the node object this update is based on.
Version node_version = 2;
// spec is the new node spec to apply.
NodeSpec spec = 3;
}
// UpdateNodeResponse carries the node after the update was applied.
message UpdateNodeResponse {
Node node = 1;
}
// RemoveNodeRequest requests to delete the specified node from store.
message RemoveNodeRequest {
string node_id = 1;
// force requests removal even when the node would otherwise be rejected
// for deletion.
bool force = 2;
}
// RemoveNodeResponse is intentionally empty; success is conveyed by the
// absence of an error.
message RemoveNodeResponse {
}
// GetTaskRequest identifies a single task by ID.
message GetTaskRequest {
string task_id = 1;
}
// GetTaskResponse carries the requested task.
message GetTaskResponse {
Task task = 1;
}
// RemoveTaskRequest identifies the task to remove.
message RemoveTaskRequest {
string task_id = 1;
}
// RemoveTaskResponse is intentionally empty; success is conveyed by the
// absence of an error.
message RemoveTaskResponse {
}
// ListTasksRequest carries the filters used to select tasks.
message ListTasksRequest {
// Filters describes the criteria used to select tasks.
message Filters {
repeated string names = 1;
repeated string id_prefixes = 2;
map<string, string> labels = 3;
repeated string service_ids = 4;
repeated string node_ids = 5;
repeated docker.swarmkit.v1.TaskState desired_states = 6 [packed=false];
// NamePrefixes matches all objects with the given prefixes
repeated string name_prefixes = 7;
repeated string runtimes = 9;
// UpToDate matches tasks that are consistent with the current
// service definition.
// Note: this is intended for internal status reporting rather
// than being exposed to users. It may be removed in the future.
bool up_to_date = 8;
}
Filters filters = 1;
}
// ListTasksResponse carries the tasks selected by the request filters.
message ListTasksResponse {
repeated Task tasks = 1;
}
// CreateServiceRequest carries the spec of the service to create.
message CreateServiceRequest {
ServiceSpec spec = 1;
}
// CreateServiceResponse carries the newly created service.
message CreateServiceResponse {
Service service = 1;
}
// GetServiceRequest identifies a single service by ID.
message GetServiceRequest {
string service_id = 1;
// insert_defaults requests that default values be filled in on the
// returned service.
bool insert_defaults = 2;
}
// GetServiceResponse carries the requested service.
message GetServiceResponse {
Service service = 1;
}
// UpdateServiceRequest requests an update (or rollback) of a service.
message UpdateServiceRequest {
string service_id = 1;
// service_version is the Version of the service object this update is
// based on.
Version service_version = 2;
ServiceSpec spec = 3;
// Rollback selects whether this request applies the given spec or rolls
// the service back to its previous spec.
enum Rollback {
// This is not a rollback. The spec field of the request will
// be honored.
NONE = 0;
// Roll back the service - get spec from the service's
// previous_spec.
PREVIOUS = 1;
}
// Rollback may be set to PREVIOUS to request a rollback (the service's
// spec will be set to the value of its previous_spec field). In this
// case, the spec field of this request is ignored.
Rollback rollback = 4;
}
// UpdateServiceResponse carries the service after the update was applied.
message UpdateServiceResponse {
Service service = 1;
}
message RemoveServiceRequest {
string service_id = 1;
}
message RemoveServiceResponse {
}
message ListServicesRequest {
message Filters {
repeated string names = 1;
repeated string id_prefixes = 2;
map<string, string> labels = 3;
// NamePrefixes matches all objects with the given prefixes
repeated string name_prefixes = 4;
repeated string runtimes = 5;
}
Filters filters = 1;
}
message ListServicesResponse {
repeated Service services = 1;
}
// ListServiceStatusesRequest is a request to get the aggregate status of a
// service by computing the number of running vs desired tasks. It includes
// only a service ID.
message ListServiceStatusesRequest {
  // Services is a list of service IDs to get statuses for.
  repeated string services = 1;
}

// ListServiceStatusesResponse is a response containing the aggregate status of
// a service, formed by computing the number of running vs desired tasks. The
// values returned are only valid for the point in time at which the request is
// made.
message ListServiceStatusesResponse {
  // ServiceStatus is the aggregate task status for one service.
  message ServiceStatus {
    // ServiceID is the ID of the service this status describes
    string service_id = 1;

    // DesiredTasks is the number of tasks desired to be running according to the
    // service definition at request time. It is a uint64 because that is what
    // the replicas field on the service spec is
    uint64 desired_tasks = 2;

    // RunningTasks is the number of tasks currently in the Running state at
    // request time. This may be larger than desired tasks if, for example, a
    // service has been scaled down.
    uint64 running_tasks = 3;

    // CompletedTasks is the number of tasks in state Completed, if this
    // service is in mode ReplicatedJob or GlobalJob. This must be
    // cross-referenced with the service type, because the default value of 0
    // may mean that a service is not in a Job mode, or it may mean the Job has
    // yet to complete any Tasks.
    uint64 completed_tasks = 4;
  }

  // Statuses contains the computed status of each requested service.
  repeated ServiceStatus statuses = 1;
}
// CreateNetworkRequest creates a network from the given spec.
message CreateNetworkRequest {
  NetworkSpec spec = 1;
}

// CreateNetworkResponse contains the newly created network.
message CreateNetworkResponse {
  Network network = 1;
}

// GetNetworkRequest identifies a network to return. It carries both a name
// and an ID field; NOTE(review): which takes precedence when both are set is
// decided server-side.
message GetNetworkRequest {
  string name = 1;
  string network_id = 2;
}

// GetNetworkResponse contains the network requested in GetNetworkRequest.
message GetNetworkResponse {
  Network network = 1;
}

// RemoveNetworkRequest identifies the network to remove, by name and/or ID.
message RemoveNetworkRequest {
  string name = 1;
  string network_id = 2;
}

// RemoveNetworkResponse is an empty response indicating successful removal.
message RemoveNetworkResponse {}

// ListNetworksRequest lists networks, optionally narrowed by Filters.
message ListNetworksRequest {
  message Filters {
    repeated string names = 1;
    repeated string id_prefixes = 2;
    map<string, string> labels = 3;
    // NamePrefixes matches all objects with the given prefixes
    repeated string name_prefixes = 4;
  }

  Filters filters = 1;
}

// ListNetworksResponse contains the networks that matched the request's filters.
message ListNetworksResponse {
  repeated Network networks = 1;
}
// GetClusterRequest requests the cluster with the given ID.
message GetClusterRequest {
  string cluster_id = 1;
}

// GetClusterResponse contains the cluster requested in GetClusterRequest.
message GetClusterResponse {
  Cluster cluster = 1;
}

// ListClustersRequest lists clusters, optionally narrowed by Filters.
message ListClustersRequest {
  message Filters {
    repeated string names = 1;
    repeated string id_prefixes = 2;
    map<string, string> labels = 3;
    // NamePrefixes matches all objects with the given prefixes
    repeated string name_prefixes = 4;
  }

  Filters filters = 1;
}

// ListClustersResponse contains the clusters that matched the request's filters.
message ListClustersResponse {
  repeated Cluster clusters = 1;
}

// KeyRotation tells UpdateCluster what items to rotate
message KeyRotation {
  // WorkerJoinToken tells UpdateCluster to rotate the worker secret token.
  bool worker_join_token = 1;

  // ManagerJoinToken tells UpdateCluster to rotate the manager secret token.
  bool manager_join_token = 2;

  // ManagerUnlockKey tells UpdateCluster to rotate the manager unlock key.
  bool manager_unlock_key = 3;
}

message UpdateClusterRequest {
  // ClusterID is the cluster ID to update.
  string cluster_id = 1;

  // ClusterVersion is the version of the cluster being updated.
  Version cluster_version = 2;

  // Spec is the new spec to apply to the cluster.
  ClusterSpec spec = 3;

  // Rotation contains flags for join token and unlock key rotation
  KeyRotation rotation = 4 [(gogoproto.nullable) = false];
}

// UpdateClusterResponse contains the cluster as stored after the update.
message UpdateClusterResponse {
  Cluster cluster = 1;
}
// GetSecretRequest is the request to get a `Secret` object given a secret id.
message GetSecretRequest {
  string secret_id = 1;
}

// GetSecretResponse contains the Secret corresponding to the id in
// `GetSecretRequest`, but the `Secret.Spec.Data` field in each `Secret`
// object should be nil instead of actually containing the secret bytes.
message GetSecretResponse {
  Secret secret = 1;
}

message UpdateSecretRequest {
  // SecretID is the secret ID to update.
  string secret_id = 1;

  // SecretVersion is the version of the secret being updated.
  Version secret_version = 2;

  // Spec is the new spec to apply to the Secret
  // Only some fields are allowed to be updated.
  SecretSpec spec = 3;
}

// UpdateSecretResponse contains the secret as stored after the update.
message UpdateSecretResponse {
  Secret secret = 1;
}

// ListSecretsRequest is the request to list all non-internal secrets in the secret store,
// or all secrets filtered by (name or name prefix or id prefix) and labels.
message ListSecretsRequest {
  message Filters {
    repeated string names = 1;
    repeated string id_prefixes = 2;
    map<string, string> labels = 3;
    repeated string name_prefixes = 4;
  }

  Filters filters = 1;
}

// ListSecretsResponse contains a list of all the secrets that match the name or
// name prefix filters provided in `ListSecretsRequest`. The `Secret.Spec.Data`
// field in each `Secret` object should be nil instead of actually containing
// the secret bytes.
message ListSecretsResponse {
  repeated Secret secrets = 1;
}

// CreateSecretRequest specifies a new secret (it will not update an existing
// secret) to create.
message CreateSecretRequest {
  SecretSpec spec = 1;
}

// CreateSecretResponse contains the newly created `Secret` corresponding to the
// name in `CreateSecretRequest`. The `Secret.Spec.Data` field should be nil instead
// of actually containing the secret bytes.
message CreateSecretResponse {
  Secret secret = 1;
}

// RemoveSecretRequest contains the ID of the secret that should be removed. This
// removes all versions of the secret.
message RemoveSecretRequest {
  string secret_id = 1;
}

// RemoveSecretResponse is an empty object indicating the successful removal of
// a secret.
message RemoveSecretResponse {}
// GetConfigRequest is the request to get a `Config` object given a config id.
message GetConfigRequest {
  string config_id = 1;
}

// GetConfigResponse contains the Config corresponding to the id in
// `GetConfigRequest`.
message GetConfigResponse {
  Config config = 1;
}

message UpdateConfigRequest {
  // ConfigID is the config ID to update.
  string config_id = 1;

  // ConfigVersion is the version of the config being updated.
  Version config_version = 2;

  // Spec is the new spec to apply to the Config
  // Only some fields are allowed to be updated.
  ConfigSpec spec = 3;
}

// UpdateConfigResponse contains the config as stored after the update.
message UpdateConfigResponse {
  Config config = 1;
}

// ListConfigsRequest is the request to list all configs in the config store,
// or all configs filtered by (name or name prefix or id prefix) and labels.
message ListConfigsRequest {
  message Filters {
    repeated string names = 1;
    repeated string id_prefixes = 2;
    map<string, string> labels = 3;
    repeated string name_prefixes = 4;
  }

  Filters filters = 1;
}

// ListConfigsResponse contains a list of all the configs that match the name or
// name prefix filters provided in `ListConfigsRequest`.
message ListConfigsResponse {
  repeated Config configs = 1;
}

// CreateConfigRequest specifies a new config (it will not update an existing
// config) to create.
message CreateConfigRequest {
  ConfigSpec spec = 1;
}

// CreateConfigResponse contains the newly created `Config` corresponding to the
// name in `CreateConfigRequest`.
message CreateConfigResponse {
  Config config = 1;
}

// RemoveConfigRequest contains the ID of the config that should be removed. This
// removes all versions of the config.
message RemoveConfigRequest {
  string config_id = 1;
}

// RemoveConfigResponse is an empty object indicating the successful removal of
// a config.
message RemoveConfigResponse {}
// CreateExtensionRequest creates a new extension as specified by the provided
// parameters
message CreateExtensionRequest {
  Annotations annotations = 1;
  string description = 2;
}

// CreateExtensionResponse contains the newly created `Extension` corresponding
// to the parameters in the CreateExtensionRequest.
message CreateExtensionResponse {
  Extension extension = 1;
}

// RemoveExtensionRequest contains the ID of the extension that should be removed. This
// removes all versions of the extension.
message RemoveExtensionRequest {
  string extension_id = 1;
}

// RemoveExtensionResponse is an empty object indicating the successful removal
// of an extension.
message RemoveExtensionResponse {
}

// GetExtensionRequest is the request to get an Extension object given an
// extension id.
message GetExtensionRequest {
  string extension_id = 1;
}

// GetExtensionResponse contains the Extension corresponding to the id in
// `GetExtensionRequest`.
message GetExtensionResponse {
  Extension extension = 1;
}
// CreateResourceRequest creates a new resource specified by the included
// resource object. An existing resource will not be updated.
message CreateResourceRequest {
  Annotations annotations = 1;
  string kind = 2;
  google.protobuf.Any payload = 3;
}

// CreateResourceResponse contains the newly created `Resource` corresponding
// to the resource in the CreateResourceRequest.
message CreateResourceResponse {
  Resource resource = 1;
}

// RemoveResourceRequest contains the ID of the resource that should be removed. This
// removes all versions of the resource.
message RemoveResourceRequest {
  string resource_id = 1;
}

// RemoveResourceResponse is an empty object indicating the successful removal
// of a resource.
message RemoveResourceResponse {
}

// UpdateResourceRequest updates the resource specified by the given resource object.
message UpdateResourceRequest {
  string resource_id = 1;
  // ResourceVersion is the version of the resource being updated.
  Version resource_version = 2;

  // Annotations describes the annotations to update. If the Annotations should
  // be unchanged, then this field should be left empty. Note that the name of
  // a Resource cannot be changed, only its labels.
  Annotations annotations = 3;

  // Payload describes the new payload of the resource. If the Payload should
  // be unchanged, then this field should be left empty.
  google.protobuf.Any payload = 4;
}

// UpdateResourceResponse contains the resource as stored after the update.
message UpdateResourceResponse {
  Resource resource = 1;
}

// GetResourceRequest is the request to get a Resource object given a resource id.
message GetResourceRequest {
  string resource_id = 1;
}

// GetResourceResponse contains the Resource corresponding to the id in
// `GetResourceRequest`.
message GetResourceResponse {
  Resource resource = 1;
}

// ListResourcesRequest is the request to list all resources in the raft store,
// or all resources filtered by (name or name prefix or id prefix), labels and extension.
message ListResourcesRequest {
  message Filters {
    repeated string names = 1;
    repeated string id_prefixes = 2;
    map<string, string> labels = 3;
    repeated string name_prefixes = 4;
    // Kind restricts results to resources of the given extension kind.
    string kind = 5;
  }

  Filters filters = 1;
}

// ListResourcesResponse contains a list of all the resources that match the name or
// name prefix filters provided in `ListResourcesRequest`.
message ListResourcesResponse {
  repeated Resource resources = 1;
}
// CreateVolumeRequest creates a volume from the given spec.
message CreateVolumeRequest {
  VolumeSpec spec = 1;
}

// CreateVolumeResponse contains the newly created volume.
message CreateVolumeResponse {
  Volume volume = 1;
}

// GetVolumeRequest requests the volume with the given ID.
message GetVolumeRequest {
  string volume_id = 1;
}

// GetVolumeResponse contains the volume requested in GetVolumeRequest.
message GetVolumeResponse {
  Volume volume = 1;
}

message UpdateVolumeRequest {
  // VolumeID is the volume ID to update.
  string volume_id = 1;
  // VolumeVersion is the version of the volume being updated.
  Version volume_version = 2;
  // Spec is the new spec to apply to the volume.
  VolumeSpec spec = 3;
}

// UpdateVolumeResponse contains the volume as stored after the update.
message UpdateVolumeResponse {
  Volume volume = 1;
}

// ListVolumesRequest lists volumes, optionally narrowed by Filters.
message ListVolumesRequest {
  message Filters {
    repeated string names = 1;
    repeated string id_prefixes = 2;
    map<string, string> labels = 3;
    repeated string name_prefixes = 4;
    repeated string groups = 5;
    repeated string drivers = 6;
  }

  Filters filters = 1;
}

// ListVolumesResponse contains the volumes that matched the request's filters.
message ListVolumesResponse {
  repeated Volume volumes = 1;
}

message RemoveVolumeRequest {
  string volume_id = 1;
  // Force forces the volume to be deleted from swarmkit, regardless of
  // whether its current state would permit such an action.
  bool force = 2;
}

// RemoveVolumeResponse is an empty response indicating successful removal.
message RemoveVolumeResponse {}

View File

@ -1,59 +0,0 @@
package deepcopy
import (
"fmt"
"time"
"github.com/gogo/protobuf/types"
)
// CopierFrom can be implemented if an object knows how to copy another into itself.
type CopierFrom interface {
	// CopyFrom takes the fields from src and copies them into the target object.
	//
	// Calling this method with a nil receiver or a nil src may panic.
	CopyFrom(src interface{})
}
// Copy copies src into dst. dst and src must have the same type.
//
// If the type has a copy function defined, it will be used.
//
// Default implementations for builtin types and well known protobuf types may
// be provided.
//
// If the copy cannot be performed, this function will panic. Make sure to test
// types that use this function.
func Copy(dst, src interface{}) {
	switch d := dst.(type) {
	case *types.Any:
		s := src.(*types.Any)
		d.TypeUrl = s.TypeUrl
		// Preserve the nil-vs-empty distinction of the payload slice.
		if s.Value == nil {
			d.Value = nil
		} else {
			d.Value = make([]byte, len(s.Value))
			copy(d.Value, s.Value)
		}
	case *types.Duration:
		*d = *src.(*types.Duration)
	case *time.Duration:
		*d = *src.(*time.Duration)
	case *types.Timestamp:
		*d = *src.(*types.Timestamp)
	case *types.BoolValue:
		*d = *src.(*types.BoolValue)
	case *types.Int64Value:
		*d = *src.(*types.Int64Value)
	case CopierFrom:
		d.CopyFrom(src)
	default:
		panic(fmt.Sprintf("Copy for %T not implemented", dst))
	}
}

View File

@ -1,99 +0,0 @@
package defaults
import (
"time"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/deepcopy"
gogotypes "github.com/gogo/protobuf/types"
)
// Service is a ServiceSpec object with all fields filled in using default
// values.
var Service = api.ServiceSpec{
	Task: api.TaskSpec{
		Runtime: &api.TaskSpec_Container{
			Container: &api.ContainerSpec{
				// Containers get 10 seconds to stop gracefully by default.
				StopGracePeriod: gogotypes.DurationProto(10 * time.Second),
				PullOptions:     &api.ContainerSpec_PullOptions{},
				DNSConfig:       &api.ContainerSpec_DNSConfig{},
			},
		},
		Resources: &api.ResourceRequirements{},
		Restart: &api.RestartPolicy{
			// Restart on any exit by default, after a 5 second delay.
			Condition: api.RestartOnAny,
			Delay:     gogotypes.DurationProto(5 * time.Second),
		},
		Placement: &api.Placement{},
	},
	Update: &api.UpdateConfig{
		// Updates pause on failure, run one task at a time, and monitor
		// each updated task for 5 seconds.
		FailureAction: api.UpdateConfig_PAUSE,
		Monitor:       gogotypes.DurationProto(5 * time.Second),
		Parallelism:   1,
		Order:         api.UpdateConfig_STOP_FIRST,
	},
	Rollback: &api.UpdateConfig{
		// Rollbacks use the same defaults as updates.
		FailureAction: api.UpdateConfig_PAUSE,
		Monitor:       gogotypes.DurationProto(5 * time.Second),
		Parallelism:   1,
		Order:         api.UpdateConfig_STOP_FIRST,
	},
}
// InterpolateService returns a ServiceSpec based on the provided spec, which
// has all unspecified values filled in with default values.
//
// The caller's spec is not modified: a copy is taken first, and every default
// that is filled in is itself copied from the package-level Service defaults,
// so the result never aliases the shared default objects.
func InterpolateService(origSpec *api.ServiceSpec) *api.ServiceSpec {
	// Work on a copy so origSpec is left untouched.
	spec := origSpec.Copy()

	container := spec.Task.GetContainer()
	defaultContainer := Service.Task.GetContainer()
	if container != nil {
		if container.StopGracePeriod == nil {
			container.StopGracePeriod = &gogotypes.Duration{}
			deepcopy.Copy(container.StopGracePeriod, defaultContainer.StopGracePeriod)
		}
		if container.PullOptions == nil {
			container.PullOptions = defaultContainer.PullOptions.Copy()
		}
		if container.DNSConfig == nil {
			container.DNSConfig = defaultContainer.DNSConfig.Copy()
		}
	}

	if spec.Task.Resources == nil {
		spec.Task.Resources = Service.Task.Resources.Copy()
	}

	if spec.Task.Restart == nil {
		spec.Task.Restart = Service.Task.Restart.Copy()
	} else {
		// A restart policy was supplied but may still lack a delay.
		if spec.Task.Restart.Delay == nil {
			spec.Task.Restart.Delay = &gogotypes.Duration{}
			deepcopy.Copy(spec.Task.Restart.Delay, Service.Task.Restart.Delay)
		}
	}

	if spec.Task.Placement == nil {
		spec.Task.Placement = Service.Task.Placement.Copy()
	}

	if spec.Update == nil {
		spec.Update = Service.Update.Copy()
	} else {
		// An update config was supplied but may still lack a monitor period.
		if spec.Update.Monitor == nil {
			spec.Update.Monitor = &gogotypes.Duration{}
			deepcopy.Copy(spec.Update.Monitor, Service.Update.Monitor)
		}
	}

	if spec.Rollback == nil {
		spec.Rollback = Service.Rollback.Copy()
	} else {
		// A rollback config was supplied but may still lack a monitor period.
		if spec.Rollback.Monitor == nil {
			spec.Rollback.Monitor = &gogotypes.Duration{}
			deepcopy.Copy(spec.Rollback.Monitor, Service.Rollback.Monitor)
		}
	}

	return spec
}

File diff suppressed because it is too large Load Diff

View File

@ -1,251 +0,0 @@
syntax = "proto3";
package docker.swarmkit.v1;
import "github.com/docker/swarmkit/api/types.proto";
import "github.com/docker/swarmkit/api/objects.proto";
import "gogoproto/gogo.proto";
import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
import "google/protobuf/duration.proto";
// Dispatcher is the API provided by a manager group for agents to connect to. Agents
// connect to this service to receive task assignments and report status.
//
// API methods on this service are used only by agent nodes.
service Dispatcher {
  // Session starts an agent session with the dispatcher. The session is
  // started after the first SessionMessage is received.
  //
  // Once started, the agent is controlled with a stream of SessionMessage.
  // Agents should listen on the stream at all times for instructions.
  rpc Session(SessionRequest) returns (stream SessionMessage) {
    option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
  };

  // Heartbeat is the heartbeat method for nodes. It returns the new TTL in its
  // response. A node should send its next heartbeat earlier than now + TTL,
  // otherwise it will be deregistered from the dispatcher and its status will
  // be updated to NodeStatus_DOWN.
  rpc Heartbeat(HeartbeatRequest) returns (HeartbeatResponse) {
    option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
  };

  // UpdateTaskStatus updates status of task. Node should send such updates
  // on every status change of its tasks.
  //
  // Whether receiving batch updates or single status updates, this method
  // should be accepting. Errors should only be returned if the entire update
  // should be retried, due to data loss or other problems.
  //
  // If a task is unknown to the dispatcher, the status update should be
  // accepted regardless.
  rpc UpdateTaskStatus(UpdateTaskStatusRequest) returns (UpdateTaskStatusResponse) {
    option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
  };

  // UpdateVolumeStatus updates the status of a Volume. Like
  // UpdateTaskStatus, the node should send such updates on every status
  // change of its volumes.
  rpc UpdateVolumeStatus(UpdateVolumeStatusRequest) returns (UpdateVolumeStatusResponse) {
    option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
  };

  // Tasks is a stream of tasks state for node. Each message contains full list
  // of tasks which should be run on node, if task is not present in that list,
  // it should be terminated.
  //
  // Deprecated: use Assignments instead.
  rpc Tasks(TasksRequest) returns (stream TasksMessage) {
    option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
    option deprecated = true;
  };

  // Assignments is a stream of assignments such as tasks and secrets for node.
  // The first message in the stream contains all of the tasks and secrets
  // that are relevant to the node. Future messages in the stream are updates to
  // the set of assignments.
  rpc Assignments(AssignmentsRequest) returns (stream AssignmentsMessage) {
    option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
  };
}
// SessionRequest starts a session.
message SessionRequest {
  // Description is the description of the node starting the session.
  NodeDescription description = 1;

  // SessionID can be provided to attempt resuming an existing session. If the
  // SessionID is empty or invalid, a new SessionID will be assigned.
  //
  // See SessionMessage.SessionID for details.
  string session_id = 2;
}

// SessionMessage instructs an agent on various actions as part of the current
// session. An agent should act immediately on the contents.
message SessionMessage {
  // SessionID is allocated after a successful registration. It should be
  // used on all RPC calls after registration. A dispatcher may choose to
  // change the SessionID, at which time an agent must re-register and obtain
  // a new one.
  //
  // All Dispatcher calls after register should include the SessionID. If the
  // Dispatcher so chooses, it may reject the call with an InvalidArgument
  // error code, at which time the agent should call Register to start a new
  // session.
  //
  // As a rule, once an agent has a SessionID, it should never save it to
  // disk or try to otherwise reuse. If the agent loses its SessionID, it
  // must start a new session through a call to Register. A Dispatcher may
  // choose to reuse the SessionID, if it sees fit, but it is not advised.
  //
  // The actual implementation of the SessionID is Dispatcher specific and
  // should be treated as opaque by agents.
  //
  // From a Dispatcher perspective, there are many ways to use the SessionID
  // to ensure uniqueness of a set of client RPC calls. One method is to keep
  // the SessionID unique to every call to Register in a single Dispatcher
  // instance. This ensures that the SessionID represents the unique
  // session from a single Agent to Manager. If the Agent restarts, we
  // allocate a new session, since the restarted Agent is not aware of the
  // new SessionID.
  //
  // The most compelling use case is to support duplicate node detection. If
  // one clones a virtual machine, including certificate material, two nodes
  // may end up with the same identity. This can also happen if two identical
  // agent processes are coming from the same node. If the SessionID is
  // replicated through the cluster, we can immediately detect the condition
  // and address it.
  //
  // Extending from the case above, we can actually detect a compromised
  // identity. Coupled with provisions to rebuild node identity, we can ban
  // the compromised node identity and have the nodes re-authenticate and
  // build a new identity. At this time, an administrator can then
  // re-authorize the compromised nodes, if it was a mistake or ensure that a
  // misbehaved node can no longer connect to the cluster.
  //
  // We considered placing this field in a GRPC header. Because this is a
  // critical feature of the protocol, we thought it should be represented
  // directly in the RPC message set.
  string session_id = 1;

  // Node identifies the registering node.
  Node node = 2;

  // Managers provides a weight list of alternative dispatchers
  repeated WeightedPeer managers = 3;

  // Symmetric encryption key distributed by the lead manager. Used by agents
  // for securing network bootstrapping and communication.
  repeated EncryptionKey network_bootstrap_keys = 4;

  // Which root certificates to trust
  bytes RootCA = 5;
}

// HeartbeatRequest provides identifying properties for a single heartbeat.
message HeartbeatRequest {
  // SessionID identifies the session this heartbeat belongs to.
  string session_id = 1;
}

// HeartbeatResponse tells the agent when to send its next heartbeat.
message HeartbeatResponse {
  // Period is the duration to wait before sending the next heartbeat.
  // Well-behaved agents should update this on every heartbeat round trip.
  google.protobuf.Duration period = 1 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false];
}
message UpdateTaskStatusRequest {
  // Tasks should contain all statuses for running tasks. Only the status
  // field must be set. The spec is not required.
  string session_id = 1;

  // TaskStatusUpdate pairs a task ID with its new status.
  message TaskStatusUpdate {
    string task_id = 1;
    TaskStatus status = 2;
  }

  repeated TaskStatusUpdate updates = 3;
}

message UpdateTaskStatusResponse{
  // void
}

message UpdateVolumeStatusRequest {
  string session_id = 1;

  message VolumeStatusUpdate {
    // ID is the ID of the volume being updated. This is the Swarmkit ID,
    // not the CSI VolumeID.
    string id = 1;

    // Unpublished is set to true when the volume is affirmatively
    // unpublished on the Node side. We don't need to report that a Volume
    // is published on the node; as soon as the Volume is assigned to
    // the Node, we must assume that it has been published until informed
    // otherwise.
    //
    // Further, the Node must not send unpublished = true unless it will
    // definitely no longer attempt to call NodePublishVolume.
    bool unpublished = 2;
  }

  repeated VolumeStatusUpdate updates = 2;
}

message UpdateVolumeStatusResponse {
  // empty on purpose
}

// TasksRequest identifies the session opening a (deprecated) Tasks stream.
message TasksRequest {
  string session_id = 1;
}

message TasksMessage {
  // Tasks is the set of tasks that should be running on the node.
  // Tasks outside of this set running on the node should be terminated.
  repeated Task tasks = 1;
}

// AssignmentsRequest identifies the session opening an Assignments stream.
message AssignmentsRequest {
  string session_id = 1;
}

// Assignment is a single item (task, secret, config or volume) assigned to a
// node.
message Assignment {
  oneof item {
    Task task = 1;
    Secret secret = 2;
    Config config = 3;
    VolumeAssignment volume = 4;
  }
}

// AssignmentChange pairs an assignment with the action (update or remove) to
// take on it.
message AssignmentChange {
  enum AssignmentAction {
    UPDATE = 0 [(gogoproto.enumvalue_customname) = "AssignmentActionUpdate"];
    REMOVE = 1 [(gogoproto.enumvalue_customname) = "AssignmentActionRemove"];
  }

  Assignment assignment = 1;
  AssignmentAction action = 2;
}

message AssignmentsMessage {
  // AssignmentType specifies whether this assignment message carries
  // the full state, or is an update to an existing state.
  enum Type {
    COMPLETE = 0;
    INCREMENTAL = 1;
  }

  Type type = 1;

  // AppliesTo references the previous ResultsIn value, to chain
  // incremental updates together. For the first update in a stream,
  // AppliesTo is empty. If AppliesTo does not match the previously
  // received ResultsIn, the consumer of the stream should start a new
  // Assignments stream to re-sync.
  string applies_to = 2;

  // ResultsIn identifies the result of this assignments message, to
  // match against the next message's AppliesTo value and protect
  // against missed messages.
  string results_in = 3;

  // AssignmentChange is a set of changes to apply on this node.
  repeated AssignmentChange changes = 4;
}

View File

@ -1,111 +0,0 @@
package genericresource
import (
"github.com/docker/swarmkit/api"
)
// NewSet creates a set object: one named (string) resource per value, all
// sharing the same kind key.
func NewSet(key string, vals ...string) []*api.GenericResource {
	set := make([]*api.GenericResource, 0, len(vals))
	for _, value := range vals {
		set = append(set, NewString(key, value))
	}
	return set
}
// NewString creates a String (named) resource with the given kind and value.
func NewString(key, val string) *api.GenericResource {
	spec := &api.NamedGenericResource{
		Kind:  key,
		Value: val,
	}

	return &api.GenericResource{
		Resource: &api.GenericResource_NamedResourceSpec{NamedResourceSpec: spec},
	}
}
// NewDiscrete creates a Discrete (counted) resource with the given kind and value.
func NewDiscrete(key string, val int64) *api.GenericResource {
	spec := &api.DiscreteGenericResource{
		Kind:  key,
		Value: val,
	}

	return &api.GenericResource{
		Resource: &api.GenericResource_DiscreteResourceSpec{DiscreteResourceSpec: spec},
	}
}
// GetResource returns resources from the "resources" parameter matching the kind key.
func GetResource(kind string, resources []*api.GenericResource) []*api.GenericResource {
	var matched []*api.GenericResource

	for _, resource := range resources {
		if Kind(resource) == kind {
			matched = append(matched, resource)
		}
	}

	return matched
}
// ConsumeNodeResources removes "res" from nodeAvailableResources.
//
// Entries that are fully consumed (see remove, which also decrements discrete
// values in place) are dropped; the slice is compacted in place.
func ConsumeNodeResources(nodeAvailableResources *[]*api.GenericResource, res []*api.GenericResource) {
	if nodeAvailableResources == nil {
		return
	}

	kept := 0
next:
	for _, avail := range *nodeAvailableResources {
		for _, r := range res {
			if Kind(avail) != Kind(r) {
				continue
			}

			if remove(avail, r) {
				// Fully consumed: skip the write-back so it is dropped.
				continue next
			}
			// Not the matching element; keep scanning res.
		}

		(*nodeAvailableResources)[kept] = avail
		kept++
	}

	*nodeAvailableResources = (*nodeAvailableResources)[:kept]
}
// remove returns true if the element na is to be removed from the list.
//
// For discrete resources this has a side effect: na's value is decremented
// in place by r's value, and na is removed only when the remaining value is
// <= 0. For named resources, na is removed when its value matches r's value
// exactly. A type mismatch between na and r (named vs discrete) is ignored
// and na is kept.
func remove(na, r *api.GenericResource) bool {
	switch tr := r.Resource.(type) {
	case *api.GenericResource_DiscreteResourceSpec:
		if na.GetDiscreteResourceSpec() == nil {
			return false // Type change, ignore
		}

		// Consume from the node's available amount in place.
		na.GetDiscreteResourceSpec().Value -= tr.DiscreteResourceSpec.Value
		if na.GetDiscreteResourceSpec().Value <= 0 {
			return true
		}
	case *api.GenericResource_NamedResourceSpec:
		if na.GetNamedResourceSpec() == nil {
			return false // Type change, ignore
		}

		if tr.NamedResourceSpec.Value != na.GetNamedResourceSpec().Value {
			return false // not the right item, ignore
		}

		return true
	}

	return false
}

View File

@ -1,111 +0,0 @@
package genericresource
import (
"encoding/csv"
"fmt"
"strconv"
"strings"
"github.com/docker/swarmkit/api"
)
func newParseError(format string, args ...interface{}) error {
return fmt.Errorf("could not parse GenericResource: "+format, args...)
}
// discreteResourceVal returns an int64 if the string is a discreteResource
// and an error if it isn't
func discreteResourceVal(res string) (int64, error) {
return strconv.ParseInt(res, 10, 64)
}
// allNamedResources returns true if the array of resources are all namedResources
// e.g: res = [red, orange, green]
func allNamedResources(res []string) bool {
for _, v := range res {
if _, err := discreteResourceVal(v); err == nil {
return false
}
}
return true
}
// ParseCmd parses the Generic Resource command line argument
// and returns a list of *api.GenericResource.
func ParseCmd(cmd string) ([]*api.GenericResource, error) {
	// Exactly one CSV record is expected; an embedded newline would
	// silently yield several, so reject it up front.
	if strings.Contains(cmd, "\n") {
		return nil, newParseError("unexpected '\\n' character")
	}

	records, err := csv.NewReader(strings.NewReader(cmd)).ReadAll()
	if err != nil {
		return nil, newParseError("%v", err)
	}

	if len(records) != 1 {
		return nil, newParseError("found multiple records while parsing cmd %v", records)
	}

	return Parse(records[0])
}
// Parse parses a table of GenericResource resources.
func Parse(cmds []string) ([]*api.GenericResource, error) {
	// Group values by key: ["gpu=a" "gpu=b" "mem=1"] -> {gpu:[a b], mem:[1]}.
	grouped := make(map[string][]string)
	for _, term := range cmds {
		parts := strings.Split(term, "=")
		if len(parts) != 2 {
			return nil, newParseError("incorrect term %s, missing"+
				" '=' or malformed expression", term)
		}

		key := strings.TrimSpace(parts[0])
		grouped[key] = append(grouped[key], strings.TrimSpace(parts[1]))
	}

	var rs []*api.GenericResource
	for key, vals := range grouped {
		if count, ok := isDiscreteResource(vals); ok {
			if count < 0 {
				return nil, newParseError("cannot ask for"+
					" negative resource %s", key)
			}

			rs = append(rs, NewDiscrete(key, count))
			continue
		}

		if allNamedResources(vals) {
			rs = append(rs, NewSet(key, vals...)...)
			continue
		}

		return nil, newParseError("mixed discrete and named resources"+
			" in expression '%s=%s'", key, vals)
	}

	return rs, nil
}
// isDiscreteResource returns true if the array of resources is a
// Discrete Resource.
// e.g: res = [1]
func isDiscreteResource(values []string) (int64, bool) {
if len(values) != 1 {
return int64(0), false
}
u, err := discreteResourceVal(values[0])
if err != nil {
return int64(0), false
}
return u, true
}

View File

@ -1,203 +0,0 @@
package genericresource
import (
"fmt"
"github.com/docker/swarmkit/api"
)
// Claim assigns GenericResources to a task by taking them from the
// node's GenericResource list and storing them in the task's available list.
func Claim(nodeAvailableResources, taskAssigned *[]*api.GenericResource,
	taskReservations []*api.GenericResource) error {
	var selected []*api.GenericResource

	for _, reservation := range taskReservations {
		discrete := reservation.GetDiscreteResourceSpec()
		if discrete == nil {
			return fmt.Errorf("task should only hold Discrete type")
		}

		// Pick concrete node resources satisfying this reservation.
		picked, err := selectNodeResources(*nodeAvailableResources, discrete)
		if err != nil {
			return err
		}

		selected = append(selected, picked...)
	}

	ClaimResources(nodeAvailableResources, taskAssigned, selected)

	return nil
}
// ClaimResources adds the specified resources to the task's list
// and removes them from the node's generic resource list.
func ClaimResources(nodeAvailableResources, taskAssigned *[]*api.GenericResource,
	resSelected []*api.GenericResource) {
	// Hand the selected resources to the task...
	*taskAssigned = append(*taskAssigned, resSelected...)
	// ...and deduct them from what the node still has available.
	ConsumeNodeResources(nodeAvailableResources, resSelected)
}
// selectNodeResources picks resources of kind tr.Kind from nodeRes to satisfy
// the discrete reservation tr.
//
// When a discrete node entry of the kind is found, a single Discrete resource
// of tr.Value is selected if the node has at least tr.Value available (and
// tr.Value is non-zero), and the search stops at that entry. Named node
// entries are copied one by one until tr.Value of them have been collected.
//
// An error is returned only when nothing at all could be selected.
// NOTE(review): a partial named selection (fewer than tr.Value entries) is
// returned without an error — confirm callers expect that.
func selectNodeResources(nodeRes []*api.GenericResource,
	tr *api.DiscreteGenericResource) ([]*api.GenericResource, error) {
	var nrs []*api.GenericResource

	for _, res := range nodeRes {
		if Kind(res) != tr.Kind {
			continue
		}

		switch nr := res.Resource.(type) {
		case *api.GenericResource_DiscreteResourceSpec:
			if nr.DiscreteResourceSpec.Value >= tr.Value && tr.Value != 0 {
				nrs = append(nrs, NewDiscrete(tr.Kind, tr.Value))
			}

			// Stop at the first discrete entry of this kind.
			return nrs, nil
		case *api.GenericResource_NamedResourceSpec:
			nrs = append(nrs, res.Copy())

			if int64(len(nrs)) == tr.Value {
				return nrs, nil
			}
		}
	}

	if len(nrs) == 0 {
		return nil, fmt.Errorf("not enough resources available for task reservations: %+v", tr)
	}

	return nrs, nil
}
// Reclaim returns the resources held by a task to the node's available list,
// then sanitizes the result against the node's declared spec (nodeRes) so the
// list never exceeds, or adds kinds absent from, the spec.
func Reclaim(nodeAvailableResources *[]*api.GenericResource, taskAssigned, nodeRes []*api.GenericResource) error {
	if err := reclaimResources(nodeAvailableResources, taskAssigned); err != nil {
		return err
	}
	sanitize(nodeRes, nodeAvailableResources)
	return nil
}
// reclaimResources returns the resources in taskAssigned to
// nodeAvailableResources: discrete values are merged into the node's existing
// entry of the same kind (re-creating the entry if it dropped to zero and was
// removed), while named resources are appended back as copies.
func reclaimResources(nodeAvailableResources *[]*api.GenericResource, taskAssigned []*api.GenericResource) error {
	// The node could have been updated
	if nodeAvailableResources == nil {
		return fmt.Errorf("node no longer has any resources")
	}
	for _, res := range taskAssigned {
		switch tr := res.Resource.(type) {
		case *api.GenericResource_DiscreteResourceSpec:
			nrs := GetResource(tr.DiscreteResourceSpec.Kind, *nodeAvailableResources)
			// If the resource went down to 0 it's no longer in the
			// available list
			if len(nrs) == 0 {
				*nodeAvailableResources = append(*nodeAvailableResources, res.Copy())
			}
			// Anything other than exactly one discrete entry means the
			// kind changed type (or was just re-added above); skip the
			// merge in that case.
			if len(nrs) != 1 {
				continue // Type change
			}
			nr := nrs[0].GetDiscreteResourceSpec()
			if nr == nil {
				continue // Type change
			}
			// Fold the task's amount back into the node's entry.
			nr.Value += tr.DiscreteResourceSpec.Value
		case *api.GenericResource_NamedResourceSpec:
			*nodeAvailableResources = append(*nodeAvailableResources, res.Copy())
		}
	}
	return nil
}
// sanitize checks that nodeAvailableResources does not add resources unknown
// to the nodeSpec (nodeRes) or goes over the integer bound specified
// by the spec.
// Note this is because the user is able to update a node's resources
func sanitize(nodeRes []*api.GenericResource, nodeAvailableResources *[]*api.GenericResource) {
	// - We add the sanitized resources at the end, after
	// having removed the elements from the list
	// - When a set changes to a Discrete we also need
	// to make sure that we don't add the Discrete multiple
	// time hence, the need of a map to remember that
	var sanitized []*api.GenericResource
	kindSanitized := make(map[string]struct{})
	// w is the write index of the in-place compaction below: valid entries
	// are kept by copying them toward the front of the slice.
	w := 0
	for _, na := range *nodeAvailableResources {
		ok, nrs := sanitizeResource(nodeRes, na)
		if !ok {
			// Invalid entry: replace it with the spec's version, but
			// only once per kind (see the map note above).
			if _, ok = kindSanitized[Kind(na)]; ok {
				continue
			}
			kindSanitized[Kind(na)] = struct{}{}
			sanitized = append(sanitized, nrs...)
			continue
		}
		(*nodeAvailableResources)[w] = na
		w++
	}
	// Truncate to the kept entries, then append the replacements.
	*nodeAvailableResources = (*nodeAvailableResources)[:w]
	*nodeAvailableResources = append(*nodeAvailableResources, sanitized...)
}
// sanitizeResource checks a single available resource (res) against the node
// spec (nodeRes).
// Returns true if the element is in nodeRes and "sane"
// Returns false if the element isn't in nodeRes and "sane" and the element(s) that should be replacing it
func sanitizeResource(nodeRes []*api.GenericResource, res *api.GenericResource) (ok bool, nrs []*api.GenericResource) {
	switch na := res.Resource.(type) {
	case *api.GenericResource_DiscreteResourceSpec:
		nrs := GetResource(na.DiscreteResourceSpec.Kind, nodeRes)
		// Type change or removed: reset
		if len(nrs) != 1 {
			return false, nrs
		}
		// Type change: reset
		nr := nrs[0].GetDiscreteResourceSpec()
		if nr == nil {
			return false, nrs
		}
		// Amount change: reset
		if na.DiscreteResourceSpec.Value > nr.Value {
			return false, nrs
		}
	case *api.GenericResource_NamedResourceSpec:
		nrs := GetResource(na.NamedResourceSpec.Kind, nodeRes)
		// Type change
		if len(nrs) == 0 {
			return false, nrs
		}
		for _, nr := range nrs {
			// Type change: reset
			if nr.GetDiscreteResourceSpec() != nil {
				return false, nrs
			}
			// The named instance still exists in the spec: keep it.
			if na.NamedResourceSpec.Value == nr.GetNamedResourceSpec().Value {
				return true, nil
			}
		}
		// Removed
		return false, nil
	}
	// Discrete entry within the spec's bounds: keep it.
	return true, nil
}

View File

@ -1,54 +0,0 @@
package genericresource
import (
"strconv"
"strings"
"github.com/docker/swarmkit/api"
)
// discreteToString renders a discrete resource's integer value in base 10.
func discreteToString(d *api.GenericResource_DiscreteResourceSpec) string {
	value := d.DiscreteResourceSpec.Value
	return strconv.FormatInt(value, 10)
}
// Kind returns the kind key of the resource, or "" when the resource holds
// neither a discrete nor a named spec.
func Kind(res *api.GenericResource) string {
	if d := res.GetDiscreteResourceSpec(); d != nil {
		return d.Kind
	}
	if n := res.GetNamedResourceSpec(); n != nil {
		return n.Kind
	}
	return ""
}
// Value returns the value key of the resource as a string (discrete values
// are formatted in base 10), or "" when the resource holds neither spec.
func Value(res *api.GenericResource) string {
	if d, ok := res.Resource.(*api.GenericResource_DiscreteResourceSpec); ok {
		return discreteToString(d)
	}
	if n, ok := res.Resource.(*api.GenericResource_NamedResourceSpec); ok {
		return n.NamedResourceSpec.Value
	}
	return ""
}
// EnvFormat renders the resources as environment-variable strings: values are
// grouped by kind, and each kind becomes one "PREFIX_KIND=v1,v2,..." entry
// with the name upper-cased. Map iteration makes the output order unspecified.
func EnvFormat(res []*api.GenericResource, prefix string) []string {
	grouped := make(map[string][]string)
	for _, r := range res {
		kind, value := Kind(r), Value(r)
		grouped[kind] = append(grouped[kind], value)
	}
	out := make([]string, 0, len(res))
	for kind, values := range grouped {
		name := strings.ToUpper(prefix + "_" + kind)
		out = append(out, name+"="+strings.Join(values, ","))
	}
	return out
}

View File

@ -1,85 +0,0 @@
package genericresource
import (
"fmt"
"github.com/docker/swarmkit/api"
)
// ValidateTask validates that the task only uses integer (discrete) generic
// resources, returning an error naming the first offending kind.
func ValidateTask(resources *api.Resources) error {
	for _, res := range resources.Generic {
		if res.GetDiscreteResourceSpec() == nil {
			return fmt.Errorf("invalid argument for resource %s", Kind(res))
		}
	}
	return nil
}
// HasEnough returns true if node can satisfy the task's GenericResource request
func HasEnough(nodeRes []*api.GenericResource, taskRes *api.GenericResource) (bool, error) {
	t := taskRes.GetDiscreteResourceSpec()
	// Task reservations must always be expressed as discrete integers.
	if t == nil {
		return false, fmt.Errorf("task should only hold Discrete type")
	}
	if nodeRes == nil {
		return false, nil
	}
	nrs := GetResource(t.Kind, nodeRes)
	if len(nrs) == 0 {
		return false, nil
	}
	// Only the first match is type-switched; this assumes a kind's entries
	// are homogeneous: one discrete entry holding the full amount, or one
	// named entry per instance (so the match count is what's compared).
	// NOTE(review): consistent with reclaimResources' len(nrs)==1 handling,
	// but not enforced here — confirm.
	switch nr := nrs[0].Resource.(type) {
	case *api.GenericResource_DiscreteResourceSpec:
		if t.Value > nr.DiscreteResourceSpec.Value {
			return false, nil
		}
	case *api.GenericResource_NamedResourceSpec:
		if t.Value > int64(len(nrs)) {
			return false, nil
		}
	}
	return true, nil
}
// HasResource checks if there is enough "res" in the "resources" argument
// Discrete: res must match in type and hold at least the candidate's value.
// Named: scanning continues past non-matching instances of the same kind and
// succeeds on an exact value match.
func HasResource(res *api.GenericResource, resources []*api.GenericResource) bool {
	for _, r := range resources {
		if Kind(res) != Kind(r) {
			continue
		}
		switch rtype := r.Resource.(type) {
		case *api.GenericResource_DiscreteResourceSpec:
			// Same kind but res is not discrete: type mismatch.
			if res.GetDiscreteResourceSpec() == nil {
				return false
			}
			if res.GetDiscreteResourceSpec().Value < rtype.DiscreteResourceSpec.Value {
				return false
			}
			return true
		case *api.GenericResource_NamedResourceSpec:
			if res.GetNamedResourceSpec() == nil {
				return false
			}
			// Different instance of the same named kind: keep scanning.
			if res.GetNamedResourceSpec().Value != rtype.NamedResourceSpec.Value {
				continue
			}
			return true
		}
	}
	return false
}

View File

@ -1,772 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/docker/swarmkit/api/health.proto
package api
import (
context "context"
fmt "fmt"
raftselector "github.com/docker/swarmkit/manager/raftselector"
_ "github.com/docker/swarmkit/protobuf/plugin"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
metadata "google.golang.org/grpc/metadata"
peer "google.golang.org/grpc/peer"
status "google.golang.org/grpc/status"
io "io"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
rafttime "time"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type HealthCheckResponse_ServingStatus int32
const (
HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0
HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1
HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
)
var HealthCheckResponse_ServingStatus_name = map[int32]string{
0: "UNKNOWN",
1: "SERVING",
2: "NOT_SERVING",
}
var HealthCheckResponse_ServingStatus_value = map[string]int32{
"UNKNOWN": 0,
"SERVING": 1,
"NOT_SERVING": 2,
}
func (x HealthCheckResponse_ServingStatus) String() string {
return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
}
func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_288522a148aed5ad, []int{1, 0}
}
type HealthCheckRequest struct {
Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
}
func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} }
func (*HealthCheckRequest) ProtoMessage() {}
func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_288522a148aed5ad, []int{0}
}
func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *HealthCheckRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_HealthCheckRequest.Merge(m, src)
}
func (m *HealthCheckRequest) XXX_Size() int {
return m.Size()
}
func (m *HealthCheckRequest) XXX_DiscardUnknown() {
xxx_messageInfo_HealthCheckRequest.DiscardUnknown(m)
}
var xxx_messageInfo_HealthCheckRequest proto.InternalMessageInfo
type HealthCheckResponse struct {
Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=docker.swarmkit.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
}
func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} }
func (*HealthCheckResponse) ProtoMessage() {}
func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_288522a148aed5ad, []int{1}
}
func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *HealthCheckResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_HealthCheckResponse.Merge(m, src)
}
func (m *HealthCheckResponse) XXX_Size() int {
return m.Size()
}
func (m *HealthCheckResponse) XXX_DiscardUnknown() {
xxx_messageInfo_HealthCheckResponse.DiscardUnknown(m)
}
var xxx_messageInfo_HealthCheckResponse proto.InternalMessageInfo
func init() {
proto.RegisterEnum("docker.swarmkit.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value)
proto.RegisterType((*HealthCheckRequest)(nil), "docker.swarmkit.v1.HealthCheckRequest")
proto.RegisterType((*HealthCheckResponse)(nil), "docker.swarmkit.v1.HealthCheckResponse")
}
func init() {
proto.RegisterFile("github.com/docker/swarmkit/api/health.proto", fileDescriptor_288522a148aed5ad)
}
var fileDescriptor_288522a148aed5ad = []byte{
// 328 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4e, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xc9, 0x4f, 0xce, 0x4e, 0x2d, 0xd2, 0x2f, 0x2e,
0x4f, 0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x4f, 0x2c, 0xc8, 0xd4, 0xcf, 0x48, 0x4d, 0xcc, 0x29,
0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x82, 0xa8, 0xd0, 0x83, 0xa9, 0xd0, 0x2b,
0x33, 0x94, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x4b, 0xeb, 0x83, 0x58, 0x10, 0x95, 0x52, 0xe6,
0x78, 0x8c, 0x05, 0xab, 0x48, 0x2a, 0x4d, 0xd3, 0x2f, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x83, 0x52,
0x10, 0x8d, 0x4a, 0x7a, 0x5c, 0x42, 0x1e, 0x60, 0x2b, 0x9d, 0x33, 0x52, 0x93, 0xb3, 0x83, 0x52,
0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53,
0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x60, 0x5c, 0xa5, 0x05, 0x8c, 0x5c, 0xc2, 0x28, 0x1a,
0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0x7c, 0xb9, 0xd8, 0x8a, 0x4b, 0x12, 0x4b, 0x4a, 0x8b,
0xc1, 0x1a, 0xf8, 0x8c, 0x4c, 0xf5, 0x30, 0xdd, 0xae, 0x87, 0x45, 0xa3, 0x5e, 0x30, 0xc8, 0xe0,
0xbc, 0xf4, 0x60, 0xb0, 0xe6, 0x20, 0xa8, 0x21, 0x4a, 0x56, 0x5c, 0xbc, 0x28, 0x12, 0x42, 0xdc,
0x5c, 0xec, 0xa1, 0x7e, 0xde, 0x7e, 0xfe, 0xe1, 0x7e, 0x02, 0x0c, 0x20, 0x4e, 0xb0, 0x6b, 0x50,
0x98, 0xa7, 0x9f, 0xbb, 0x00, 0xa3, 0x10, 0x3f, 0x17, 0xb7, 0x9f, 0x7f, 0x48, 0x3c, 0x4c, 0x80,
0xc9, 0xa8, 0x92, 0x8b, 0x0d, 0x62, 0x91, 0x50, 0x3e, 0x17, 0x2b, 0xd8, 0x32, 0x21, 0x35, 0x82,
0xae, 0x01, 0xfb, 0x5b, 0x4a, 0x9d, 0x48, 0x57, 0x2b, 0x89, 0x9e, 0x5a, 0xf7, 0x6e, 0x06, 0x13,
0x3f, 0x17, 0x2f, 0x58, 0xa1, 0x6e, 0x6e, 0x62, 0x5e, 0x62, 0x7a, 0x6a, 0x91, 0x93, 0xca, 0x89,
0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1,
0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e,
0xe1, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36, 0x70, 0xd0, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
0xc9, 0xe8, 0xd3, 0x02, 0x0c, 0x02, 0x00, 0x00,
}
// authenticatedWrapperHealthServer decorates a HealthServer with a
// per-request authorization callback. (Generated by protoc-gen-gogo;
// comments added for review only — regeneration will drop them.)
type authenticatedWrapperHealthServer struct {
	local     HealthServer
	authorize func(context.Context, []string) error
}
// NewAuthenticatedWrapperHealthServer wraps local so every RPC is gated by
// authorize before being forwarded.
func NewAuthenticatedWrapperHealthServer(local HealthServer, authorize func(context.Context, []string) error) HealthServer {
	return &authenticatedWrapperHealthServer{
		local:     local,
		authorize: authorize,
	}
}
// Check authorizes the caller for the "swarm-manager" role, then delegates to
// the wrapped server.
func (p *authenticatedWrapperHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) {

	if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
		return nil, err
	}
	return p.local.Check(ctx, r)
}
func (m *HealthCheckRequest) Copy() *HealthCheckRequest {
if m == nil {
return nil
}
o := &HealthCheckRequest{}
o.CopyFrom(m)
return o
}
func (m *HealthCheckRequest) CopyFrom(src interface{}) {
o := src.(*HealthCheckRequest)
*m = *o
}
func (m *HealthCheckResponse) Copy() *HealthCheckResponse {
if m == nil {
return nil
}
o := &HealthCheckResponse{}
o.CopyFrom(m)
return o
}
func (m *HealthCheckResponse) CopyFrom(src interface{}) {
o := src.(*HealthCheckResponse)
*m = *o
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// HealthClient is the client API for Health service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type HealthClient interface {
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
}
type healthClient struct {
cc *grpc.ClientConn
}
func NewHealthClient(cc *grpc.ClientConn) HealthClient {
return &healthClient{cc}
}
func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
out := new(HealthCheckResponse)
err := c.cc.Invoke(ctx, "/docker.swarmkit.v1.Health/Check", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// HealthServer is the server API for Health service.
type HealthServer interface {
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
}
// UnimplementedHealthServer can be embedded to have forward compatible implementations.
type UnimplementedHealthServer struct {
}
func (*UnimplementedHealthServer) Check(ctx context.Context, req *HealthCheckRequest) (*HealthCheckResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
}
func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
s.RegisterService(&_Health_serviceDesc, srv)
}
func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(HealthCheckRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HealthServer).Check(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/docker.swarmkit.v1.Health/Check",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Health_serviceDesc = grpc.ServiceDesc{
ServiceName: "docker.swarmkit.v1.Health",
HandlerType: (*HealthServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Check",
Handler: _Health_Check_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "github.com/docker/swarmkit/api/health.proto",
}
func (m *HealthCheckRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *HealthCheckRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *HealthCheckRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Service) > 0 {
i -= len(m.Service)
copy(dAtA[i:], m.Service)
i = encodeVarintHealth(dAtA, i, uint64(len(m.Service)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *HealthCheckResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *HealthCheckResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *HealthCheckResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Status != 0 {
i = encodeVarintHealth(dAtA, i, uint64(m.Status))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
// encodeVarintHealth writes v as a protobuf varint ending just before offset
// in dAtA and returns the start index of the encoded value (buffers are
// filled back-to-front by MarshalToSizedBuffer).
func encodeVarintHealth(dAtA []byte, offset int, v uint64) int {
	offset -= sovHealth(v)
	base := offset
	for v >= 1<<7 {
		// Low 7 bits plus continuation flag.
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
type raftProxyHealthServer struct {
local HealthServer
connSelector raftselector.ConnProvider
localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error)
}
// NewRaftProxyHealthServer builds a HealthServer that forwards requests to
// the current raft leader (via connSelector), serving locally when this node
// is itself the leader. localCtxMod/remoteCtxMod let callers adjust the
// context before local or remote dispatch.
func NewRaftProxyHealthServer(local HealthServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) HealthServer {
	// redirectChecker records the caller's address under the "redirect"
	// metadata key and rejects requests that already carry one, preventing
	// redirect loops between managers.
	redirectChecker := func(ctx context.Context) (context.Context, error) {
		p, ok := peer.FromContext(ctx)
		if !ok {
			return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context")
		}
		addr := p.Addr.String()
		md, ok := metadata.FromIncomingContext(ctx)
		if ok && len(md["redirect"]) != 0 {
			return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
		}
		if !ok {
			md = metadata.New(map[string]string{})
		}
		md["redirect"] = append(md["redirect"], addr)
		return metadata.NewOutgoingContext(ctx, md), nil
	}
	remoteMods := []func(context.Context) (context.Context, error){redirectChecker}
	remoteMods = append(remoteMods, remoteCtxMod)

	var localMods []func(context.Context) (context.Context, error)
	if localCtxMod != nil {
		localMods = []func(context.Context) (context.Context, error){localCtxMod}
	}

	return &raftProxyHealthServer{
		local:         local,
		connSelector:  connSelector,
		localCtxMods:  localMods,
		remoteCtxMods: remoteMods,
	}
}
// runCtxMods applies each context modifier in order, stopping at the first
// error and returning the context produced so far.
func (p *raftProxyHealthServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) {
	var err error
	for _, mod := range ctxMods {
		ctx, err = mod(ctx)
		if err != nil {
			return ctx, err
		}
	}
	return ctx, nil
}
// pollNewLeaderConn polls the connection selector every 500ms until it yields
// a leader connection whose "Raft" service reports SERVING, or until ctx is
// done.
func (p *raftProxyHealthServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
	ticker := rafttime.NewTicker(500 * rafttime.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			conn, err := p.connSelector.LeaderConn(ctx)
			if err != nil {
				return nil, err
			}

			client := NewHealthClient(conn)

			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
			// Keep polling until the leader's Raft service is healthy.
			if err != nil || resp.Status != HealthCheckResponse_SERVING {
				continue
			}
			return conn, nil
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
}
// Check forwards the health check to the raft leader, serving locally when
// this node is the leader. A remote failure whose message looks like a
// dropped leader connection ("is closing", "the connection is unavailable",
// "connection error") triggers one wait-for-new-leader retry.
func (p *raftProxyHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) {
	conn, err := p.connSelector.LeaderConn(ctx)
	if err != nil {
		if err == raftselector.ErrIsLeader {
			// This node is the leader: apply local context mods and
			// answer directly.
			ctx, err = p.runCtxMods(ctx, p.localCtxMods)
			if err != nil {
				return nil, err
			}
			return p.local.Check(ctx, r)
		}
		return nil, err
	}
	modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods)
	if err != nil {
		return nil, err
	}

	resp, err := NewHealthClient(conn).Check(modCtx, r)
	if err != nil {
		// Only retry on transport-level errors suggesting the leader
		// connection went away; other errors propagate unchanged.
		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
			return resp, err
		}
		conn, err := p.pollNewLeaderConn(ctx)
		if err != nil {
			if err == raftselector.ErrIsLeader {
				return p.local.Check(ctx, r)
			}
			return nil, err
		}
		return NewHealthClient(conn).Check(modCtx, r)
	}
	return resp, err
}
func (m *HealthCheckRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Service)
if l > 0 {
n += 1 + l + sovHealth(uint64(l))
}
return n
}
func (m *HealthCheckResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Status != 0 {
n += 1 + sovHealth(uint64(m.Status))
}
return n
}
// sovHealth returns the encoded size in bytes of x as a protobuf varint
// (7 payload bits per byte; x|1 makes the zero value occupy one byte).
func sovHealth(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}
// sozHealth returns the varint size of x after zig-zag encoding (used for
// sint-typed fields).
func sozHealth(x uint64) (n int) {
	return sovHealth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *HealthCheckRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&HealthCheckRequest{`,
`Service:` + fmt.Sprintf("%v", this.Service) + `,`,
`}`,
}, "")
return s
}
func (this *HealthCheckResponse) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&HealthCheckResponse{`,
`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
`}`,
}, "")
return s
}
func valueToStringHealth(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *HealthCheckRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowHealth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: HealthCheckRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: HealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowHealth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthHealth
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthHealth
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Service = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipHealth(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthHealth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *HealthCheckResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowHealth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: HealthCheckResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: HealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
m.Status = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowHealth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Status |= HealthCheckResponse_ServingStatus(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipHealth(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthHealth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipHealth(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowHealth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowHealth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowHealth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthHealth
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupHealth
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthHealth
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthHealth = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowHealth = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupHealth = fmt.Errorf("proto: unexpected end of group")
)

View File

@ -1,34 +0,0 @@
syntax = "proto3";
// See: https://github.com/grpc/grpc-go/blob/master/health/grpc_health_v1/health.proto
//
// We use the same health check service proto description defined in the gRPC documentation,
// including the authorization check. This requires our own implementation of the health
// package located in `manager/health`.
//
// For more infos, refer to:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
package docker.swarmkit.v1;
import "gogoproto/gogo.proto";
import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
// Health mirrors the standard gRPC health-checking service, with swarm TLS
// authorization added: only callers holding the "swarm-manager" role may
// call Check.
service Health {
  rpc Check(HealthCheckRequest) returns (HealthCheckResponse) {
    option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
  };
}
// HealthCheckRequest names the service whose health is being queried.
message HealthCheckRequest {
  // Service to query. NOTE(review): the upstream gRPC health protocol treats
  // an empty string as "overall server health" — confirm the swarmkit
  // implementation follows that convention.
  string service = 1;
}
// HealthCheckResponse reports the serving status of the queried service.
message HealthCheckResponse {
  // ServingStatus enumerates health states; UNKNOWN is the zero/default.
  // Values are unprefixed to match the upstream gRPC health protocol
  // definition referenced in this file's header; renaming them would break
  // the generated code and wire/JSON compatibility.
  enum ServingStatus {
    UNKNOWN = 0;
    SERVING = 1;
    NOT_SERVING = 2;
  }
  ServingStatus status = 1;
}

File diff suppressed because it is too large Load Diff

View File

@ -1,188 +0,0 @@
syntax = "proto3";
package docker.swarmkit.v1;
import "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
// LogStream defines the stream from which the log message came.
enum LogStream {
option (gogoproto.goproto_enum_prefix) = false;
option (gogoproto.enum_customname) = "LogStream";
LOG_STREAM_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "LogStreamUnknown"];
LOG_STREAM_STDOUT = 1 [(gogoproto.enumvalue_customname) = "LogStreamStdout"];
LOG_STREAM_STDERR = 2 [(gogoproto.enumvalue_customname) = "LogStreamStderr"];
}
message LogSubscriptionOptions {
// Streams defines which log streams should be sent from the task source.
// Empty means send all the messages.
repeated LogStream streams = 1 [packed=false];
// Follow instructs the publisher to continue sending log messages as they
// are produced, after satisfying the initial query.
bool follow = 2;
// Tail defines how many messages relative to the log stream to send when
// starting the stream.
//
// Positive values will skip that number of messages from the start of the
// stream before publishing.
//
// Negative values will specify messages relative to the end of the stream,
// offset by one. We can say that the last (-n-1) lines are returned when n
// < 0. As reference, -1 would mean send no log lines (typically used with
// follow), -2 would return the last log line, -11 would return the last 10
// and so on.
//
// The default value of zero will return all logs.
//
// Note that this is very different from the Docker API.
int64 tail = 3;
// Since indicates that only log messages produced after this timestamp
// should be sent.
// Note: can't use stdtime because this field is nullable.
google.protobuf.Timestamp since = 4;
}
// LogSelector will match logs from ANY of the defined parameters.
//
// For the best effect, the client should use the least specific parameter
// possible. For example, if they want to listen to all the tasks of a service,
// they should use the service id, rather than specifying the individual tasks.
message LogSelector {
repeated string service_ids = 1;
repeated string node_ids = 2;
repeated string task_ids = 3;
}
// LogContext marks the context from which a log message was generated.
message LogContext {
string service_id = 1;
string node_id = 2;
string task_id = 3;
}
// LogAttr is an extra key/value pair that may be have been set by users
message LogAttr {
string key = 1;
string value = 2;
}
// LogMessage
message LogMessage {
// Context identifies the source of the log message.
LogContext context = 1 [(gogoproto.nullable) = false];
// Timestamp is the time at which the message was generated.
// Note: can't use stdtime because this field is nullable.
google.protobuf.Timestamp timestamp = 2;
// Stream identifies the stream of the log message, stdout or stderr.
LogStream stream = 3;
// Data is the raw log message, as generated by the application.
bytes data = 4;
// Attrs is a list of key value pairs representing additional log details
// that may have been returned from the logger
repeated LogAttr attrs = 5 [(gogoproto.nullable) = false];
}
// Logs defines the methods for retrieving task logs messages from a cluster.
service Logs {
// SubscribeLogs starts a subscription with the specified selector and options.
//
// The subscription will be distributed to relevant nodes and messages will
// be collected and sent via the returned stream.
//
// The subscription will end with an EOF.
rpc SubscribeLogs(SubscribeLogsRequest) returns (stream SubscribeLogsMessage) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
}
message SubscribeLogsRequest {
// LogSelector describes the logs to which the subscriber is
LogSelector selector = 1;
LogSubscriptionOptions options = 2;
}
message SubscribeLogsMessage {
repeated LogMessage messages = 1 [(gogoproto.nullable) = false];
}
// LogBroker defines the API used by the worker to send task logs back to a
// manager. A client listens for subscriptions then optimistically retrieves
// logs satisfying said subscriptions, calling PublishLogs for results that are
// relevant.
//
// The structure of ListenSubscriptions is similar to the Dispatcher API but
// decoupled to allow log distribution to work outside of the regular task
// flow.
service LogBroker {
// ListenSubscriptions starts a subscription stream for the node. For each
// message received, the node should attempt to satisfy the subscription.
//
// Log messages that match the provided subscription should be sent via
// PublishLogs.
//
// Both workers and managers are authorized to call this RPC.
rpc ListenSubscriptions(ListenSubscriptionsRequest) returns (stream SubscriptionMessage) {
option (docker.protobuf.plugin.tls_authorization) = {
roles: "swarm-worker"
roles: "swarm-manager"
};
}
// PublishLogs receives sets of log messages destined for a single
// subscription identifier.
//
// Both workers and managers are authorized to call this RPC.
rpc PublishLogs(stream PublishLogsMessage) returns (PublishLogsResponse) {
option (docker.protobuf.plugin.tls_authorization) = {
roles: "swarm-worker"
roles: "swarm-manager"
};
}
}
// ListenSubscriptionsRequest is a placeholder to begin listening for
// subscriptions.
message ListenSubscriptionsRequest { }
// SubscriptionMessage instructs the listener to start publishing messages for
// the stream or end a subscription.
//
// If Options.Follow == false, the worker should end the subscription on its own.
message SubscriptionMessage {
// ID identifies the subscription.
string id = 1;
// Selector defines which sources should be sent for the subscription.
LogSelector selector = 2;
// Options specify how the subscription should be satisfied.
LogSubscriptionOptions options = 3;
// Close will be true if the node should shutdown the subscription with the
// provided identifier.
bool close = 4;
}
// PublishLogsMessage carries a batch of log messages from a worker back to
// the manager for a single subscription.
message PublishLogsMessage {
// SubscriptionID identifies which subscription the set of messages should
// be sent to. We can think of this as a "mail box" for the subscription.
string subscription_id = 1;
// Messages is the log message for publishing.
repeated LogMessage messages = 2 [(gogoproto.nullable) = false];
// Close is a boolean for whether or not the client has completed its log
// stream. When close is called, the manager can hang up the subscription.
// Any further logs from this subscription are an error condition. Any
// messages included when close is set can be discarded.
bool close = 3;
}
// PublishLogsResponse is the (currently empty) response to PublishLogs.
message PublishLogsResponse { }

File diff suppressed because it is too large Load Diff

View File

@ -1,556 +0,0 @@
syntax = "proto3";
package docker.swarmkit.v1;
import "github.com/docker/swarmkit/api/types.proto";
import "github.com/docker/swarmkit/api/specs.proto";
import "google/protobuf/timestamp.proto";
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
// This file contains definitions for all first-class objects in the cluster
// API. Such types typically have a corresponding specification, with the
// naming XXXSpec, but not all.
// Meta contains metadata about objects. Every object contains a meta field.
message Meta {
// Version tracks the current version of the object.
Version version = 1 [(gogoproto.nullable) = false];
// Object timestamps.
// Note: can't use stdtime because these fields are nullable.
// CreatedAt is the time at which the object was created.
google.protobuf.Timestamp created_at = 2;
// UpdatedAt is the time at which the object was last updated.
google.protobuf.Timestamp updated_at = 3;
}
// Node provides the internal node state as seen by the cluster.
message Node {
// Registers this type with the store, with watches filterable by the
// selectors listed below.
option (docker.protobuf.plugin.store_object) = {
watch_selectors: {
id: true
id_prefix: true
name: true
name_prefix: true
custom: true
custom_prefix: true
role: true
membership: true
}
};
// ID specifies the identity of the node.
string id = 1;
// Meta carries object versioning and timestamp metadata.
Meta meta = 2 [(gogoproto.nullable) = false];
// Spec defines the desired state of the node as specified by the user.
// The system will honor this and will *never* modify it.
NodeSpec spec = 3 [(gogoproto.nullable) = false];
// Description encapsulated the properties of the Node as reported by the
// agent.
NodeDescription description = 4;
// Status provides the current status of the node, as seen by the manager.
NodeStatus status = 5 [(gogoproto.nullable) = false];
// ManagerStatus provides the current status of the node's manager
// component, if the node is a manager.
ManagerStatus manager_status = 6;
// DEPRECATED: Use Attachments to find the ingress network
// The node attachment to the ingress network.
NetworkAttachment attachment = 7 [deprecated=true];
// Certificate is the TLS certificate issued for the node, if any.
Certificate certificate = 8 [(gogoproto.nullable) = false];
// Role is the *observed* role for this node. It differs from the
// desired role set in Node.Spec.Role because the role here is only
// updated after the Raft member list has been reconciled with the
// desired role from the spec.
//
// This field represents the current reconciled state. If an action is
// to be performed, first verify the role in the cert. This field only
// shows the privilege level that the CA would currently grant when
// issuing or renewing the node's certificate.
NodeRole role = 9;
// Attachments enumerates the network attachments for the node to set up an
// endpoint on the node to be used for load balancing. Each overlay
// network, including ingress network, will have an NetworkAttachment.
repeated NetworkAttachment attachments = 10;
// VXLANUDPPort specifies the UDP port for VXLAN traffic.
// This information is passed from cluster object to individual nodes.
uint32 VXLANUDPPort = 11;
}
// Service provides the internal service state, pairing the user-provided
// spec with runtime status such as endpoint and update information.
message Service {
option (docker.protobuf.plugin.store_object) = {
watch_selectors: {
id: true
id_prefix: true
name: true
name_prefix: true
custom: true
custom_prefix: true
}
};
// ID specifies the identity of the service.
string id = 1;
// Meta carries object versioning and timestamp metadata.
Meta meta = 2 [(gogoproto.nullable) = false];
// Spec defines the desired state of the service as specified by the user.
ServiceSpec spec = 3 [(gogoproto.nullable) = false];
// SpecVersion versions Spec, to identify changes in the spec. Note that
// this is not directly comparable to the service's Version.
Version spec_version = 10;
// PreviousSpec is the previous service spec that was in place before
// "Spec".
ServiceSpec previous_spec = 6;
// PreviousSpecVersion versions PreviousSpec. Note that this is not
// directly comparable to the service's Version.
Version previous_spec_version = 11;
// Runtime state of service endpoint. This may be different
// from the spec version because the user may not have entered
// the optional fields like node_port or virtual_ip and it
// could be auto allocated by the system.
Endpoint endpoint = 4;
// UpdateStatus contains the status of an update, if one is in
// progress.
UpdateStatus update_status = 5;
// JobStatus contains the status of a Service that is in one of the Job
// modes. It is absent on Replicated and Global services.
JobStatus job_status = 12;
// PendingDelete indicates that this service's deletion has been requested.
// Services, as well as all service-level resources, can only be deleted
// after all of the service's containers have properly shut down.
// When a user requests a deletion, we just flip this flag
// the deallocator will take it from there - it will start monitoring
// this service's tasks, and proceed to delete the service itself (and
// potentially its associated resources also marked for deletion) when
// all of its tasks are gone
bool pending_delete = 7;
}
// Endpoint specified all the network parameters required to
// correctly discover and load balance a service
message Endpoint {
// Spec is the user-provided endpoint configuration.
EndpointSpec spec = 1;
// Runtime state of the exposed ports which may carry
// auto-allocated swarm ports in addition to the user
// configured information.
repeated PortConfig ports = 2;
// An endpoint attachment specifies the data that the process
// of attaching an endpoint to a network creates.
// VirtualIP specifies a set of networks this endpoint will be attached to
// and the IP addresses the target service will be made available under.
message VirtualIP {
// NetworkID for which this endpoint attachment was created.
string network_id = 1;
// A virtual IP is used to address this service in IP
// layer that the client can use to send requests to
// this service. A DNS A/AAAA query on the service
// name might return this IP to the client. This is
// strictly a logical IP and there may not be any
// interfaces assigned this IP address or any route
// created for this address. More than one to
// accommodate for both IPv4 and IPv6
string addr = 2;
}
// VirtualIPs specifies the IP addresses under which this endpoint will be
// made available.
repeated VirtualIP virtual_ips = 3 [(gogoproto.customname) = "VirtualIPs"];
}
// Task specifies the parameters for implementing a Spec. A task is effectively
// immutable and idempotent. Once it is dispatched to a node, it will not be
// dispatched to another node.
message Task {
option (docker.protobuf.plugin.store_object) = {
watch_selectors: {
id: true
id_prefix: true
name: true
name_prefix: true
custom: true
custom_prefix: true
service_id: true
node_id: true
slot: true
desired_state: true
}
};
// ID specifies the identity of the task.
string id = 1;
// Meta carries object versioning and timestamp metadata.
Meta meta = 2 [(gogoproto.nullable) = false];
// Spec defines the desired state of the task as specified by the user.
// The system will honor this and will *never* modify it.
TaskSpec spec = 3 [(gogoproto.nullable) = false];
// SpecVersion is copied from Service, to identify which version of the
// spec this task has. Note that this is not directly comparable to the
// service's Version.
Version spec_version = 14;
// ServiceID indicates the service under which this task is orchestrated. This
// should almost always be set.
string service_id = 4;
// Slot is the service slot number for a task.
// For example, if a replicated service has replicas = 2, there will be a
// task with slot = 1, and another with slot = 2.
uint64 slot = 5;
// NodeID indicates the node to which the task is assigned. If this field
// is empty or not set, the task is unassigned.
string node_id = 6;
// Annotations defines the names and labels for the runtime, as set by
// the cluster manager.
//
// As backup, if this field has an empty name, the runtime will
// allocate a unique name for the actual container.
//
// NOTE(stevvooe): The preserves the ability for us to making naming
// decisions for tasks in orchestrator, albeit, this is left empty for now.
Annotations annotations = 7 [(gogoproto.nullable) = false];
// ServiceAnnotations is a direct copy of the service name and labels when
// this task is created.
//
// Labels set here will *not* be propagated to the runtime target, such as a
// container. Use labels on the runtime target for that purpose.
Annotations service_annotations = 8 [(gogoproto.nullable) = false];
// Status is the currently observed status of the task.
TaskStatus status = 9 [(gogoproto.nullable) = false];
// DesiredState is the target state for the task. It is set to
// TaskStateRunning when a task is first created, and changed to
// TaskStateShutdown if the manager wants to terminate the task. This field
// is only written by the manager.
TaskState desired_state = 10;
// List of network attachments by the task.
repeated NetworkAttachment networks = 11;
// A copy of runtime state of service endpoint from Service
// object to be distributed to agents as part of the task.
Endpoint endpoint = 12;
// LogDriver specifies the selected log driver to use for the task. Agent
// processes should always favor the value in this field.
//
// If present in the TaskSpec, this will be a copy of that value. The
// orchestrator may choose to insert a value here, which should be honored,
// such a cluster default or policy-based value.
//
// If not present, the daemon's default will be used.
Driver log_driver = 13;
// AssignedGenericResources lists the generic resources granted to this task.
repeated GenericResource assigned_generic_resources = 15;
// JobIteration is the iteration number of the Job-mode Service that this
// task belongs to.
Version job_iteration = 16;
// Volumes is a list of VolumeAttachments for this task. It specifies which
// volumes this task is allocated.
repeated VolumeAttachment volumes = 17;
}
// NetworkAttachment specifies the network parameters of attachment to
// a single network by an object such as task or node.
message NetworkAttachment {
// Network state as a whole becomes part of the object so that
// it always is available for use in agents so that agents
// don't have any other dependency during execution.
Network network = 1;
// List of IPv4/IPv6 addresses that are assigned to the object
// as part of getting attached to this network.
repeated string addresses = 2;
// List of aliases by which a task is resolved in a network
repeated string aliases = 3;
// Map of all the driver attachment options for this network
map<string,string> driver_attachment_opts = 4;
}
// Network provides the internal network state, pairing the user-provided
// spec with driver and IPAM runtime state.
message Network {
option (docker.protobuf.plugin.store_object) = {
watch_selectors: {
id: true
id_prefix: true
name: true
name_prefix: true
custom: true
custom_prefix: true
}
};
// ID specifies the identity of the network.
string id = 1;
// Meta carries object versioning and timestamp metadata.
Meta meta = 2 [(gogoproto.nullable) = false];
// Spec defines the desired state of the network as specified by the user.
NetworkSpec spec = 3 [(gogoproto.nullable) = false];
// Driver specific operational state provided by the network driver.
Driver driver_state = 4;
// Runtime state of IPAM options. This may not reflect the
// ipam options from NetworkSpec.
IPAMOptions ipam = 5 [(gogoproto.customname) = "IPAM"];
// PendingDelete indicates that this network's deletion has been requested.
// Services, as well as all service-level resources, can only be deleted
// after all of the service's containers have properly shut down.
// When a user requests a deletion of this network, we just flip this flag
// the deallocator will take it from there - it will start monitoring
// the services that still use this network, and proceed to delete
// this network when all of these services are gone
bool pending_delete = 6;
}
// Cluster provides global cluster settings.
message Cluster {
option (docker.protobuf.plugin.store_object) = {
watch_selectors: {
id: true
id_prefix: true
name: true
name_prefix: true
custom: true
custom_prefix: true
}
};
// ID specifies the identity of the cluster.
string id = 1;
// Meta carries object versioning and timestamp metadata.
Meta meta = 2 [(gogoproto.nullable) = false];
// Spec defines the desired state of the cluster as specified by the user.
ClusterSpec spec = 3 [(gogoproto.nullable) = false];
// RootCA contains key material for the root CA.
RootCA root_ca = 4 [(gogoproto.nullable)=false, (gogoproto.customname) = "RootCA"];
// Symmetric encryption key distributed by the lead manager. Used by agents
// for securing network bootstrapping and communication.
repeated EncryptionKey network_bootstrap_keys = 5;
// Logical clock used to timestamp every key. It allows other managers
// and agents to unambiguously identify the older key to be deleted when
// a new key is allocated on key rotation.
uint64 encryption_key_lamport_clock = 6;
// BlacklistedCertificates tracks certificates that should no longer
// be honored. It's a mapping from CN -> BlacklistedCertificate.
// NOTE(review): entries presumably correspond to nodes removed from the
// swarm. Their certificates should effectively be blacklisted.
map<string, BlacklistedCertificate> blacklisted_certificates = 8;
// UnlockKeys defines the keys that lock node data at rest. For example,
// this would contain the key encrypting key (KEK) that will encrypt the
// manager TLS keys at rest and the raft encryption keys at rest.
// If the key is empty, the node will be unlocked (will not require a key
// to start up from a shut down state).
repeated EncryptionKey unlock_keys = 9;
// FIPS specifies whether this cluster should be in FIPS mode. This changes
// the format of the join tokens, and nodes that are not FIPS-enabled should
// reject joining the cluster. Nodes that report themselves to be non-FIPS
// should be rejected from the cluster.
bool fips = 10 [(gogoproto.customname) = "FIPS"];
// This field specifies default subnet pools for global scope networks. If
// unspecified, Docker will use the predefined subnets as it works on older releases.
// Format Example : {"20.20.0.0/16", "20.20.0.0/16"}
repeated string defaultAddressPool = 11;
// This flag specifies the default subnet size of global scope networks by giving
// the length of the subnet masks for every such network
uint32 subnetSize = 12;
// VXLANUDPPort specifies the UDP port for VXLAN traffic.
uint32 VXLANUDPPort = 13;
}
// Secret represents a secret that should be passed to a container or a node,
// and is immutable.
message Secret {
option (docker.protobuf.plugin.store_object) = {
watch_selectors: {
id: true
id_prefix: true
name: true
name_prefix: true
custom: true
custom_prefix: true
}
};
// ID specifies the identity of the secret.
string id = 1;
// Meta carries object versioning and timestamp metadata.
Meta meta = 2 [(gogoproto.nullable) = false];
// Spec contains the actual secret data, as well as any context around the
// secret data that the user provides.
SecretSpec spec = 3 [(gogoproto.nullable) = false];
// Whether the secret is an internal secret (not set by a user) or not.
bool internal = 4;
}
// Config represents a set of configuration files that should be passed to a
// container.
message Config {
option (docker.protobuf.plugin.store_object) = {
watch_selectors: {
id: true
id_prefix: true
name: true
name_prefix: true
custom: true
custom_prefix: true
}
};
// ID specifies the identity of the config.
string id = 1;
// Meta carries object versioning and timestamp metadata.
Meta meta = 2 [(gogoproto.nullable) = false];
// Spec contains the actual config data, as well as any context around the
// config data that the user provides.
ConfigSpec spec = 3 [(gogoproto.nullable) = false];
}
// Resource is a top-level object with externally defined content and indexing.
// SwarmKit can serve as a store for these objects without understanding their
// meanings.
message Resource {
option (docker.protobuf.plugin.store_object) = {
watch_selectors: {
id: true
id_prefix: true
name: true
name_prefix: true
custom: true
custom_prefix: true
kind: true
}
};
// ID specifies the identity of the resource.
string id = 1 [(gogoproto.customname) = "ID"];
// Meta carries object versioning and timestamp metadata.
Meta meta = 2 [(gogoproto.nullable) = false];
// Annotations hold the name and labels for the resource.
Annotations annotations = 3 [(gogoproto.nullable) = false];
// Kind identifies this class of object. It is essentially a namespace
// to keep IDs or indices from colliding between unrelated Resource
// objects. This must correspond to the name of an Extension.
string kind = 4;
// Payload bytes. This data is not interpreted in any way by SwarmKit.
// By convention, it should be a marshalled protocol buffers message.
google.protobuf.Any payload = 5;
}
// Extension declares a type of "resource" object. This message provides some
// metadata about the objects.
message Extension {
option (docker.protobuf.plugin.store_object) = {
watch_selectors: {
id: true
id_prefix: true
name: true
name_prefix: true
custom: true
custom_prefix: true
}
};
// ID specifies the identity of the extension.
string id = 1 [(gogoproto.customname) = "ID"];
// Meta carries object versioning and timestamp metadata.
Meta meta = 2 [(gogoproto.nullable) = false];
// Annotations hold the name and labels for the extension.
Annotations annotations = 3 [(gogoproto.nullable) = false];
// Description is a human-readable summary of the extension's purpose.
string description = 4;
// TODO(aaronl): Add optional indexing capabilities. It would be
// extremely useful be able to automatically introspect protobuf, json,
// etc. objects and automatically index them based on a schema and field
// paths defined here.
//
//oneof Schema {
// google.protobuf.Descriptor protobuf = 1;
// bytes json = 2;
//}
//
//Schema schema = 5;
//
// // Indices, with values expressed as Go templates.
//repeated IndexEntry index_templates = 6;
}
// Volume is the top-level object describing a volume usable by Swarmkit. The
// Volume contains the user's VolumeSpec, the Volume's status, and the Volume
// object that was returned by the CSI Plugin when the volume was created.
message Volume {
option (docker.protobuf.plugin.store_object) = {
watch_selectors: {
id: true
id_prefix: true
name: true
name_prefix: true
custom: true
custom_prefix: true
}
};
// ID is the swarmkit-internal ID for this volume object. This has no
// relation to the CSI volume identifier provided by the CSI Plugin.
string id = 1;
// Meta carries object versioning and timestamp metadata.
Meta meta = 2 [(gogoproto.nullable) = false];
// Spec is the desired state of the Volume, as provided by the user.
VolumeSpec spec = 3 [(gogoproto.nullable) = false];
// PublishStatus is the status of the volume as it pertains to the various
// nodes it is in use on.
repeated VolumePublishStatus publish_status = 4;
// VolumeInfo contains information about the volume originating from the
// CSI plugin when the volume is created.
VolumeInfo volume_info = 5;
// PendingDelete indicates that this Volume is being removed from Swarm.
// Before a Volume can be removed, we must call the DeleteVolume on the
// Controller. Because of this, we cannot immediately remove the Volume
// when a user wishes to delete it. Instead, we will mark a Volume with
// PendingDelete = true, which instructs Swarm to go through the work of
// removing the volume and then delete it when finished.
bool pending_delete = 6;
}

File diff suppressed because it is too large Load Diff

View File

@ -1,151 +0,0 @@
syntax = "proto3";
package docker.swarmkit.v1;
import "github.com/docker/swarmkit/api/objects.proto";
import "github.com/docker/swarmkit/api/types.proto";
import "go.etcd.io/etcd/raft/v3/raftpb/raft.proto";
import weak "gogoproto/gogo.proto";
import weak "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
// Raft defines the RPC communication between raft nodes.
//
// All methods require the caller to present a certificate with the
// "swarm-manager" role (see the tls_authorization options below).
service Raft {
// ProcessRaftMessage sends a raft message to be processed on a raft member, it is
// called from the RaftMember willing to send a message to its destination ('To' field)
rpc ProcessRaftMessage(ProcessRaftMessageRequest) returns (ProcessRaftMessageResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// StreamRaftMessage accepts a stream of raft messages of type StreamRaftMessageRequest
// to be processed on a raft member, returning a StreamRaftMessageResponse
// when processing of the streamed messages is complete. A single stream corresponds
// to a single raft message, which may be disassembled and streamed as individual messages.
// It is called from the Raft leader, which uses it to stream messages to a raft member.
rpc StreamRaftMessage(stream StreamRaftMessageRequest) returns (StreamRaftMessageResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// ResolveAddress returns the address where the node with the given ID can be reached.
rpc ResolveAddress(ResolveAddressRequest) returns (ResolveAddressResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
}
// RaftMembership defines RPCs for adding and removing members from the
// cluster. These RPCs must always run on the leader, so they are in a separate
// service to support the raft proxy.
service RaftMembership {
// Join adds a RaftMember to the raft cluster.
rpc Join(JoinRequest) returns (JoinResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
// Leave removes a RaftMember from the raft cluster.
rpc Leave(LeaveRequest) returns (LeaveResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
}
// RaftMember describes a single member of the raft cluster.
message RaftMember {
// RaftID specifies the internal ID used by the manager in a raft context, it can never be modified
// and is used only for information purposes
uint64 raft_id = 1;
// NodeID is the node's ID.
string node_id = 2;
// Addr specifies the address of the member
string addr = 3;
// Status provides the current status of the manager from the perspective of another manager.
RaftMemberStatus status = 4 [(gogoproto.nullable) = false];
}
// JoinRequest asks the leader to admit a new member to the raft cluster.
message JoinRequest {
// Addr specifies the address of the member
string addr = 1;
}
// JoinResponse reports the outcome of a Join, including the current
// membership set the new member needs to participate.
message JoinResponse {
// RaftID is the ID assigned to the new member.
uint64 raft_id = 1;
// Members is the membership set of the cluster.
repeated RaftMember members = 2;
// RemovedMembers is a list of members that have been removed from
// the cluster, so the new node can avoid communicating with them.
repeated uint64 removed_members = 3 [packed=false];
}
// LeaveRequest asks the leader to remove a member from the raft cluster.
message LeaveRequest {
// Node identifies the member that should leave the cluster.
RaftMember node = 1;
}
// LeaveResponse is the (currently empty) response to Leave.
message LeaveResponse {}
// ProcessRaftMessageRequest wraps a single raftpb message for delivery to a
// raft member.
message ProcessRaftMessageRequest {
// Deep-copy generation is disabled because raftpb.Message is an external
// type not handled by the swarmkit codegen plugin.
option (docker.protobuf.plugin.deepcopy) = false;
raftpb.Message message = 1;
}
// ProcessRaftMessageResponse is the (currently empty) response to
// ProcessRaftMessage.
message ProcessRaftMessageResponse {}
// Raft message streaming request.
message StreamRaftMessageRequest {
// Deep-copy generation is disabled because raftpb.Message is an external
// type not handled by the swarmkit codegen plugin.
option (docker.protobuf.plugin.deepcopy) = false;
raftpb.Message message = 1;
}
// Raft message streaming response.
message StreamRaftMessageResponse {}
// ResolveAddressRequest asks for the reachable address of a raft member.
message ResolveAddressRequest {
// raft_id is the ID to resolve to an address.
uint64 raft_id = 1;
}
// ResolveAddressResponse carries the resolved member address.
message ResolveAddressResponse {
// Addr specifies the address of the member
string addr = 1;
}
// Contains one of many protobuf encoded objects to replicate
// over the raft backend with a request ID to track when the
// action is effectively applied
message InternalRaftRequest {
// ID tracks this request so its application can be acknowledged.
uint64 id = 1;
// Action lists the store operations to apply as a unit.
repeated StoreAction action = 2 [(gogoproto.nullable) = false];
}
// TODO(stevvooe): Storage actions may belong in another protobuf file. They
// aren't necessarily first-class "types" in the cluster schema.
// StoreActionKind defines the operation to take on the store for the target of
// a storage action.
enum StoreActionKind {
option (gogoproto.goproto_enum_prefix) = false;
option (gogoproto.enum_customname) = "StoreActionKind";
UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "StoreActionKindUnknown"]; // default value, invalid
STORE_ACTION_CREATE = 1 [(gogoproto.enumvalue_customname) = "StoreActionKindCreate"];
STORE_ACTION_UPDATE = 2 [(gogoproto.enumvalue_customname) = "StoreActionKindUpdate"];
STORE_ACTION_REMOVE = 3 [(gogoproto.enumvalue_customname) = "StoreActionKindRemove"];
}
// StoreAction defines a target and operation to apply on the storage system.
message StoreAction {
// Action selects create/update/remove semantics for the target.
StoreActionKind action = 1;
// Target is the object the action applies to; exactly one may be set.
oneof target {
Node node = 2;
Service service = 3;
Task task = 4;
Network network = 5;
Cluster cluster = 6;
Secret secret = 7;
Resource resource = 8;
Extension extension = 9;
Config config = 10;
Volume volume = 11;
}
}

File diff suppressed because it is too large Load Diff

View File

@ -1,34 +0,0 @@
syntax = "proto3";
package docker.swarmkit.v1;
import "github.com/docker/swarmkit/api/types.proto";
import "gogoproto/gogo.proto";
import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
// Allocator is the API provided by a manager group for agents to control the allocation of certain entities.
//
// API methods on this service are used only by agent nodes.
service ResourceAllocator {
// AttachNetwork allocates an attachment of the given container to a network.
rpc AttachNetwork(AttachNetworkRequest) returns (AttachNetworkResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
};
// DetachNetwork releases a previously allocated network attachment.
rpc DetachNetwork(DetachNetworkRequest) returns (DetachNetworkResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
};
}
// AttachNetworkRequest asks for a network attachment for a container.
message AttachNetworkRequest {
// Config describes the network and per-network settings to attach with.
NetworkAttachmentConfig config = 1;
// ContainerID identifies the container being attached.
string container_id = 2;
}
// AttachNetworkResponse returns the identifier of the created attachment.
message AttachNetworkResponse {
string attachment_id = 1;
}
// DetachNetworkRequest identifies the attachment to release.
message DetachNetworkRequest {
string attachment_id = 1;
}
// DetachNetworkResponse is the (currently empty) response to DetachNetwork.
message DetachNetworkResponse {}

File diff suppressed because it is too large Load Diff

View File

@ -1,45 +0,0 @@
syntax = "proto3";
package docker.swarmkit.v1;
import "github.com/docker/swarmkit/api/objects.proto";
import "github.com/docker/swarmkit/api/raft.proto";
import weak "gogoproto/gogo.proto";
// StoreSnapshot is used to store snapshots of the store.
message StoreSnapshot {
// TODO(aaronl): The current method of assembling a StoreSnapshot
// structure and marshalling it is not optimal. It may be better to
// write out nodes, networks, tasks, etc. one at a time to an io.Writer
// using gogo-protobuf's io.DelimitedWriter. A new value of the version
// field could support this approach.
repeated Node nodes = 1;
repeated Service services = 2;
repeated Network networks = 3;
repeated Task tasks = 4;
repeated Cluster clusters = 5;
repeated Secret secrets = 6;
repeated Resource resources = 7;
repeated Extension extensions = 8;
repeated Config configs = 9;
repeated Volume volumes = 10;
}
// ClusterSnapshot stores cluster membership information in snapshots.
message ClusterSnapshot {
// Members is the set of raft members at snapshot time.
repeated RaftMember members = 1;
// Removed lists raft IDs that have been removed from the cluster.
repeated uint64 removed = 2 [packed=false];
}
// Snapshot bundles the store contents and cluster membership, tagged with a
// format version.
message Snapshot {
enum Version {
// V0 is the initial version of the StoreSnapshot message.
V0 = 0;
}
// Version identifies the snapshot format.
Version version = 1;
// Membership captures the raft membership at snapshot time.
ClusterSnapshot membership = 2 [(gogoproto.nullable) = false];
// Store captures the full object store contents.
StoreSnapshot store = 3 [(gogoproto.nullable) = false];
}

File diff suppressed because it is too large Load Diff

View File

@ -1,576 +0,0 @@
syntax = "proto3";
package docker.swarmkit.v1;
import "github.com/docker/swarmkit/api/types.proto";
import "gogoproto/gogo.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/any.proto";
import "google/protobuf/wrappers.proto";
// Specs are container objects for user provided input. All creations and
// updates are done through spec types. As a convention, user input from a spec
// is never touched in created objects. This allows one to verify that the
// users intent has not been modified.
//
// Put differently, spec types can be said to represent the desired state of
// the system. In situations where modifications need to be made to a
// particular component, API objects will either contain a copy of the spec
// component or a different representation to reflect allocation or resolution.
message NodeSpec {
// Annotations hold the user-defined name and labels for the node.
Annotations annotations = 1 [(gogoproto.nullable) = false];
// Membership is the admission state of a node with respect to the cluster.
enum Membership {
option (gogoproto.goproto_enum_prefix) = false;
PENDING = 0 [(gogoproto.enumvalue_customname) = "NodeMembershipPending"];
ACCEPTED = 1 [(gogoproto.enumvalue_customname) = "NodeMembershipAccepted"];
}
// Availability is the scheduling state of a node.
enum Availability {
option (gogoproto.goproto_enum_prefix) = false;
// Active nodes.
ACTIVE = 0 [(gogoproto.enumvalue_customname) = "NodeAvailabilityActive"];
// Paused nodes won't be considered by the scheduler, preventing any
// further task to run on them.
PAUSE = 1 [(gogoproto.enumvalue_customname) = "NodeAvailabilityPause"];
// Drained nodes are paused and any task already running on them will
// be evicted.
DRAIN = 2 [(gogoproto.enumvalue_customname) = "NodeAvailabilityDrain"];
}
// DesiredRole defines the role the node should have.
NodeRole desired_role = 2;
// Membership controls the admission of the node into the cluster.
Membership membership = 3;
// Availability allows a user to control the current scheduling status of a
// node.
Availability availability = 4;
}
// ServiceSpec defines the properties of a service.
//
// A service instructs the cluster in orchestrating repeated instances of a
// template, implemented as tasks. Based on the number of instances, scheduling
// strategy and restart policy, a number of application-level behaviors can be
// defined.
message ServiceSpec {
// Annotations hold the user-defined name and labels for the service.
Annotations annotations = 1 [(gogoproto.nullable) = false];
// Task defines the task template this service will spawn.
TaskSpec task = 2 [(gogoproto.nullable) = false];
// Mode selects how tasks are orchestrated: as a replicated or global
// service, or as a one-off replicated or global job.
oneof mode {
ReplicatedService replicated = 3;
GlobalService global = 4;
ReplicatedJob replicated_job = 10;
GlobalJob global_job = 11;
}
// Update contains settings which affect updates.
UpdateConfig update = 6;
// Rollback contains settings which affect rollbacks of updates.
UpdateConfig rollback = 9;
// ServiceSpec.Networks has been deprecated and is replaced by
// Networks field in Task (TaskSpec.Networks).
// This field (ServiceSpec.Networks) is kept for compatibility.
// In case TaskSpec.Networks does not exist, ServiceSpec.Networks
// is still honored if it exists.
repeated NetworkAttachmentConfig networks = 7 [deprecated=true];
// Service endpoint specifies the user provided configuration
// to properly discover and load balance a service.
EndpointSpec endpoint = 8;
}
// ReplicatedService sets the reconciliation target to certain number of replicas.
message ReplicatedService {
// Replicas is the desired number of concurrently running tasks.
uint64 replicas = 1;
}
// GlobalService represents global service.
message GlobalService {
// Empty message for now.
}
// ReplicatedJob is a certain type of one-off job which executes many Tasks in
// parallel until the specified number of Tasks have succeeded.
message ReplicatedJob {
// MaxConcurrent indicates the maximum number of Tasks that should be
// executing simultaneously at any given time.
uint64 max_concurrent = 1;
// TotalCompletions sets the total number of Tasks desired to run to
// completion. This is also the absolute maximum number of Tasks that will
// be executed in parallel. That is, if this number is smaller than
// MaxConcurrent, only this many replicas will run.
uint64 total_completions = 2;
}
// GlobalJob is a type of one-off job which executes one Task on every node
// matching the service's placement constraints.
message GlobalJob {
// Empty message for now.
}
// TaskSpec specifies the runtime, resources, restart policy, placement and
// networking parameters shared by every task spawned from a service.
message TaskSpec {
  // Runtime selects what this task executes: a bare network attachment, a
  // container, or a generic (plugin-defined) runtime.
  oneof runtime {
    NetworkAttachmentSpec attachment = 8;
    ContainerSpec container = 1;
    GenericRuntimeSpec generic = 10;
  }

  // Resource requirements for the container.
  ResourceRequirements resources = 2;

  // RestartPolicy specifies what to do when a task fails or finishes.
  RestartPolicy restart = 4;

  // Placement specifies node selection constraints
  Placement placement = 5;

  // LogDriver specifies the log driver to use for the task. Any runtime will
  // direct logs into the specified driver for the duration of the task.
  Driver log_driver = 6;

  // Networks specifies the list of network attachment
  // configurations (which specify the network and per-network
  // aliases) that this task spec is bound to.
  repeated NetworkAttachmentConfig networks = 7;

  // ForceUpdate is a counter that triggers an update even if no relevant
  // parameters have been changed. We do this to allow forced restarts
  // using the same reconciliation-based mechanism that performs rolling
  // updates.
  uint64 force_update = 9;

  // ResourceReferences provides a generic way to specify resources that
  // are used by this task, and should be sent down to agents along with
  // the task. Inside the runtime field there may be more specific
  // information about how to use the resource, but ResourceReferences
  // establishes the relationship at the store level, and instructs the
  // dispatcher to send the related objects.
  //
  // ResourceReferences is a list of ResourceReferences used by the task.
  repeated ResourceReference resource_references = 11 [(gogoproto.nullable) = false];
}
// ResourceReference identifies a cluster resource used by a task by its ID
// and type, so the relationship is visible at the store level.
message ResourceReference {
  // ResourceID is the store ID of the referenced object.
  string resource_id = 1;
  // ResourceType describes what kind of object the ID refers to.
  ResourceType resource_type = 2;
}
// GenericRuntimeSpec describes a plugin-defined runtime: a kind string plus
// an opaque, runtime-specific payload.
message GenericRuntimeSpec {
  // Kind names the runtime that interprets the payload.
  string kind = 1;
  // Payload carries the runtime-specific configuration.
  google.protobuf.Any payload = 2;
}
// NetworkAttachmentSpec specifies runtime parameters required to attach
// a container to a network.
message NetworkAttachmentSpec {
  // ContainerID specifies a unique ID of the container for which
  // this attachment is for.
  string container_id = 1;
}
// Container specifies runtime parameters for a container.
message ContainerSpec {
  // image defines the image reference, as specified in the
  // distribution/reference package. This may include a registry host, name,
  // tag or digest.
  //
  // The field will be directly passed to the engine pulling. Well-behaved
  // service definitions will use immutable references, either through tags
  // that don't change or verifiable digests.
  string image = 1;

  // Labels defines labels to be added to the container at creation time. If
  // collisions with system labels occur, these labels will be overridden.
  //
  // This field *must* remain compatible with the Labels field of
  // Annotations.
  map<string, string> labels = 2;

  // Command to run in the container. The first element is a path to the
  // executable and the following elements are treated as arguments.
  //
  // If command is empty, execution will fall back to the image's entrypoint.
  //
  // Command should only be used when overriding entrypoint.
  repeated string command = 3;

  // Args specifies arguments provided to the image's entrypoint.
  //
  // If Command and Args are provided, Args will be appended to Command.
  repeated string args = 4;

  // Hostname specifies the hostname that will be set on containers created by docker swarm.
  // All containers for a given service will have the same hostname.
  string hostname = 14;

  // Env specifies the environment variables for the container in NAME=VALUE
  // format. These must be compliant with [IEEE Std
  // 1003.1-2001](http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html).
  repeated string env = 5;

  // Dir defines the working directory to set for the container process.
  string dir = 6;

  // User specifies the user that should be employed to run the container.
  //
  // Note that the primary group may be specified by appending the group name
  // or id to the user name, separated by a `:`. This syntax is
  // `<user>:<group>`.
  string user = 7;

  // Groups specifies supplementary groups available to the user.
  repeated string groups = 11;

  // Privileges specifies security configuration/permissions.
  Privileges privileges = 22;

  // Init declares that a custom init will be running inside the container;
  // if null, use the daemon's configured settings.
  google.protobuf.BoolValue init = 23;

  // TTY declares that a TTY should be attached to the standard streams,
  // including stdin if it is still open.
  bool tty = 13 [(gogoproto.customname) = "TTY"];

  // OpenStdin declares that the standard input (stdin) should be open.
  bool open_stdin = 18;

  // ReadOnly declares that the container root filesystem is read-only.
  // This only impacts the root filesystem, not additional mounts (including
  // tmpfs). For additional mounts that are not part of the initial rootfs,
  // they will be decided by the modes passed in the mount definition.
  bool read_only = 19;

  // StopSignal defines the signal to stop the container.
  string stop_signal = 20;

  // Mounts is the set of mounts to attach to the container.
  repeated Mount mounts = 8 [(gogoproto.nullable) = false];

  // StopGracePeriod is the grace period for stopping the container before
  // forcefully killing the container.
  // Note: Can't use stdduration here because this needs to be nullable.
  google.protobuf.Duration stop_grace_period = 9;

  // PullOptions allows one to parameterize an image pull.
  message PullOptions {
    // RegistryAuth is the registry auth token obtained from the client, required
    // to pull private images. This is the unmodified JSON used as part of
    // the `X-Registry-Auth` header.
    // TODO(nishanttotla): This field will later be deprecated
    string registry_auth = 64;
  }

  // PullOptions parameterize the behavior of image pulls.
  PullOptions pull_options = 10;

  // SecretReference contains references to zero or more secrets that
  // will be exposed to the container.
  repeated SecretReference secrets = 12;

  // ConfigReference contains references to zero or more configs that
  // will be exposed to the container.
  repeated ConfigReference configs = 21;

  // Hosts allow additional entries to be specified in /etc/hosts
  // that associates IP addresses with hostnames.
  // Detailed documentation is available in:
  // http://man7.org/linux/man-pages/man5/hosts.5.html
  // IP_address canonical_hostname [aliases...]
  //
  // The format of the Hosts in swarmkit follows the same as
  // above.
  // This is different from `docker run --add-host <hostname>:<ip>`
  // where format is `<hostname>:<ip>`
  repeated string hosts = 17;

  // DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
  // Detailed documentation is available in:
  // http://man7.org/linux/man-pages/man5/resolv.conf.5.html
  // TODO: domain is not supported yet
  message DNSConfig {
    // Nameservers specifies the IP addresses of the name servers
    repeated string nameservers = 1;
    // Search specifies the search list for host-name lookup
    repeated string search = 2;
    // Options allows certain internal resolver variables to be modified
    repeated string options = 3;
  }

  // DNSConfig allows one to specify DNS related configuration in resolv.conf
  DNSConfig dns_config = 15 [(gogoproto.customname) = "DNSConfig"];

  // Healthcheck describes how to check the container is healthy. If the
  // container is considered unhealthy, it will be destroyed, its creating
  // task will exit and a new task will be rescheduled elsewhere. A container
  // is considered unhealthy after `Retries` number of consecutive failures.
  HealthConfig healthcheck = 16;

  // Isolation enumerates the container isolation levels used on Windows.
  enum Isolation {
    option (gogoproto.goproto_enum_prefix) = false;
    // ISOLATION_DEFAULT uses whatever default value from the container runtime
    ISOLATION_DEFAULT = 0 [(gogoproto.enumvalue_customname) = "ContainerIsolationDefault"];
    // ISOLATION_PROCESS forces windows container isolation
    ISOLATION_PROCESS = 1 [(gogoproto.enumvalue_customname) = "ContainerIsolationProcess"];
    // ISOLATION_HYPERV forces Hyper-V isolation
    ISOLATION_HYPERV = 2 [(gogoproto.enumvalue_customname) = "ContainerIsolationHyperV"];
  }

  // Isolation defines the isolation level for windows containers (default, process, hyperv).
  // Runtimes that don't support it ignore that field
  Isolation isolation = 24;

  // PidsLimit prevents OS resource damage caused by applications inside the
  // container using a fork bomb attack.
  // NOTE(review): the field name is lowerCamelCase contrary to proto style;
  // renaming it would change the JSON mapping, so it is kept as-is.
  int64 pidsLimit = 25;

  // Sysctls sets namespaced kernel parameters (sysctls) in the container. This
  // option is equivalent to passing --sysctl to docker run.
  //
  // Note that while options are subject to the same restrictions as arguments
  // passed to the --sysctl flag on docker run, those options are not further
  // validated to ensure that they are safe or sensible in a clustered
  // environment.
  //
  // Additionally, sysctls are not validated for support in the underlying
  // daemon. For information about supported options, refer to the
  // documentation at:
  //
  // https://docs.docker.com/engine/reference/commandline/run/#configure-namespaced-kernel-parameters-sysctls-at-runtime
  map<string, string> sysctls = 26;

  // CapabilityAdd sets the list of capabilities to add to the default capability list
  repeated string capability_add = 27;

  // CapabilityDrop sets the list of capabilities to drop from the default capability list
  repeated string capability_drop = 28;

  // Ulimit is a single resource limit (name plus soft/hard values).
  message Ulimit {
    string name = 1;
    int64 soft = 2;
    int64 hard = 3;
  }

  // Ulimits defines the list of ulimits to set in the container. This option
  // is equivalent to passing --ulimit to docker run.
  repeated Ulimit ulimits = 29;
}
// EndpointSpec defines the properties that can be configured to
// access and loadbalance the service.
message EndpointSpec {
  // ResolutionMode specifies the mode of resolution to use for
  // internal loadbalancing between tasks which are all within
  // the cluster. This is sometimes called the east-west data path.
  enum ResolutionMode {
    option (gogoproto.goproto_enum_prefix) = false;
    // VIP resolution mode specifies that the
    // service resolves to a logical IP and the requests
    // are sent to that logical IP. Packets hitting that
    // logical IP are load balanced to a chosen backend.
    VIP = 0 [(gogoproto.enumvalue_customname) = "ResolutionModeVirtualIP"];
    // DNSRR resolution mode specifies that the
    // service directly gets resolved to one of the
    // backend IP and the client directly initiates a
    // request towards the actual backend. This requires
    // that the client does not cache the DNS responses
    // when the DNS response TTL is 0.
    DNSRR = 1 [(gogoproto.enumvalue_customname) = "ResolutionModeDNSRoundRobin"];
  }

  // Mode selects how the service is resolved for internal load balancing.
  ResolutionMode mode = 1;

  // List of exposed ports that this service is accessible from
  // external to the cluster.
  repeated PortConfig ports = 2;
}
// NetworkSpec specifies user defined network parameters.
message NetworkSpec {
  // Annotations holds the network's name and user-defined labels.
  Annotations annotations = 1 [(gogoproto.nullable) = false];

  // DriverConfig specific configuration consumed by the network driver.
  Driver driver_config = 2;

  // IPv6Enabled enables support for IPv6 on the network.
  bool ipv6_enabled = 3;

  // internal restricts external access to the network. This may be
  // accomplished by disabling the default gateway or through other means.
  bool internal = 4;

  // IPAM holds IP address management options for the network.
  IPAMOptions ipam = 5 [(gogoproto.customname) = "IPAM"];

  // Attachable allows external (to swarm) entities to manually
  // attach to this network. With this flag enabled, external
  // entities such as containers running in a worker node in
  // the cluster can manually attach to this network and access
  // the services attached to this network. If this flag is not
  // enabled (default case) no manual attachment to this network
  // can happen.
  bool attachable = 6;

  // Ingress indicates this network will provide the routing-mesh.
  // In older versions, the network providing the routing mesh was
  // swarm internally created only and it was identified by the name
  // "ingress" and the label "com.docker.swarm.internal": "true".
  bool ingress = 7;

  // ConfigFrom is the source of the configuration for this network.
  oneof config_from {
    // Network is the name of a network that provides the network
    // specific configuration for this network, locally on the node
    // where this network is being plumbed.
    string network = 8;
  }
}
// ClusterSpec specifies global cluster settings.
message ClusterSpec {
  // Annotations holds the cluster's name and user-defined labels.
  Annotations annotations = 1 [(gogoproto.nullable) = false];

  // DEPRECATED: AcceptancePolicy defines the certificate issuance policy.
  // Acceptance policy is no longer customizable, and secrets have been
  // replaced with join tokens.
  AcceptancePolicy acceptance_policy = 2 [deprecated=true, (gogoproto.nullable) = false];

  // Orchestration defines cluster-level orchestration settings.
  OrchestrationConfig orchestration = 3 [(gogoproto.nullable) = false];

  // Raft defines the cluster's raft settings.
  RaftConfig raft = 4 [(gogoproto.nullable) = false];

  // Dispatcher defines cluster-level dispatcher settings.
  DispatcherConfig dispatcher = 5 [(gogoproto.nullable) = false];

  // CAConfig defines cluster-level certificate authority settings.
  CAConfig ca_config = 6 [(gogoproto.nullable) = false, (gogoproto.customname) = "CAConfig"];

  // TaskDefaults specifies the default values to use for task creation.
  TaskDefaults task_defaults = 7 [(gogoproto.nullable) = false];

  // EncryptionConfig defines the cluster's encryption settings.
  EncryptionConfig encryption_config = 8 [(gogoproto.nullable) = false];
}
// SecretSpec specifies a user-provided secret.
message SecretSpec {
  // Annotations holds the secret's name and user-defined labels.
  Annotations annotations = 1 [(gogoproto.nullable) = false];

  // Data is the secret payload - the maximum size is 500KB (that is, 500*1024 bytes)
  bytes data = 2;

  // Templating controls whether and how to evaluate the secret payload as
  // a template. If it is not set, no templating is used.
  //
  // The currently recognized values are:
  // - golang: Go templating
  Driver templating = 3;

  // Driver is the secret driver that is used to store the specified secret
  Driver driver = 4;
}
// ConfigSpec specifies user-provided configuration files.
message ConfigSpec {
  // Annotations holds the config's name and user-defined labels.
  Annotations annotations = 1 [(gogoproto.nullable) = false];

  // Data is the config payload - the maximum size is 500KB (that is, 500*1024 bytes)
  // TODO(aaronl): Do we want to revise this to include multiple payloads in a single
  // ConfigSpec? Define this to be a tar? etc...
  bytes data = 2;

  // Templating controls whether and how to evaluate the config payload as
  // a template. If it is not set, no templating is used.
  //
  // The currently recognized values are:
  // - golang: Go templating
  Driver templating = 3;
}
// VolumeSpec specifies a user-provided CSI volume.
message VolumeSpec {
  // Annotations includes the name and labels of a volume. The name used in the
  // spec's Annotations will be passed to the Plugin as the "Name" in the
  // CreateVolume request.
  Annotations annotations = 1 [(gogoproto.nullable) = false];

  // Group defines the volume group this particular volume belongs to. When
  // requesting volumes for a workload, the group name can be used instead of
  // the volume's name, which tells swarmkit to pick one from the many volumes
  // belonging to that group.
  string group = 2;

  // Driver represents the CSI Plugin object and its configuration parameters.
  // The "options" field of the Driver object is passed in the CSI
  // CreateVolumeRequest as the "parameters" field. The Driver must be
  // specified; there is no default CSI Plugin.
  Driver driver = 3;

  // AccessMode is similar to, and used to determine, the volume access mode as
  // defined in the CSI spec, as well as the volume type (block vs mount). In
  // this way, it is more similar to the VolumeCapability message in the CSI
  // spec.
  VolumeAccessMode access_mode = 4;

  // Secrets represents a set of key/value pairs to pass to the CSI plugin. The
  // keys of the secrets can be anything, but the values refer to swarmkit
  // Secret objects. See the "Secrets Requirements" section of the CSI Plugin
  // Spec for more information.
  repeated VolumeSecret secrets = 5;

  // AccessibilityRequirements specifies where a volume must be accessible
  // from.
  //
  // This field must be empty if the plugin does not support
  // VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the
  // plugin does not support it, volume will not be created.
  //
  // If AccessibilityRequirements is empty, but the plugin does support
  // VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire
  // cluster is a valid target for the volume.
  //
  // NOTE(review): the field name is PascalCase, contrary to proto
  // lower_snake_case convention; renaming it would change the JSON mapping,
  // so it is kept as-is.
  TopologyRequirement AccessibilityRequirements = 6;

  // CapacityRange is the capacity this volume should be created with. If nil,
  // the plugin will decide the capacity.
  CapacityRange capacity_range = 7;

  // VolumeAvailability enumerates the administrative states of a volume.
  enum VolumeAvailability {
    option (gogoproto.goproto_enum_prefix) = false;
    // Active allows a volume to be used and scheduled to. This is the
    // default state.
    ACTIVE = 0 [(gogoproto.enumvalue_customname) = "VolumeAvailabilityActive"];
    // Pause prevents volumes from having new workloads scheduled to use
    // them, even if they're already published on a Node.
    PAUSE = 1 [(gogoproto.enumvalue_customname) = "VolumeAvailabilityPause"];
    // Drain causes existing workloads using this volume to be rescheduled,
    // causing the volume to be unpublished and removed from nodes.
    DRAIN = 2 [(gogoproto.enumvalue_customname) = "VolumeAvailabilityDrain"];
  }

  // Availability is the Volume's desired availability. Analogous to Node
  // Availability, this allows the user to take volumes offline in order to
  // update or delete them.
  VolumeAvailability availability = 8;
}

View File

@ -1,123 +0,0 @@
package api
import (
"errors"
"fmt"
"strings"
"github.com/docker/go-events"
)
var (
	// errUnknownStoreAction is returned for an unsupported store action type.
	errUnknownStoreAction = errors.New("unrecognized action type")
	// errConflictingFilters is returned when conflicting filters are given.
	errConflictingFilters = errors.New("conflicting filters specified")
	// errNoKindSpecified is returned when a selector omits the object kind.
	errNoKindSpecified = errors.New("no kind of object specified")
	// errUnrecognizedAction is returned for an unsupported action value.
	errUnrecognizedAction = errors.New("unrecognized action")
)
// StoreObject is an abstract object that can be handled by the store.
// It exposes identity, metadata access, copying, and event construction.
type StoreObject interface {
	GetID() string                          // Get ID
	GetMeta() Meta                          // Retrieve metadata
	SetMeta(Meta)                           // Set metadata
	CopyStoreObject() StoreObject           // Return a copy of this object
	EventCreate() Event                     // Return a creation event
	EventUpdate(oldObject StoreObject) Event // Return an update event
	EventDelete() Event                     // Return a deletion event
}
// Event is the type used for events passed over watcher channels, and also
// the type used to specify filtering in calls to Watch.
type Event interface {
	// TODO(stevvooe): Consider whether it makes sense to squish both the
	// matcher type and the primary type into the same type. It might be better
	// to build a matcher from an event prototype.

	// Matches checks if this item in a watch queue Matches the event
	// description.
	Matches(events.Event) bool
}
// EventCreate is an interface implemented by every creation event type,
// allowing creation events to be identified by type assertion.
type EventCreate interface {
	IsEventCreate() bool
}
// EventUpdate is an interface implemented by every update event type,
// allowing update events to be identified by type assertion.
type EventUpdate interface {
	IsEventUpdate() bool
}
// EventDelete is an interface implemented by every delete event type,
// allowing delete events to be identified by type assertion.
type EventDelete interface {
	// IsEventDelete returns true for delete events. The bool return type
	// makes this consistent with EventCreate.IsEventCreate and
	// EventUpdate.IsEventUpdate (and matches upstream swarmkit).
	IsEventDelete() bool
}
// customIndexer builds custom index keys for the given annotations. Each
// entry produces "kind|key|val\x00" (the kind prefix is omitted when kind is
// empty; the trailing null byte terminates the key). It reports whether any
// index entries were produced, along with the encoded keys.
func customIndexer(kind string, annotations *Annotations) (bool, [][]byte, error) {
	var keys [][]byte
	for _, entry := range annotations.Indices {
		prefix := ""
		if kind != "" {
			prefix = kind + "|"
		}
		// Terminate each key with a null character.
		keys = append(keys, []byte(prefix+entry.Key+"|"+entry.Val+"\x00"))
	}
	return len(keys) != 0, keys, nil
}
func fromArgs(args ...interface{}) ([]byte, error) {
if len(args) != 1 {
return nil, fmt.Errorf("must provide only a single argument")
}
arg, ok := args[0].(string)
if !ok {
return nil, fmt.Errorf("argument must be a string: %#v", args[0])
}
// Add the null character as a terminator
arg += "\x00"
return []byte(arg), nil
}
// prefixFromArgs converts a single string argument into an index prefix:
// the same encoding as fromArgs but without the trailing null terminator.
func prefixFromArgs(args ...interface{}) ([]byte, error) {
	key, err := fromArgs(args...)
	if err != nil {
		return nil, err
	}
	// Drop the null terminator; what remains is the prefix.
	if n := len(key); n > 0 {
		return key[:n-1], nil
	}
	return key, nil
}
// checkCustom reports whether a2 contains an index entry exactly matching
// the single index entry of a1. It returns false unless a1 has exactly one
// index entry.
func checkCustom(a1, a2 Annotations) bool {
	if len(a1.Indices) != 1 {
		return false
	}
	want := a1.Indices[0]
	for _, ind := range a2.Indices {
		if ind.Key == want.Key && ind.Val == want.Val {
			return true
		}
	}
	return false
}
// checkCustomPrefix reports whether a2 contains an index entry whose key
// equals, and whose value is prefixed by, the single index entry of a1. It
// returns false unless a1 has exactly one index entry.
func checkCustomPrefix(a1, a2 Annotations) bool {
	if len(a1.Indices) != 1 {
		return false
	}
	want := a1.Indices[0]
	for _, ind := range a2.Indices {
		if ind.Key == want.Key && strings.HasPrefix(ind.Val, want.Val) {
			return true
		}
	}
	return false
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,155 +0,0 @@
syntax = "proto3";
package docker.swarmkit.v1;
import "github.com/docker/swarmkit/api/specs.proto";
import "github.com/docker/swarmkit/api/objects.proto";
import "github.com/docker/swarmkit/api/types.proto";
import "gogoproto/gogo.proto";
import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
// Object is a union of every top-level store object type, used to carry a
// matched object in watch messages.
message Object {
  oneof Object {
    Node node = 1;
    Service service = 2;
    Network network = 3;
    Task task = 4;
    Cluster cluster = 5;
    Secret secret = 6;
    Resource resource = 7;
    Extension extension = 8;
    Config config = 9;
    Volume volume = 10;
  }
}
// FIXME(aaronl): These messages should ideally be embedded in SelectBy, but
// protoc generates bad code for that.

// SelectBySlot selects tasks by the service they belong to and their slot
// number within that service.
message SelectBySlot {
  string service_id = 1 [(gogoproto.customname) = "ServiceID"];
  uint64 slot = 2;
}
// SelectByCustom selects objects by a custom index: an object kind, an index
// name, and the value to match (or prefix-match) against.
message SelectByCustom {
  string kind = 1;
  string index = 2;
  string value = 3;
}
// SelectBy is a single watch filter; an event must satisfy it to match.
message SelectBy {
  // TODO(aaronl): Are all of these things we want to expose in
  // the API? Exposing them may commit us to maintaining those
  // internal indices going forward.
  oneof By {
    // supported by all object types
    string id = 1 [(gogoproto.customname) = "ID"]; // not applicable for FindObjects - use GetObject instead
    string id_prefix = 2 [(gogoproto.customname) = "IDPrefix"];
    string name = 3;
    string name_prefix = 4;
    SelectByCustom custom = 5;
    SelectByCustom custom_prefix = 6;

    // supported by tasks only
    string service_id = 7 [(gogoproto.customname) = "ServiceID"];
    string node_id = 8 [(gogoproto.customname) = "NodeID"];
    SelectBySlot slot = 9;
    TaskState desired_state = 10;

    // supported by nodes only
    NodeRole role = 11;
    NodeSpec.Membership membership = 12;

    // supported by: service, task
    string referenced_network_id = 13 [(gogoproto.customname) = "ReferencedNetworkID"];
    string referenced_secret_id = 14 [(gogoproto.customname) = "ReferencedSecretID"];
    string referenced_config_id = 16 [(gogoproto.customname) = "ReferencedConfigID"];

    // supported by: resource
    string kind = 15;
  }
}
// Watch defines the RPC methods for monitoring data store change.
service Watch {
  // Watch starts a stream that returns any changes to objects that match
  // the specified selectors. When the stream begins, it immediately sends
  // an empty message back to the client. It is important to wait for
  // this message before taking any actions that depend on an established
  // stream of changes for consistency.
  rpc Watch(WatchRequest) returns (stream WatchMessage) {
    option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
  };
}
// WatchRequest specifies which changes the client wants to observe.
message WatchRequest {
  // WatchEntry is one selector: an object kind, the actions of interest,
  // and the filters an event must match.
  message WatchEntry {
    // Kind can contain a builtin type such as "node", "secret", etc. or
    // the kind specified by a custom-defined object.
    string kind = 1;

    // Action (create/update/delete)
    // This is a bitmask, so multiple actions may be OR'd together
    WatchActionKind action = 2;

    // Filters are combined using AND logic - an event must match
    // all of them to pass the filter.
    repeated SelectBy filters = 3;
  }

  // Multiple entries are combined using OR logic - i.e. if an event
  // matches all of the selectors specified in any single watch entry,
  // the event will be sent to the client.
  repeated WatchEntry entries = 1;

  // ResumeFrom provides a version to resume the watch from, if non-nil.
  // The watch will return changes since this version, and continue to
  // return new changes afterwards. Watch will return an error if the
  // server has compacted its log and no longer has complete history to
  // this point.
  Version resume_from = 2;

  // IncludeOldObject causes WatchMessages to include a copy of the
  // previous version of the object on updates. Note that only live
  // changes will include the old object (not historical changes
  // retrieved using ResumeFrom).
  bool include_old_object = 3;
}
// WatchMessage is the type of the stream that's returned to the client by
// Watch. Note that the first item of this stream will always be a WatchMessage
// with a nil Object, to signal that the stream has started.
message WatchMessage {
  // Event describes one matched store change.
  message Event {
    // Action (create/update/delete)
    // Note that WatchMessage does not expose "commit" events that
    // mark transaction boundaries.
    WatchActionKind action = 1;

    // Matched object
    Object object = 2;

    // For updates, OldObject will optionally be included in the
    // watch message, containing the previous version of the
    // object, if IncludeOldObject was set in WatchRequest.
    Object old_object = 3;
  }

  repeated Event events = 1;

  // Index versions this change to the data store. It can be used to
  // resume the watch from this point.
  Version version = 2;
}
// WatchActionKind distinguishes between creations, updates, and removals. It
// is structured as a bitmap so multiple kinds of events can be requested with
// a mask.
enum WatchActionKind {
  option (gogoproto.goproto_enum_prefix) = false;
  option (gogoproto.enum_customname) = "WatchActionKind";
  WATCH_ACTION_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "WatchActionKindUnknown"]; // default value, invalid
  WATCH_ACTION_CREATE = 1 [(gogoproto.enumvalue_customname) = "WatchActionKindCreate"];
  WATCH_ACTION_UPDATE = 2 [(gogoproto.enumvalue_customname) = "WatchActionKindUpdate"];
  WATCH_ACTION_REMOVE = 4 [(gogoproto.enumvalue_customname) = "WatchActionKindRemove"];
}

View File

@ -1,19 +0,0 @@
package raftselector
import (
"context"
"errors"
"google.golang.org/grpc"
)
// ConnProvider is a basic interface for connecting the API package (the raft
// proxy in particular) to the manager/state/raft package without import
// cycles. It provides only one method, for obtaining a connection to the
// leader.
type ConnProvider interface {
	LeaderConn(ctx context.Context) (*grpc.ClientConn, error)
}
// ErrIsLeader is returned from the LeaderConn method when the current machine
// is the leader. It is just a shim between packages to avoid import cycles.
var ErrIsLeader = errors.New("current node is leader")

View File

@ -1,11 +0,0 @@
package plugin
import (
"github.com/gogo/protobuf/proto"
google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)
// DeepcopyEnabled returns true if deepcopy is enabled for the descriptor.
// The E_Deepcopy message option defaults to true when unset.
func DeepcopyEnabled(options *google_protobuf.MessageOptions) bool {
	return proto.GetBoolExtension(options, E_Deepcopy, true)
}

File diff suppressed because it is too large Load Diff

View File

@ -1,53 +0,0 @@
syntax = "proto2";
package docker.protobuf.plugin;
import "google/protobuf/descriptor.proto";
// WatchSelectors flags which SelectBy filters are valid for a given object
// kind; code generation uses it to wire up watch support per type.
message WatchSelectors {
  // supported by all object types
  optional bool id = 1;
  optional bool id_prefix = 2;
  optional bool name = 3;
  optional bool name_prefix = 4;
  optional bool custom = 5;
  optional bool custom_prefix = 6;

  // supported by tasks only
  optional bool service_id = 7;
  optional bool node_id = 8;
  optional bool slot = 9;
  optional bool desired_state = 10;

  // supported by nodes only
  optional bool role = 11;
  optional bool membership = 12;

  // supported by: resource
  optional bool kind = 13;
}
// StoreObject marks a message as a store object and declares which watch
// selectors it supports.
message StoreObject {
  required WatchSelectors watch_selectors = 1;
}
// Message-level options: deepcopy generation (on by default) and store
// object registration.
extend google.protobuf.MessageOptions {
  optional bool deepcopy = 70000 [default=true];
  optional StoreObject store_object = 70001;
}
// TLSAuthorization describes the TLS authorization requirements of an RPC
// method.
message TLSAuthorization {
  // Roles contains the acceptable TLS OU roles for the handler.
  repeated string roles = 1;

  // Insecure is set to true if this method does not require
  // authorization. NOTE: Specifying both "insecure" and a nonempty
  // list of roles is invalid. This would fail at codegen time.
  optional bool insecure = 2;
}
extend google.protobuf.MethodOptions {
  // TLSAuthorization contains the authorization parameters for this
  // method.
  optional TLSAuthorization tls_authorization = 73626345;
}