Fix PR comments
- Be stricter about the orchestrator flag value
- Mark the orchestrator flag more explicitly as experimental
- Add the experimentalCLI annotation to the kubernetes flags
- Improve the kubeconfig error message
- Prefix service names with the stack name in the ps and services stack subcommands
- Fix the YAML documentation
- Make code coverage ignore generated code
Signed-off-by: Silvin Lubecki <silvin.lubecki@docker.com>
Upstream-commit: f1b116179f
Component: cli
@@ -140,7 +140,6 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error {
 	cli.clientInfo = ClientInfo{
 		DefaultVersion:  cli.client.ClientVersion(),
 		HasExperimental: hasExperimental,
-		HasKubernetes:   hasExperimental && orchestrator == OrchestratorKubernetes,
 		Orchestrator:    orchestrator,
 	}
 	cli.initializeFromClient()
@@ -206,11 +205,15 @@ type ServerInfo struct {
 // ClientInfo stores details about the supported features of the client
 type ClientInfo struct {
 	HasExperimental bool
-	HasKubernetes   bool
 	DefaultVersion  string
 	Orchestrator    Orchestrator
 }
+
+// HasKubernetes checks if kubernetes orchestrator is enabled
+func (c ClientInfo) HasKubernetes() bool {
+	return c.HasExperimental && c.Orchestrator == OrchestratorKubernetes
+}
 
 // NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err.
 func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli {
 	return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err}
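
For illustration, this is how a call site consumes the check now that HasKubernetes has moved from a struct field to a method. A minimal sketch; runKubernetes and runSwarm are hypothetical helpers, not part of this commit:

    // Sketch: picking an orchestrator backend at a command entry point.
    // Only ClientInfo().HasKubernetes() comes from the diff above.
    func runStackCommand(dockerCli command.Cli) error {
        if dockerCli.ClientInfo().HasKubernetes() {
            return runKubernetes(dockerCli) // hypothetical kubernetes path
        }
        return runSwarm(dockerCli) // hypothetical swarm path
    }
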
@@ -171,7 +171,7 @@ func TestExperimentalCLI(t *testing.T) {
 }
 
 func TestOrchestratorSwitch(t *testing.T) {
-	defaultVersion := "v1.55"
+	defaultVersion := "v0.00"
 
 	var testcases = []struct {
 		doc string
@@ -268,7 +268,7 @@ func TestOrchestratorSwitch(t *testing.T) {
 			}
 			err := cli.Initialize(options)
 			assert.NoError(t, err)
-			assert.Equal(t, testcase.expectedKubernetes, cli.ClientInfo().HasKubernetes)
+			assert.Equal(t, testcase.expectedKubernetes, cli.ClientInfo().HasKubernetes())
 			assert.Equal(t, testcase.expectedOrchestrator, string(cli.ClientInfo().Orchestrator))
 		})
 	}
@@ -3,7 +3,6 @@ package command
 import (
 	"fmt"
 	"os"
-	"strings"
 )
 
 // Orchestrator type acts as an enum describing supported orchestrators.
@@ -16,12 +15,12 @@ const (
 	OrchestratorSwarm      = Orchestrator("swarm")
 	orchestratorUnset      = Orchestrator("unset")
 
-	defaultOrchestrator = OrchestratorSwarm
-	dockerOrchestrator  = "DOCKER_ORCHESTRATOR"
+	defaultOrchestrator      = OrchestratorSwarm
+	envVarDockerOrchestrator = "DOCKER_ORCHESTRATOR"
 )
 
 func normalize(flag string) Orchestrator {
-	switch strings.ToLower(flag) {
+	switch flag {
 	case "kubernetes", "k8s":
 		return OrchestratorKubernetes
 	case "swarm", "swarmkit":
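
Dropping strings.ToLower makes the matching case-sensitive, which is the "stricter" flag handling called out in the commit message. A hypothetical test (not part of this commit) showing the effect; only normalize, OrchestratorKubernetes, and orchestratorUnset come from the code above:

    // Hypothetical test: mixed case no longer matches once ToLower is gone.
    func TestNormalizeIsCaseSensitive(t *testing.T) {
        assert.Equal(t, OrchestratorKubernetes, normalize("k8s"))
        assert.Equal(t, orchestratorUnset, normalize("Kubernetes"))
    }
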
@@ -43,7 +42,7 @@ func GetOrchestrator(isExperimental bool, flagValue, value string) Orchestrator
 		return o
 	}
 	// Check environment variable
-	env := os.Getenv(dockerOrchestrator)
+	env := os.Getenv(envVarDockerOrchestrator)
 	if o := normalize(env); o != orchestratorUnset {
 		return o
 	}
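
Taken together with the flag check just above it, GetOrchestrator resolves the orchestrator in a fixed order. A condensed sketch of that order (not the verbatim function, which also takes the experimental and config-file inputs):

    // Condensed sketch of the resolution order; all identifiers used here
    // appear in the hunks above.
    func resolveOrchestrator(flagValue string) Orchestrator {
        if o := normalize(flagValue); o != orchestratorUnset {
            return o // 1. explicit --orchestrator flag
        }
        if o := normalize(os.Getenv(envVarDockerOrchestrator)); o != orchestratorUnset {
            return o // 2. DOCKER_ORCHESTRATOR environment variable
        }
        return defaultOrchestrator // 3. fall back to swarm
    }
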
@@ -25,8 +25,10 @@ func NewStackCommand(dockerCli command.Cli) *cobra.Command {
 	flags := cmd.PersistentFlags()
 	flags.String("namespace", "default", "Kubernetes namespace to use")
 	flags.SetAnnotation("namespace", "kubernetes", nil)
+	flags.SetAnnotation("namespace", "experimentalCLI", nil)
 	flags.String("kubeconfig", "", "Kubernetes config file")
 	flags.SetAnnotation("kubeconfig", "kubernetes", nil)
+	flags.SetAnnotation("kubeconfig", "experimentalCLI", nil)
 	return cmd
 }
 
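
The annotations are stored on the pflag.Flag itself, which is what lets the CLI's help and validation layers hide or gate these kubernetes-only, experimental flags. A hypothetical inspection helper (not part of this commit) showing where the data lands:

    // Hypothetical helper: pflag exposes annotations via Flag.Annotations,
    // so a caller can check for the gating markers set above.
    func flagGates(flags *pflag.FlagSet, name string) (kubernetes, experimental bool) {
        f := flags.Lookup(name)
        if f == nil {
            return false, false
        }
        _, kubernetes = f.Annotations["kubernetes"]
        _, experimental = f.Annotations["experimentalCLI"]
        return kubernetes, experimental
    }
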
@@ -19,7 +19,7 @@ func newDeployCommand(dockerCli command.Cli) *cobra.Command {
 		Args: cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
 			opts.Namespace = args[0]
-			if dockerCli.ClientInfo().HasKubernetes {
+			if dockerCli.ClientInfo().HasKubernetes() {
 				kli, err := kubernetes.WrapCli(dockerCli, cmd)
 				if err != nil {
 					return err
@@ -1,6 +1,7 @@
 package kubernetes
 
 import (
+	"fmt"
 	"os"
 	"path/filepath"
 
@@ -49,7 +50,7 @@ func WrapCli(dockerCli command.Cli, cmd *cobra.Command) (*KubeCli, error) {
 
 	config, err := clientcmd.BuildConfigFromFlags("", kubeConfig)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("Failed to load kubernetes configuration file '%s'", kubeConfig)
 	}
 	cli.kubeConfig = config
 
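
This trades the raw client-go error for a message that names the offending file, at the cost of dropping the underlying cause. A hypothetical alternative (not what the commit does) that keeps both, using the github.com/pkg/errors package already used elsewhere in this diff:

    // Hypothetical variant: wrap instead of replace, so the client-go
    // cause stays attached to the user-facing message.
    config, err := clientcmd.BuildConfigFromFlags("", kubeConfig)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to load kubernetes configuration file '%s'", kubeConfig)
    }
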
@@ -126,12 +126,16 @@ func replicasToServices(replicas *appsv1beta2.ReplicaSetList, services *apiv1.Se
 		if !ok {
 			return nil, nil, fmt.Errorf("could not find service '%s'", r.Labels[labels.ForServiceName])
 		}
+		stack, ok := service.Labels[labels.ForStackName]
+		if ok {
+			stack += "_"
+		}
 		uid := string(service.UID)
 		s := swarm.Service{
 			ID: uid,
 			Spec: swarm.ServiceSpec{
 				Annotations: swarm.Annotations{
-					Name: service.Name,
+					Name: stack + service.Name,
 				},
 				TaskTemplate: swarm.TaskSpec{
 					ContainerSpec: &swarm.ContainerSpec{
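
The effect is that a service belonging to a stack is reported as "<stack>_<service>", mirroring swarm's naming convention, while a service without the stack label keeps its bare name. The rule in isolation (a sketch, not code from the commit):

    // Sketch of the naming rule introduced above.
    func displayName(stackName, serviceName string) string {
        if stackName != "" {
            return stackName + "_" + serviceName
        }
        return serviceName
    }
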
@@ -20,7 +20,7 @@ func RunDeploy(dockerCli *KubeCli, opts options.Deploy) error {
 		return errors.Errorf("Please specify a Compose file (with --compose-file).")
 	}
 	// Initialize clients
-	stackInterface, err := dockerCli.stacks()
+	stacks, err := dockerCli.stacks()
 	if err != nil {
 		return err
 	}
@@ -28,12 +28,12 @@ func RunDeploy(dockerCli *KubeCli, opts options.Deploy) error {
 	if err != nil {
 		return err
 	}
-	configMapInterface := composeClient.ConfigMaps()
-	secretInterface := composeClient.Secrets()
-	serviceInterface := composeClient.Services()
-	podInterface := composeClient.Pods()
+	configMaps := composeClient.ConfigMaps()
+	secrets := composeClient.Secrets()
+	services := composeClient.Services()
+	pods := composeClient.Pods()
 	watcher := DeployWatcher{
-		Pods: podInterface,
+		Pods: pods,
 	}
 
 	// Parse the compose file
@@ -43,28 +43,28 @@ func RunDeploy(dockerCli *KubeCli, opts options.Deploy) error {
 	}
 
 	// FIXME(vdemeester) handle warnings server-side
-	if err = IsColliding(serviceInterface, stack, cfg); err != nil {
+	if err = IsColliding(services, stack, cfg); err != nil {
 		return err
 	}
 
-	if err = createFileBasedConfigMaps(stack.Name, cfg.Configs, configMapInterface); err != nil {
+	if err = createFileBasedConfigMaps(stack.Name, cfg.Configs, configMaps); err != nil {
 		return err
 	}
 
-	if err = createFileBasedSecrets(stack.Name, cfg.Secrets, secretInterface); err != nil {
+	if err = createFileBasedSecrets(stack.Name, cfg.Secrets, secrets); err != nil {
 		return err
 	}
 
-	if in, err := stackInterface.Get(stack.Name, metav1.GetOptions{}); err == nil {
+	if in, err := stacks.Get(stack.Name, metav1.GetOptions{}); err == nil {
 		in.Spec = stack.Spec
 
-		if _, err = stackInterface.Update(in); err != nil {
+		if _, err = stacks.Update(in); err != nil {
 			return err
 		}
 
 		fmt.Printf("Stack %s was updated\n", stack.Name)
 	} else {
-		if _, err = stackInterface.Create(stack); err != nil {
+		if _, err = stacks.Create(stack); err != nil {
 			return err
 		}
 
@@ -76,7 +76,7 @@ func RunDeploy(dockerCli *KubeCli, opts options.Deploy) error {
 	<-watcher.Watch(stack, serviceNames(cfg))
 
 	fmt.Fprintf(cmdOut, "Stack %s is stable and running\n\n", stack.Name)
-	// fmt.Fprintf(cmdOut, "Read the logs with:\n $ %s stack logs %s\n", filepath.Base(os.Args[0]), stack.Name)
+	// TODO: fmt.Fprintf(cmdOut, "Read the logs with:\n $ %s stack logs %s\n", filepath.Base(os.Args[0]), stack.Name)
 
 	return nil
 }
@@ -55,12 +55,12 @@ func RunPS(dockerCli *KubeCli, options options.PS) error {
 	for i, pod := range pods {
 		tasks[i] = podToTask(pod)
 	}
-	return print(dockerCli, tasks, pods, nodeResolver, !options.NoTrunc, options.Quiet, format)
+	return print(dockerCli, namespace, tasks, pods, nodeResolver, !options.NoTrunc, options.Quiet, format)
 }
 
 type idResolver func(name string) (string, error)
 
-func print(dockerCli command.Cli, tasks []swarm.Task, pods []apiv1.Pod, nodeResolver idResolver, trunc, quiet bool, format string) error {
+func print(dockerCli command.Cli, namespace string, tasks []swarm.Task, pods []apiv1.Pod, nodeResolver idResolver, trunc, quiet bool, format string) error {
 	sort.Stable(tasksBySlot(tasks))
 
 	names := map[string]string{}
@@ -78,7 +78,7 @@ func print(dockerCli command.Cli, tasks []swarm.Task, pods []apiv1.Pod, nodeReso
 			return err
 		}
 
-		names[task.ID] = pods[i].Name
+		names[task.ID] = fmt.Sprintf("%s_%s", namespace, pods[i].Name)
 		nodes[task.ID] = nodeValue
 	}
 
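
This is the ps-side counterpart of the convert change above: with the namespace threaded through print, a pod named web-1234 in stack mystack is listed as mystack_web-1234, consistent with the swarm-style names the services subcommand now shows.
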
@@ -18,7 +18,7 @@ func newListCommand(dockerCli command.Cli) *cobra.Command {
 		Short: "List stacks",
 		Args:  cli.NoArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
-			if dockerCli.ClientInfo().HasKubernetes {
+			if dockerCli.ClientInfo().HasKubernetes() {
 				kli, err := kubernetes.WrapCli(dockerCli, cmd)
 				if err != nil {
 					return err
@@ -19,7 +19,7 @@ func newPsCommand(dockerCli command.Cli) *cobra.Command {
 		Args: cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
 			opts.Namespace = args[0]
-			if dockerCli.ClientInfo().HasKubernetes {
+			if dockerCli.ClientInfo().HasKubernetes() {
 				kli, err := kubernetes.WrapCli(dockerCli, cmd)
 				if err != nil {
 					return err
@@ -19,7 +19,7 @@ func newRemoveCommand(dockerCli command.Cli) *cobra.Command {
 		Args: cli.RequiresMinArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
 			opts.Namespaces = args
-			if dockerCli.ClientInfo().HasKubernetes {
+			if dockerCli.ClientInfo().HasKubernetes() {
 				kli, err := kubernetes.WrapCli(dockerCli, cmd)
 				if err != nil {
 					return err
@@ -19,7 +19,7 @@ func newServicesCommand(dockerCli command.Cli) *cobra.Command {
 		Args: cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
 			opts.Namespace = args[0]
-			if dockerCli.ClientInfo().HasKubernetes {
+			if dockerCli.ClientInfo().HasKubernetes() {
 				kli, err := kubernetes.WrapCli(dockerCli, cmd)
 				if err != nil {
 					return err
@@ -54,7 +54,8 @@ func (commonOpts *CommonOptions) InstallFlags(flags *pflag.FlagSet) {
 	flags.StringVarP(&commonOpts.LogLevel, "log-level", "l", "info", `Set the logging level ("debug"|"info"|"warn"|"error"|"fatal")`)
 	flags.BoolVar(&commonOpts.TLS, "tls", false, "Use TLS; implied by --tlsverify")
 	flags.BoolVar(&commonOpts.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote")
-	flags.StringVar(&commonOpts.Orchestrator, "orchestrator", "", "Which orchestrator to use with the docker cli (swarm|kubernetes)")
+	flags.StringVar(&commonOpts.Orchestrator, "orchestrator", "", "Which orchestrator to use with the docker cli (swarm|kubernetes) (default swarm) (experimental)")
+	flags.SetAnnotation("orchestrator", "experimentalCLI", nil)
 
 	// TODO use flag flags.String("identity"}, "i", "", "Path to libtrust key file")
 
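
Per the hunk headers, GetOrchestrator already receives an isExperimental argument; conceptually the gate behaves like this hypothetical wrapper (not the verbatim code), keeping non-experimental clients on the default orchestrator regardless of flag or environment input:

    // Hypothetical guard; resolveOrchestrator is the sketch shown earlier.
    func effectiveOrchestrator(isExperimental bool, flagValue string) Orchestrator {
        if !isExperimental {
            return defaultOrchestrator // experimental-only: everyone else stays on swarm
        }
        return resolveOrchestrator(flagValue)
    }
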