Compare commits

15 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 3c863ff8d3 | |
| | c1b7df309a | |
| | d260a54c81 | |
| | 3369ffe3e5 | |
| | 3cf84fbc5b | |
| | b1b03b3caa | |
| | 57d2fbb70e | |
| | c33cc928bb | |
| | 156e20ca86 | |
| | 7522a62d26 | |
| | 073e4e850e | |
| | 6d46ff7438 | |
| | 1d214b009d | |
| | 3092f67d5d | |
| | f01c09045d | |
.github/PULL_REQUEST_TEMPLATE.md (vendored, 6 changed lines)
@@ -22,13 +22,9 @@ Please provide the following information:
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog.
It must be placed inside the below triple backticks section:
pull request for inclusion in the changelog:
-->
```markdown changelog


```

**- A picture of a cute animal (not mandatory but encouraged)**
.github/workflows/test.yml (vendored, 2 changed lines)
@@ -64,7 +64,7 @@ jobs:
        name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.21.10
          go-version: 1.21.9
      -
        name: Test
        run: |
.github/workflows/validate-pr.yml (vendored, 62 changed lines)
@@ -1,62 +0,0 @@
name: validate-pr

on:
  pull_request:
    types: [opened, edited, labeled, unlabeled]

jobs:
  check-area-label:
    runs-on: ubuntu-20.04
    steps:
      - name: Missing `area/` label
        if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/') && !contains(join(github.event.pull_request.labels.*.name, ','), 'area/')
        run: |
          echo "::error::Every PR with an 'impact/*' label should also have an 'area/*' label"
          exit 1
      - name: OK
        run: exit 0

  check-changelog:
    if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/')
    runs-on: ubuntu-20.04
    env:
      PR_BODY: |
        ${{ github.event.pull_request.body }}
    steps:
      - name: Check changelog description
        run: |
          # Extract the `markdown changelog` note code block
          block=$(echo -n "$PR_BODY" | tr -d '\r' | awk '/^```markdown changelog$/{flag=1;next}/^```$/{flag=0}flag')

          # Strip empty lines
          desc=$(echo "$block" | awk NF)

          if [ -z "$desc" ]; then
            echo "::error::Changelog section is empty. Please provide a description for the changelog."
            exit 1
          fi

          len=$(echo -n "$desc" | wc -c)
          if [[ $len -le 6 ]]; then
            echo "::error::Description looks too short: $desc"
            exit 1
          fi

          echo "This PR will be included in the release notes with the following note:"
          echo "$desc"

  check-pr-branch:
    runs-on: ubuntu-20.04
    env:
      PR_TITLE: ${{ github.event.pull_request.title }}
    steps:
      # Backports or PR that target a release branch directly should mention the target branch in the title, for example:
      # [X.Y backport] Some change that needs backporting to X.Y
      # [X.Y] Change directly targeting the X.Y branch
      - name: Get branch from PR title
        id: title_branch
        run: echo "$PR_TITLE" | sed -n 's/^\[\([0-9]*\.[0-9]*\)[^]]*\].*/branch=\1/p' >> $GITHUB_OUTPUT

      - name: Check release branch
        if: github.event.pull_request.base.ref != steps.title_branch.outputs.branch && !(github.event.pull_request.base.ref == 'master' && steps.title_branch.outputs.branch == '')
        run: echo "::error::PR title suggests targetting the ${{ steps.title_branch.outputs.branch }} branch, but is opened against ${{ github.event.pull_request.base.ref }}" && exit 1
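The check-changelog job above pulls the release note out of the PR body with a small shell pipeline (tr, awk, wc). For illustration only, here is a rough Go equivalent of that extraction step; it is not part of the repository, and the function name and sample body are made up:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// extractChangelog mirrors the awk pipeline in the workflow above: it collects
// the non-empty lines between the "```markdown changelog" fence and the next
// closing "```" fence in a PR body, after stripping carriage returns.
func extractChangelog(prBody string) []string {
	fence := strings.Repeat("`", 3)
	openFence := fence + "markdown changelog"

	var notes []string
	inBlock := false
	sc := bufio.NewScanner(strings.NewReader(strings.ReplaceAll(prBody, "\r", "")))
	for sc.Scan() {
		line := sc.Text()
		switch {
		case line == openFence:
			inBlock = true
		case line == fence:
			inBlock = false
		case inBlock && strings.TrimSpace(line) != "":
			notes = append(notes, line)
		}
	}
	return notes
}

func main() {
	body := "Some description\n" + strings.Repeat("`", 3) + "markdown changelog\nAdd --foo flag to docker bar\n" + strings.Repeat("`", 3) + "\n"
	fmt.Println(extractChangelog(body)) // [Add --foo flag to docker bar]
}
```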
@@ -84,7 +84,7 @@ use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contr
<tr>
  <td>Community Slack</td>
  <td>
    The Docker Community has a dedicated Slack chat to discuss features and issues. You can sign-up <a href="https://dockr.ly/comm-slack" target="_blank">with this link</a>.
    The Docker Community has a dedicated Slack chat to discuss features and issues. You can sign-up <a href="https://dockr.ly/slack" target="_blank">with this link</a>.
  </td>
</tr>
<tr>
@@ -4,7 +4,7 @@ ARG BASE_VARIANT=alpine
ARG ALPINE_VERSION=3.18
ARG BASE_DEBIAN_DISTRO=bookworm

ARG GO_VERSION=1.21.10
ARG GO_VERSION=1.21.9
ARG XX_VERSION=1.4.0
ARG GOVERSIONINFO_VERSION=v1.3.0
ARG GOTESTSUM_VERSION=v1.10.0
@@ -1,18 +0,0 @@
package hooks

import (
    "fmt"
    "io"

    "github.com/morikuni/aec"
)

func PrintNextSteps(out io.Writer, messages []string) {
    if len(messages) == 0 {
        return
    }
    fmt.Fprintln(out, aec.Bold.Apply("\nWhat's next:"))
    for _, n := range messages {
        _, _ = fmt.Fprintf(out, " %s\n", n)
    }
}
@@ -1,38 +0,0 @@
package hooks

import (
    "bytes"
    "testing"

    "github.com/morikuni/aec"
    "gotest.tools/v3/assert"
)

func TestPrintHookMessages(t *testing.T) {
    testCases := []struct {
        messages       []string
        expectedOutput string
    }{
        {
            messages:       []string{},
            expectedOutput: "",
        },
        {
            messages: []string{"Bork!"},
            expectedOutput: aec.Bold.Apply("\nWhat's next:") + "\n" +
                " Bork!\n",
        },
        {
            messages: []string{"Foo", "bar"},
            expectedOutput: aec.Bold.Apply("\nWhat's next:") + "\n" +
                " Foo\n" +
                " bar\n",
        },
    }

    for _, tc := range testCases {
        w := bytes.Buffer{}
        PrintNextSteps(&w, tc.messages)
        assert.Equal(t, w.String(), tc.expectedOutput)
    }
}
@@ -1,116 +0,0 @@
package hooks

import (
    "bytes"
    "errors"
    "fmt"
    "strconv"
    "strings"
    "text/template"

    "github.com/spf13/cobra"
)

type HookType int

const (
    NextSteps = iota
)

// HookMessage represents a plugin hook response. Plugins
// declaring support for CLI hooks need to print a json
// representation of this type when their hook subcommand
// is invoked.
type HookMessage struct {
    Type     HookType
    Template string
}

// TemplateReplaceSubcommandName returns a hook template string
// that will be replaced by the CLI subcommand being executed
//
// Example:
//
// "you ran the subcommand: " + TemplateReplaceSubcommandName()
//
// when being executed after the command:
// `docker run --name "my-container" alpine`
// will result in the message:
// `you ran the subcommand: run`
func TemplateReplaceSubcommandName() string {
    return hookTemplateCommandName
}

// TemplateReplaceFlagValue returns a hook template string
// that will be replaced by the flags value.
//
// Example:
//
// "you ran a container named: " + TemplateReplaceFlagValue("name")
//
// when being executed after the command:
// `docker run --name "my-container" alpine`
// will result in the message:
// `you ran a container named: my-container`
func TemplateReplaceFlagValue(flag string) string {
    return fmt.Sprintf(hookTemplateFlagValue, flag)
}

// TemplateReplaceArg takes an index i and returns a hook
// template string that the CLI will replace the template with
// the ith argument, after processing the passed flags.
//
// Example:
//
// "run this image with `docker run " + TemplateReplaceArg(0) + "`"
//
// when being executed after the command:
// `docker pull alpine`
// will result in the message:
// "Run this image with `docker run alpine`"
func TemplateReplaceArg(i int) string {
    return fmt.Sprintf(hookTemplateArg, strconv.Itoa(i))
}

func ParseTemplate(hookTemplate string, cmd *cobra.Command) ([]string, error) {
    tmpl := template.New("").Funcs(commandFunctions)
    tmpl, err := tmpl.Parse(hookTemplate)
    if err != nil {
        return nil, err
    }
    b := bytes.Buffer{}
    err = tmpl.Execute(&b, cmd)
    if err != nil {
        return nil, err
    }
    return strings.Split(b.String(), "\n"), nil
}

var ErrHookTemplateParse = errors.New("failed to parse hook template")

const (
    hookTemplateCommandName = "{{.Name}}"
    hookTemplateFlagValue   = `{{flag . "%s"}}`
    hookTemplateArg         = "{{arg . %s}}"
)

var commandFunctions = template.FuncMap{
    "flag": getFlagValue,
    "arg":  getArgValue,
}

func getFlagValue(cmd *cobra.Command, flag string) (string, error) {
    cmdFlag := cmd.Flag(flag)
    if cmdFlag == nil {
        return "", ErrHookTemplateParse
    }
    return cmdFlag.Value.String(), nil
}

func getArgValue(cmd *cobra.Command, i int) (string, error) {
    flags := cmd.Flags()
    if flags == nil {
        return "", ErrHookTemplateParse
    }
    return flags.Arg(i), nil
}
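The helpers in this (removed) file are what a hook-providing plugin composes its template from, and ParseTemplate is what the CLI uses to expand that template against the command that just ran. A minimal sketch of the round trip, using only the functions shown above; the message text follows the doc-comment example and is purely illustrative:

```go
package main

import (
	"os"

	"github.com/docker/cli/cli-plugins/hooks"
	"github.com/spf13/cobra"
)

func main() {
	// Template built from the doc-comment example above: it expands to the
	// value the user passed to --name on the command that just ran.
	tpl := "you ran a container named: " + hooks.TemplateReplaceFlagValue("name")

	// CLI side: expand the template against the executed cobra command.
	cmd := &cobra.Command{Use: "run"}
	cmd.Flags().String("name", "", "")
	_ = cmd.Flags().Set("name", "my-container")

	lines, err := hooks.ParseTemplate(tpl, cmd)
	if err != nil {
		os.Exit(1)
	}
	// Prints the bold "What's next:" header followed by the expanded message.
	hooks.PrintNextSteps(os.Stderr, lines)
}
```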
@@ -1,86 +0,0 @@
package hooks

import (
    "testing"

    "github.com/spf13/cobra"
    "gotest.tools/v3/assert"
)

func TestParseTemplate(t *testing.T) {
    type testFlag struct {
        name  string
        value string
    }
    testCases := []struct {
        template       string
        flags          []testFlag
        args           []string
        expectedOutput []string
    }{
        {
            template:       "",
            expectedOutput: []string{""},
        },
        {
            template:       "a plain template message",
            expectedOutput: []string{"a plain template message"},
        },
        {
            template: TemplateReplaceFlagValue("tag"),
            flags: []testFlag{
                {
                    name:  "tag",
                    value: "my-tag",
                },
            },
            expectedOutput: []string{"my-tag"},
        },
        {
            template: TemplateReplaceFlagValue("test-one") + " " + TemplateReplaceFlagValue("test2"),
            flags: []testFlag{
                {
                    name:  "test-one",
                    value: "value",
                },
                {
                    name:  "test2",
                    value: "value2",
                },
            },
            expectedOutput: []string{"value value2"},
        },
        {
            template:       TemplateReplaceArg(0) + " " + TemplateReplaceArg(1),
            args:           []string{"zero", "one"},
            expectedOutput: []string{"zero one"},
        },
        {
            template:       "You just pulled " + TemplateReplaceArg(0),
            args:           []string{"alpine"},
            expectedOutput: []string{"You just pulled alpine"},
        },
        {
            template:       "one line\nanother line!",
            expectedOutput: []string{"one line", "another line!"},
        },
    }

    for _, tc := range testCases {
        testCmd := &cobra.Command{
            Use:  "pull",
            Args: cobra.ExactArgs(len(tc.args)),
        }
        for _, f := range tc.flags {
            _ = testCmd.Flags().String(f.name, "", "")
            err := testCmd.Flag(f.name).Value.Set(f.value)
            assert.NilError(t, err)
        }
        err := testCmd.Flags().Parse(tc.args)
        assert.NilError(t, err)

        out, err := ParseTemplate(tc.template, testCmd)
        assert.NilError(t, err)
        assert.DeepEqual(t, out, tc.expectedOutput)
    }
}
@@ -41,9 +41,6 @@ func (e *pluginError) MarshalText() (text []byte, err error) {
// wrapAsPluginError wraps an error in a pluginError with an
// additional message, analogous to errors.Wrapf.
func wrapAsPluginError(err error, msg string) error {
    if err == nil {
        return nil
    }
    return &pluginError{cause: errors.Wrap(err, msg)}
}
@@ -1,166 +0,0 @@
package manager

import (
    "encoding/json"
    "strings"

    "github.com/docker/cli/cli-plugins/hooks"
    "github.com/docker/cli/cli/command"
    "github.com/spf13/cobra"
    "github.com/spf13/pflag"
)

// HookPluginData is the type representing the information
// that plugins declaring support for hooks get passed when
// being invoked following a CLI command execution.
type HookPluginData struct {
    // RootCmd is a string representing the matching hook configuration
    // which is currently being invoked. If a hook for `docker context` is
    // configured and the user executes `docker context ls`, the plugin will
    // be invoked with `context`.
    RootCmd      string
    Flags        map[string]string
    CommandError string
}

// RunCLICommandHooks is the entrypoint into the hooks execution flow after
// a main CLI command was executed. It calls the hook subcommand for all
// present CLI plugins that declare support for hooks in their metadata and
// parses/prints their responses.
func RunCLICommandHooks(dockerCli command.Cli, rootCmd, subCommand *cobra.Command, cmdErrorMessage string) {
    commandName := strings.TrimPrefix(subCommand.CommandPath(), rootCmd.Name()+" ")
    flags := getCommandFlags(subCommand)

    runHooks(dockerCli, rootCmd, subCommand, commandName, flags, cmdErrorMessage)
}

// RunPluginHooks is the entrypoint for the hooks execution flow
// after a plugin command was just executed by the CLI.
func RunPluginHooks(dockerCli command.Cli, rootCmd, subCommand *cobra.Command, args []string) {
    commandName := strings.Join(args, " ")
    flags := getNaiveFlags(args)

    runHooks(dockerCli, rootCmd, subCommand, commandName, flags, "")
}

func runHooks(dockerCli command.Cli, rootCmd, subCommand *cobra.Command, invokedCommand string, flags map[string]string, cmdErrorMessage string) {
    nextSteps := invokeAndCollectHooks(dockerCli, rootCmd, subCommand, invokedCommand, flags, cmdErrorMessage)

    hooks.PrintNextSteps(dockerCli.Err(), nextSteps)
}

func invokeAndCollectHooks(dockerCli command.Cli, rootCmd, subCmd *cobra.Command, subCmdStr string, flags map[string]string, cmdErrorMessage string) []string {
    pluginsCfg := dockerCli.ConfigFile().Plugins
    if pluginsCfg == nil {
        return nil
    }

    nextSteps := make([]string, 0, len(pluginsCfg))
    for pluginName, cfg := range pluginsCfg {
        match, ok := pluginMatch(cfg, subCmdStr)
        if !ok {
            continue
        }

        p, err := GetPlugin(pluginName, dockerCli, rootCmd)
        if err != nil {
            continue
        }

        hookReturn, err := p.RunHook(HookPluginData{
            RootCmd:      match,
            Flags:        flags,
            CommandError: cmdErrorMessage,
        })
        if err != nil {
            // skip misbehaving plugins, but don't halt execution
            continue
        }

        var hookMessageData hooks.HookMessage
        err = json.Unmarshal(hookReturn, &hookMessageData)
        if err != nil {
            continue
        }

        // currently the only hook type
        if hookMessageData.Type != hooks.NextSteps {
            continue
        }

        processedHook, err := hooks.ParseTemplate(hookMessageData.Template, subCmd)
        if err != nil {
            continue
        }
        nextSteps = append(nextSteps, processedHook...)
    }
    return nextSteps
}

// pluginMatch takes a plugin configuration and a string representing the
// command being executed (such as 'image ls' – the root 'docker' is omitted)
// and, if the configuration includes a hook for the invoked command, returns
// the configured hook string.
func pluginMatch(pluginCfg map[string]string, subCmd string) (string, bool) {
    configuredPluginHooks, ok := pluginCfg["hooks"]
    if !ok || configuredPluginHooks == "" {
        return "", false
    }

    commands := strings.Split(configuredPluginHooks, ",")
    for _, hookCmd := range commands {
        if hookMatch(hookCmd, subCmd) {
            return hookCmd, true
        }
    }

    return "", false
}

func hookMatch(hookCmd, subCmd string) bool {
    hookCmdTokens := strings.Split(hookCmd, " ")
    subCmdTokens := strings.Split(subCmd, " ")

    if len(hookCmdTokens) > len(subCmdTokens) {
        return false
    }

    for i, v := range hookCmdTokens {
        if v != subCmdTokens[i] {
            return false
        }
    }

    return true
}

func getCommandFlags(cmd *cobra.Command) map[string]string {
    flags := make(map[string]string)
    cmd.Flags().Visit(func(f *pflag.Flag) {
        var fValue string
        if f.Value.Type() == "bool" {
            fValue = f.Value.String()
        }
        flags[f.Name] = fValue
    })
    return flags
}

// getNaiveFlags string-matches argv and parses them into a map.
// This is used when calling hooks after a plugin command, since
// in this case we can't rely on the cobra command tree to parse
// flags in this case. In this case, no values are ever passed,
// since we don't have enough information to process them.
func getNaiveFlags(args []string) map[string]string {
    flags := make(map[string]string)
    for _, arg := range args {
        if strings.HasPrefix(arg, "--") {
            flags[arg[2:]] = ""
            continue
        }
        if strings.HasPrefix(arg, "-") {
            flags[arg[1:]] = ""
        }
    }
    return flags
}
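RunHook (see the plugin.go hunk further down) executes the plugin's hook subcommand and expects a JSON-encoded hooks.HookMessage on its stdout, which the code above then unmarshals, template-expands, and prints. A hedged sketch of the plugin-side output, using only types shown in this diff; the template text is illustrative:

```go
package main

import (
	"encoding/json"
	"os"

	"github.com/docker/cli/cli-plugins/hooks"
)

// printHookMessage is the kind of thing a plugin's hook subcommand could emit:
// a JSON HookMessage whose template the CLI expands against the command that
// just ran and prints under "What's next:".
func printHookMessage() error {
	msg := hooks.HookMessage{
		Type:     hooks.NextSteps,
		Template: "You just pulled " + hooks.TemplateReplaceArg(0),
	}
	return json.NewEncoder(os.Stdout).Encode(msg)
}

func main() {
	if err := printHookMessage(); err != nil {
		os.Exit(1)
	}
}
```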
@@ -1,110 +0,0 @@
package manager

import (
    "testing"

    "gotest.tools/v3/assert"
)

func TestGetNaiveFlags(t *testing.T) {
    testCases := []struct {
        args          []string
        expectedFlags map[string]string
    }{
        {
            args:          []string{"docker"},
            expectedFlags: map[string]string{},
        },
        {
            args: []string{"docker", "build", "-q", "--file", "test.Dockerfile", "."},
            expectedFlags: map[string]string{
                "q":    "",
                "file": "",
            },
        },
        {
            args: []string{"docker", "--context", "a-context", "pull", "-q", "--progress", "auto", "alpine"},
            expectedFlags: map[string]string{
                "context":  "",
                "q":        "",
                "progress": "",
            },
        },
    }

    for _, tc := range testCases {
        assert.DeepEqual(t, getNaiveFlags(tc.args), tc.expectedFlags)
    }
}

func TestPluginMatch(t *testing.T) {
    testCases := []struct {
        commandString string
        pluginConfig  map[string]string
        expectedMatch string
        expectedOk    bool
    }{
        {
            commandString: "image ls",
            pluginConfig: map[string]string{
                "hooks": "image",
            },
            expectedMatch: "image",
            expectedOk:    true,
        },
        {
            commandString: "context ls",
            pluginConfig: map[string]string{
                "hooks": "build",
            },
            expectedMatch: "",
            expectedOk:    false,
        },
        {
            commandString: "context ls",
            pluginConfig: map[string]string{
                "hooks": "context ls",
            },
            expectedMatch: "context ls",
            expectedOk:    true,
        },
        {
            commandString: "image ls",
            pluginConfig: map[string]string{
                "hooks": "image ls,image",
            },
            expectedMatch: "image ls",
            expectedOk:    true,
        },
        {
            commandString: "image ls",
            pluginConfig: map[string]string{
                "hooks": "",
            },
            expectedMatch: "",
            expectedOk:    false,
        },
        {
            commandString: "image inspect",
            pluginConfig: map[string]string{
                "hooks": "image i",
            },
            expectedMatch: "",
            expectedOk:    false,
        },
        {
            commandString: "image inspect",
            pluginConfig: map[string]string{
                "hooks": "image",
            },
            expectedMatch: "image",
            expectedOk:    true,
        },
    }

    for _, tc := range testCases {
        match, ok := pluginMatch(tc.pluginConfig, tc.commandString)
        assert.Equal(t, ok, tc.expectedOk)
        assert.Equal(t, match, tc.expectedMatch)
    }
}
@@ -240,7 +240,8 @@ func PluginRunCommand(dockerCli command.Cli, name string, rootcmd *cobra.Command
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr

    cmd.Env = append(cmd.Environ(), ReexecEnvvar+"="+os.Args[0])
    cmd.Env = os.Environ()
    cmd.Env = append(cmd.Env, ReexecEnvvar+"="+os.Args[0])
    cmd.Env = appendPluginResourceAttributesEnvvar(cmd.Env, rootcmd, plugin)

    return cmd, nil
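The two variants above differ only in how the plugin's environment is assembled: (*exec.Cmd).Environ (available since Go 1.19) resolves to cmd.Env when that is set, otherwise to the current process environment, and may adjust PWD for cmd.Dir, whereas os.Environ always returns the raw process environment. A standalone sketch of the difference; the binary name and extra variable are placeholders, not the CLI's actual values:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("some-cli-plugin") // placeholder binary name
	cmd.Dir = os.TempDir()

	// cmd.Environ(): cmd.Env if set, otherwise the process environment,
	// with PWD possibly adjusted for cmd.Dir (Go 1.19+).
	viaCmd := append(cmd.Environ(), "EXTRA_VAR=1") // EXTRA_VAR stands in for the ReexecEnvvar entry in the diff

	// os.Environ(): always the current process environment, no cmd awareness.
	viaOS := append(os.Environ(), "EXTRA_VAR=1")

	fmt.Println(len(viaCmd), len(viaOS))
}
```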
@@ -8,11 +8,6 @@ const (
    // which must be supported by every plugin and returns the
    // plugin metadata.
    MetadataSubcommandName = "docker-cli-plugin-metadata"

    // HookSubcommandName is the name of the plugin subcommand
    // which must be implemented by plugins declaring support
    // for hooks in their metadata.
    HookSubcommandName = "docker-cli-plugin-hooks"
)

// Metadata provided by the plugin.
@@ -2,8 +2,6 @@ package manager

import (
    "encoding/json"
    "os"
    "os/exec"
    "path/filepath"
    "regexp"
    "strings"
@@ -102,22 +100,3 @@ func newPlugin(c Candidate, cmds []*cobra.Command) (Plugin, error) {
    }
    return p, nil
}

// RunHook executes the plugin's hooks command
// and returns its unprocessed output.
func (p *Plugin) RunHook(hookData HookPluginData) ([]byte, error) {
    hDataBytes, err := json.Marshal(hookData)
    if err != nil {
        return nil, wrapAsPluginError(err, "failed to marshall hook data")
    }

    pCmd := exec.Command(p.Path, p.Name, HookSubcommandName, string(hDataBytes))
    pCmd.Env = os.Environ()
    pCmd.Env = append(pCmd.Env, ReexecEnvvar+"="+os.Args[0])
    hookCmdOutput, err := pCmd.Output()
    if err != nil {
        return nil, wrapAsPluginError(err, "failed to execute plugin hook subcommand")
    }

    return hookCmdOutput, nil
}
@@ -12,17 +12,15 @@ import (
    "github.com/docker/cli/cli-plugins/socket"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/connhelper"
    "github.com/docker/cli/cli/debug"
    "github.com/docker/docker/client"
    "github.com/spf13/cobra"
    "go.opentelemetry.io/otel"
)

// PersistentPreRunE must be called by any plugin command (or
// subcommand) which uses the cobra `PersistentPreRun*` hook. Plugins
// which do not make use of `PersistentPreRun*` do not need to call
// this (although it remains safe to do so). Plugins are recommended
// to use `PersistentPreRunE` to enable the error to be
// to use `PersistenPreRunE` to enable the error to be
// returned. Should not be called outside of a command's
// PersistentPreRunE hook and must not be run unless Run has been
// called.
@@ -68,8 +66,6 @@ func RunPlugin(dockerCli *command.DockerCli, plugin *cobra.Command, meta manager

// Run is the top-level entry point to the CLI plugin framework. It should be called from your plugin's `main()` function.
func Run(makeCmd func(command.Cli) *cobra.Command, meta manager.Metadata) {
    otel.SetErrorHandler(debug.OTELErrorHandler)

    dockerCli, err := command.NewDockerCli()
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
@@ -7,114 +7,24 @@ import (
    "io"
    "net"
    "os"
    "runtime"
    "sync"
)

// EnvKey represents the well-known environment variable used to pass the
// plugin being executed the socket name it should listen on to coordinate with
// the host CLI.
// EnvKey represents the well-known environment variable used to pass the plugin being
// executed the socket name it should listen on to coordinate with the host CLI.
const EnvKey = "DOCKER_CLI_PLUGIN_SOCKET"

// NewPluginServer creates a plugin server that listens on a new Unix domain
// socket. h is called for each new connection to the socket in a goroutine.
func NewPluginServer(h func(net.Conn)) (*PluginServer, error) {
    // Listen on a Unix socket, with the address being platform-dependent.
    // When a non-abstract address is used, Go will unlink(2) the socket
    // for us once the listener is closed, as documented in
    // [net.UnixListener.SetUnlinkOnClose].
    l, err := net.ListenUnix("unix", &net.UnixAddr{
        Name: socketName("docker_cli_" + randomID()),
        Net:  "unix",
    })
// SetupConn sets up a Unix socket listener, establishes a goroutine to handle connections
// and update the conn pointer, and returns the listener for the socket (which the caller
// is responsible for closing when it's no longer needed).
func SetupConn(conn **net.UnixConn) (*net.UnixListener, error) {
    listener, err := listen("docker_cli_" + randomID())
    if err != nil {
        return nil, err
    }

    if h == nil {
        h = func(net.Conn) {}
    }
    accept(listener, conn)

    pl := &PluginServer{
        l: l,
        h: h,
    }

    go func() {
        defer pl.Close()
        for {
            err := pl.accept()
            if err != nil {
                return
            }
        }
    }()

    return pl, nil
}

type PluginServer struct {
    mu     sync.Mutex
    conns  []net.Conn
    l      *net.UnixListener
    h      func(net.Conn)
    closed bool
}

func (pl *PluginServer) accept() error {
    conn, err := pl.l.Accept()
    if err != nil {
        return err
    }

    pl.mu.Lock()
    defer pl.mu.Unlock()

    if pl.closed {
        // Handle potential race between Close and accept.
        conn.Close()
        return errors.New("plugin server is closed")
    }

    pl.conns = append(pl.conns, conn)

    go pl.h(conn)
    return nil
}

// Addr returns the [net.Addr] of the underlying [net.Listener].
func (pl *PluginServer) Addr() net.Addr {
    return pl.l.Addr()
}

// Close ensures that the server is no longer accepting new connections and
// closes all existing connections. Existing connections will receive [io.EOF].
//
// The error value is that of the underlying [net.Listner.Close] call.
func (pl *PluginServer) Close() error {
    // Close connections first to ensure the connections get io.EOF instead
    // of a connection reset.
    pl.closeAllConns()

    // Try to ensure that any active connections have a chance to receive
    // io.EOF.
    runtime.Gosched()

    return pl.l.Close()
}

func (pl *PluginServer) closeAllConns() {
    pl.mu.Lock()
    defer pl.mu.Unlock()

    // Prevent new connections from being accepted.
    pl.closed = true

    for _, conn := range pl.conns {
        conn.Close()
    }

    pl.conns = nil
    return listener, nil
}

func randomID() string {
@@ -125,6 +35,18 @@ func randomID() string {
    return hex.EncodeToString(b)
}

func accept(listener *net.UnixListener, conn **net.UnixConn) {
    go func() {
        for {
            // ignore error here, if we failed to accept a connection,
            // conn is nil and we fallback to previous behavior
            *conn, _ = listener.AcceptUnix()
            // perform any platform-specific actions on accept (e.g. unlink non-abstract sockets)
            onAccept(*conn, listener)
        }
    }()
}

// ConnectAndWait connects to the socket passed via well-known env var,
// if present, and attempts to read from it until it receives an EOF, at which
// point cb is called.
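ConnectAndWait, referenced at the end of this hunk, is the plugin-side counterpart: it reads the socket address from the EnvKey environment variable and treats EOF as a signal to stop. A rough sketch of that flow written directly against the net package, as an assumed illustration rather than the package's actual implementation:

```go
package main

import (
	"context"
	"net"
	"os"

	"github.com/docker/cli/cli-plugins/socket"
)

// watchCLISocket dials the socket whose address the CLI passed via
// DOCKER_CLI_PLUGIN_SOCKET and calls cancel when the CLI closes its end.
func watchCLISocket(cancel context.CancelFunc) {
	addr, ok := os.LookupEnv(socket.EnvKey)
	if !ok {
		return // older CLI: no socket to coordinate on
	}
	conn, err := net.Dial("unix", addr)
	if err != nil {
		return
	}
	go func() {
		defer conn.Close()
		// A read returning an error (typically io.EOF) means the CLI side
		// has gone away, so stop whatever the plugin is doing.
		_, _ = conn.Read(make([]byte, 1))
		cancel()
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	watchCLISocket(cancel)
	// ... long-running plugin work would select on ctx.Done() here ...
	_ = ctx
}
```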
@@ -1,9 +0,0 @@
//go:build windows || linux

package socket

func socketName(basename string) string {
    // Address of an abstract socket -- this socket can be opened by name,
    // but is not present in the filesystem.
    return "@" + basename
}
cli-plugins/socket/socket_darwin.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package socket

import (
    "net"
    "os"
    "path/filepath"
    "syscall"
)

func listen(socketname string) (*net.UnixListener, error) {
    return net.ListenUnix("unix", &net.UnixAddr{
        Name: filepath.Join(os.TempDir(), socketname),
        Net:  "unix",
    })
}

func onAccept(conn *net.UnixConn, listener *net.UnixListener) {
    syscall.Unlink(listener.Addr().String())
}
@@ -1,14 +0,0 @@
//go:build !windows && !linux

package socket

import (
    "os"
    "path/filepath"
)

func socketName(basename string) string {
    // Because abstract sockets are unavailable, use a socket path in the
    // system temporary directory.
    return filepath.Join(os.TempDir(), basename)
}
cli-plugins/socket/socket_nodarwin.go (new file, 20 lines)
@@ -0,0 +1,20 @@
//go:build !darwin && !openbsd

package socket

import (
    "net"
)

func listen(socketname string) (*net.UnixListener, error) {
    return net.ListenUnix("unix", &net.UnixAddr{
        Name: "@" + socketname,
        Net:  "unix",
    })
}

func onAccept(conn *net.UnixConn, listener *net.UnixListener) {
    // do nothing
    // while on darwin and OpenBSD we would unlink here;
    // on non-darwin the socket is abstract and not present on the filesystem
}
cli-plugins/socket/socket_openbsd.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package socket

import (
    "net"
    "os"
    "path/filepath"
    "syscall"
)

func listen(socketname string) (*net.UnixListener, error) {
    return net.ListenUnix("unix", &net.UnixAddr{
        Name: filepath.Join(os.TempDir(), socketname),
        Net:  "unix",
    })
}

func onAccept(conn *net.UnixConn, listener *net.UnixListener) {
    syscall.Unlink(listener.Addr().String())
}
@@ -1,14 +1,11 @@
package socket

import (
    "errors"
    "io"
    "io/fs"
    "net"
    "os"
    "runtime"
    "strings"
    "sync/atomic"
    "testing"
    "time"

@@ -16,110 +13,54 @@ import (
    "gotest.tools/v3/poll"
)

func TestPluginServer(t *testing.T) {
    t.Run("connection closes with EOF when server closes", func(t *testing.T) {
        called := make(chan struct{})
        srv, err := NewPluginServer(func(_ net.Conn) { close(called) })
func TestSetupConn(t *testing.T) {
    t.Run("updates conn when connected", func(t *testing.T) {
        var conn *net.UnixConn
        listener, err := SetupConn(&conn)
        assert.NilError(t, err)
        assert.Assert(t, srv != nil, "returned nil server but no error")
        assert.Check(t, listener != nil, "returned nil listener but no error")
        addr, err := net.ResolveUnixAddr("unix", listener.Addr().String())
        assert.NilError(t, err, "failed to resolve listener address")

        addr, err := net.ResolveUnixAddr("unix", srv.Addr().String())
        assert.NilError(t, err, "failed to resolve server address")
        _, err = net.DialUnix("unix", nil, addr)
        assert.NilError(t, err, "failed to dial returned listener")

        conn, err := net.DialUnix("unix", nil, addr)
        assert.NilError(t, err, "failed to dial returned server")
        defer conn.Close()

        done := make(chan error, 1)
        go func() {
            _, err := conn.Read(make([]byte, 1))
            done <- err
        }()

        select {
        case <-called:
        case <-time.After(10 * time.Millisecond):
            t.Fatal("handler not called")
        }

        srv.Close()

        select {
        case err := <-done:
            if !errors.Is(err, io.EOF) {
                t.Fatalf("exepcted EOF error, got: %v", err)
            }
        case <-time.After(10 * time.Millisecond):
        }
        pollConnNotNil(t, &conn)
    })

    t.Run("allows reconnects", func(t *testing.T) {
        var calls int32
        h := func(_ net.Conn) {
            atomic.AddInt32(&calls, 1)
        }

        srv, err := NewPluginServer(h)
        var conn *net.UnixConn
        listener, err := SetupConn(&conn)
        assert.NilError(t, err)
        defer srv.Close()

        assert.Check(t, srv.Addr() != nil, "returned nil addr but no error")

        addr, err := net.ResolveUnixAddr("unix", srv.Addr().String())
        assert.NilError(t, err, "failed to resolve server address")

        waitForCalls := func(n int) {
            poll.WaitOn(t, func(t poll.LogT) poll.Result {
                if atomic.LoadInt32(&calls) == int32(n) {
                    return poll.Success()
                }
                return poll.Continue("waiting for handler to be called")
            })
        }
        assert.Check(t, listener != nil, "returned nil listener but no error")
        addr, err := net.ResolveUnixAddr("unix", listener.Addr().String())
        assert.NilError(t, err, "failed to resolve listener address")

        otherConn, err := net.DialUnix("unix", nil, addr)
        assert.NilError(t, err, "failed to dial returned server")
        assert.NilError(t, err, "failed to dial returned listener")

        otherConn.Close()
        waitForCalls(1)

        conn, err := net.DialUnix("unix", nil, addr)
        assert.NilError(t, err, "failed to redial server")
        defer conn.Close()
        waitForCalls(2)

        // and again but don't close the existing connection
        conn2, err := net.DialUnix("unix", nil, addr)
        assert.NilError(t, err, "failed to redial server")
        defer conn2.Close()
        waitForCalls(3)

        srv.Close()

        // now make sure we get EOF on the existing connections
        buf := make([]byte, 1)
        _, err = conn.Read(buf)
        assert.ErrorIs(t, err, io.EOF, "expected EOF error, got: %v", err)

        _, err = conn2.Read(buf)
        assert.ErrorIs(t, err, io.EOF, "expected EOF error, got: %v", err)
        _, err = net.DialUnix("unix", nil, addr)
        assert.NilError(t, err, "failed to redial listener")
    })

    t.Run("does not leak sockets to local directory", func(t *testing.T) {
        srv, err := NewPluginServer(nil)
        var conn *net.UnixConn
        listener, err := SetupConn(&conn)
        assert.NilError(t, err)
        assert.Check(t, srv != nil, "returned nil server but no error")
        checkDirNoNewPluginServer(t)

        addr, err := net.ResolveUnixAddr("unix", srv.Addr().String())
        assert.NilError(t, err, "failed to resolve server address")
        assert.Check(t, listener != nil, "returned nil listener but no error")
        checkDirNoPluginSocket(t)

        addr, err := net.ResolveUnixAddr("unix", listener.Addr().String())
        assert.NilError(t, err, "failed to resolve listener address")
        _, err = net.DialUnix("unix", nil, addr)
        assert.NilError(t, err, "failed to dial returned server")
        checkDirNoNewPluginServer(t)
        assert.NilError(t, err, "failed to dial returned listener")
        checkDirNoPluginSocket(t)
    })
}

func checkDirNoNewPluginServer(t *testing.T) {
func checkDirNoPluginSocket(t *testing.T) {
    t.Helper()

    files, err := os.ReadDir(".")
@@ -137,24 +78,18 @@ func checkDirNoNewPluginServer(t *testing.T) {

func TestConnectAndWait(t *testing.T) {
    t.Run("calls cancel func on EOF", func(t *testing.T) {
        srv, err := NewPluginServer(nil)
        assert.NilError(t, err, "failed to setup server")
        defer srv.Close()
        var conn *net.UnixConn
        listener, err := SetupConn(&conn)
        assert.NilError(t, err, "failed to setup listener")

        done := make(chan struct{})
        t.Setenv(EnvKey, srv.Addr().String())
        t.Setenv(EnvKey, listener.Addr().String())
        cancelFunc := func() {
            done <- struct{}{}
        }
        ConnectAndWait(cancelFunc)

        select {
        case <-done:
            t.Fatal("unexpectedly done")
        default:
        }

        srv.Close()
        pollConnNotNil(t, &conn)
        conn.Close()

        select {
        case <-done:
@@ -166,19 +101,17 @@ func TestConnectAndWait(t *testing.T) {
    // TODO: this test cannot be executed with `t.Parallel()`, due to
    // relying on goroutine numbers to ensure correct behaviour
    t.Run("connect goroutine exits after EOF", func(t *testing.T) {
        srv, err := NewPluginServer(nil)
        assert.NilError(t, err, "failed to setup server")

        defer srv.Close()

        t.Setenv(EnvKey, srv.Addr().String())
        var conn *net.UnixConn
        listener, err := SetupConn(&conn)
        assert.NilError(t, err, "failed to setup listener")
        t.Setenv(EnvKey, listener.Addr().String())
        numGoroutines := runtime.NumGoroutine()

        ConnectAndWait(func() {})
        assert.Equal(t, runtime.NumGoroutine(), numGoroutines+1)

        srv.Close()

        pollConnNotNil(t, &conn)
        conn.Close()
        poll.WaitOn(t, func(t poll.LogT) poll.Result {
            if runtime.NumGoroutine() > numGoroutines+1 {
                return poll.Continue("waiting for connect goroutine to exit")
@@ -187,3 +120,14 @@ func TestConnectAndWait(t *testing.T) {
        }, poll.WithDelay(1*time.Millisecond), poll.WithTimeout(10*time.Millisecond))
    })
}

func pollConnNotNil(t *testing.T, conn **net.UnixConn) {
    t.Helper()

    poll.WaitOn(t, func(t poll.LogT) poll.Result {
        if *conn == nil {
            return poll.Continue("waiting for conn to not be nil")
        }
        return poll.Success()
    }, poll.WithDelay(1*time.Millisecond), poll.WithTimeout(10*time.Millisecond))
}
@@ -2,7 +2,6 @@ package builder

import (
    "context"
    "errors"
    "fmt"
    "strings"

@@ -11,7 +10,6 @@ import (
    "github.com/docker/cli/cli/command/completion"
    "github.com/docker/cli/opts"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/errdefs"
    units "github.com/docker/go-units"
    "github.com/spf13/cobra"
)
@@ -69,13 +67,9 @@ func runPrune(ctx context.Context, dockerCli command.Cli, options pruneOptions)
        warning = allCacheWarning
    }
    if !options.force {
        r, err := command.PromptForConfirmation(ctx, dockerCli.In(), dockerCli.Out(), warning)
        if err != nil {
        if r, err := command.PromptForConfirmation(ctx, dockerCli.In(), dockerCli.Out(), warning); !r || err != nil {
            return 0, "", err
        }
        if !r {
            return 0, "", errdefs.Cancelled(errors.New("builder prune has been cancelled"))
        }
    }

    report, err := dockerCli.Client().BuildCachePrune(ctx, types.BuildCachePruneOptions{
@@ -5,8 +5,10 @@ import (
    "errors"
    "testing"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/internal/test"
    "github.com/docker/docker/api/types"
    "gotest.tools/v3/assert"
)

func TestBuilderPromptTermination(t *testing.T) {
@@ -19,5 +21,8 @@ func TestBuilderPromptTermination(t *testing.T) {
        },
    })
    cmd := NewPruneCommand(cli)
    test.TerminatePrompt(ctx, t, cmd, cli)
    test.TerminatePrompt(ctx, t, cmd, cli, func(t *testing.T, err error) {
        t.Helper()
        assert.ErrorIs(t, err, command.ErrPromptTerminated)
    })
}
@@ -65,7 +65,6 @@ type Cli interface {
    ContextStore() store.Store
    CurrentContext() string
    DockerEndpoint() docker.Endpoint
    TelemetryClient
}

// DockerCli is an instance the docker command line client.
@@ -86,7 +85,6 @@ type DockerCli struct {
    dockerEndpoint     docker.Endpoint
    contextStoreConfig store.Config
    initTimeout        time.Duration
    res                telemetryResource

    // baseCtx is the base context used for internal operations. In the future
    // this may be replaced by explicitly passing a context to functions that
@@ -189,36 +187,6 @@ func (cli *DockerCli) BuildKitEnabled() (bool, error) {
    return cli.ServerInfo().OSType != "windows", nil
}

// HooksEnabled returns whether plugin hooks are enabled.
func (cli *DockerCli) HooksEnabled() bool {
    // legacy support DOCKER_CLI_HINTS env var
    if v := os.Getenv("DOCKER_CLI_HINTS"); v != "" {
        enabled, err := strconv.ParseBool(v)
        if err != nil {
            return false
        }
        return enabled
    }
    // use DOCKER_CLI_HOOKS env var value if set and not empty
    if v := os.Getenv("DOCKER_CLI_HOOKS"); v != "" {
        enabled, err := strconv.ParseBool(v)
        if err != nil {
            return false
        }
        return enabled
    }
    featuresMap := cli.ConfigFile().Features
    if v, ok := featuresMap["hooks"]; ok {
        enabled, err := strconv.ParseBool(v)
        if err != nil {
            return false
        }
        return enabled
    }
    // default to false
    return false
}

// ManifestStore returns a store for local manifests
func (cli *DockerCli) ManifestStore() manifeststore.Store {
    // TODO: support override default location from config file
@@ -307,56 +307,3 @@ func TestInitializeShouldAlwaysCreateTheContextStore(t *testing.T) {
    })))
    assert.Check(t, cli.ContextStore() != nil)
}

func TestHooksEnabled(t *testing.T) {
    t.Run("disabled by default", func(t *testing.T) {
        cli, err := NewDockerCli()
        assert.NilError(t, err)

        assert.Check(t, !cli.HooksEnabled())
    })

    t.Run("enabled in configFile", func(t *testing.T) {
        configFile := `{
    "features": {
      "hooks": "true"
    }}`
        dir := fs.NewDir(t, "", fs.WithFile("config.json", configFile))
        defer dir.Remove()
        cli, err := NewDockerCli()
        assert.NilError(t, err)
        config.SetDir(dir.Path())

        assert.Check(t, cli.HooksEnabled())
    })

    t.Run("env var overrides configFile", func(t *testing.T) {
        configFile := `{
    "features": {
      "hooks": "true"
    }}`
        t.Setenv("DOCKER_CLI_HOOKS", "false")
        dir := fs.NewDir(t, "", fs.WithFile("config.json", configFile))
        defer dir.Remove()
        cli, err := NewDockerCli()
        assert.NilError(t, err)
        config.SetDir(dir.Path())

        assert.Check(t, !cli.HooksEnabled())
    })

    t.Run("legacy env var overrides configFile", func(t *testing.T) {
        configFile := `{
    "features": {
      "hooks": "true"
    }}`
        t.Setenv("DOCKER_CLI_HINTS", "false")
        dir := fs.NewDir(t, "", fs.WithFile("config.json", configFile))
        defer dir.Remove()
        cli, err := NewDockerCli()
        assert.NilError(t, err)
        config.SetDir(dir.Path())

        assert.Check(t, !cli.HooksEnabled())
    })
}
@@ -8,9 +8,7 @@ import (
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/completion"
    "github.com/docker/cli/opts"
    "github.com/docker/docker/errdefs"
    units "github.com/docker/go-units"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

@@ -56,13 +54,9 @@ func runPrune(ctx context.Context, dockerCli command.Cli, options pruneOptions)
    pruneFilters := command.PruneFilters(dockerCli, options.filter.Value())

    if !options.force {
        r, err := command.PromptForConfirmation(ctx, dockerCli.In(), dockerCli.Out(), warning)
        if err != nil {
        if r, err := command.PromptForConfirmation(ctx, dockerCli.In(), dockerCli.Out(), warning); !r || err != nil {
            return 0, "", err
        }
        if !r {
            return 0, "", errdefs.Cancelled(errors.New("container prune has been cancelled"))
        }
    }

    report, err := dockerCli.Client().ContainersPrune(ctx, pruneFilters)
@@ -4,10 +4,12 @@ import (
    "context"
    "testing"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/internal/test"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
    "github.com/pkg/errors"
    "gotest.tools/v3/assert"
)

func TestContainerPrunePromptTermination(t *testing.T) {
@@ -20,5 +22,8 @@ func TestContainerPrunePromptTermination(t *testing.T) {
        },
    })
    cmd := NewPruneCommand(cli)
    test.TerminatePrompt(ctx, t, cmd, cli)
    test.TerminatePrompt(ctx, t, cmd, cli, func(t *testing.T, err error) {
        t.Helper()
        assert.ErrorIs(t, err, command.ErrPromptTerminated)
    })
}
@@ -186,11 +186,7 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
        defer closeFn()
    }

    // New context here because we don't to cancel waiting on container exit/remove
    // when we cancel attach, etc.
    statusCtx, cancelStatusCtx := context.WithCancel(context.WithoutCancel(ctx))
    defer cancelStatusCtx()
    statusChan := waitExitOrRemoved(statusCtx, apiClient, containerID, copts.autoRemove)
    statusChan := waitExitOrRemoved(ctx, apiClient, containerID, copts.autoRemove)

    // start the container
    if err := apiClient.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil {
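The run.go hunk above derives statusCtx from context.WithoutCancel so that waiting on container exit/removal is not torn down when the attach context is cancelled. A small, self-contained illustration of that pattern; the names here are hypothetical:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	attachCtx, cancelAttach := context.WithCancel(context.Background())

	// statusCtx ignores cancellation of attachCtx: it is derived from a
	// context.WithoutCancel copy and has its own independent cancel func.
	statusCtx, cancelStatus := context.WithCancel(context.WithoutCancel(attachCtx))
	defer cancelStatus()

	cancelAttach() // e.g. the attach/stream side is torn down

	select {
	case <-statusCtx.Done():
		fmt.Println("status wait was cancelled (not the intended behaviour)")
	case <-time.After(10 * time.Millisecond):
		fmt.Println("status wait keeps running after the attach context is cancelled")
	}
}
```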
@@ -2,7 +2,6 @@ package container

import (
    "context"
    "errors"
    "strconv"

    "github.com/docker/docker/api/types"
@@ -36,10 +35,7 @@ func waitExitOrRemoved(ctx context.Context, apiClient client.APIClient, containe

    statusC := make(chan int)
    go func() {
        defer close(statusC)
        select {
        case <-ctx.Done():
            return
        case result := <-resultC:
            if result.Error != nil {
                logrus.Errorf("Error waiting for container: %v", result.Error.Message)
@@ -48,9 +44,6 @@ func waitExitOrRemoved(ctx context.Context, apiClient client.APIClient, containe
                statusC <- int(result.StatusCode)
            }
        case err := <-errC:
            if errors.Is(err, context.Canceled) {
                return
            }
            logrus.Errorf("error waiting for container: %v", err)
            statusC <- 125
        }
@@ -10,9 +10,7 @@ import (
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/completion"
    "github.com/docker/cli/opts"
    "github.com/docker/docker/errdefs"
    units "github.com/docker/go-units"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

@@ -70,13 +68,9 @@ func runPrune(ctx context.Context, dockerCli command.Cli, options pruneOptions)
        warning = allImageWarning
    }
    if !options.force {
        r, err := command.PromptForConfirmation(ctx, dockerCli.In(), dockerCli.Out(), warning)
        if err != nil {
        if r, err := command.PromptForConfirmation(ctx, dockerCli.In(), dockerCli.Out(), warning); !r || err != nil {
            return 0, "", err
        }
        if !r {
            return 0, "", errdefs.Cancelled(errors.New("image prune has been cancelled"))
        }
    }

    report, err := dockerCli.Client().ImagesPrune(ctx, pruneFilters)
@@ -4,10 +4,9 @@ import (
    "context"
    "fmt"
    "io"
    "strings"
    "testing"

    "github.com/docker/cli/cli/streams"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/internal/test"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
@@ -95,18 +94,13 @@ func TestNewPruneCommandSuccess(t *testing.T) {
        },
    }
    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            cli := test.NewFakeCli(&fakeClient{imagesPruneFunc: tc.imagesPruneFunc})
            // when prompted, answer "Y" to confirm the prune.
            // will not be prompted if --force is used.
            cli.SetIn(streams.NewIn(io.NopCloser(strings.NewReader("Y\n"))))
            cmd := NewPruneCommand(cli)
            cmd.SetOut(io.Discard)
            cmd.SetArgs(tc.args)
            err := cmd.Execute()
            assert.NilError(t, err)
            golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("prune-command-success.%s.golden", tc.name))
        })
        cli := test.NewFakeCli(&fakeClient{imagesPruneFunc: tc.imagesPruneFunc})
        cmd := NewPruneCommand(cli)
        cmd.SetOut(io.Discard)
        cmd.SetArgs(tc.args)
        err := cmd.Execute()
        assert.NilError(t, err)
        golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("prune-command-success.%s.golden", tc.name))
    }
}

@@ -120,5 +114,8 @@ func TestPrunePromptTermination(t *testing.T) {
        },
    })
    cmd := NewPruneCommand(cli)
    test.TerminatePrompt(ctx, t, cmd, cli)
    test.TerminatePrompt(ctx, t, cmd, cli, func(t *testing.T, err error) {
        t.Helper()
        assert.ErrorIs(t, err, command.ErrPromptTerminated)
    })
}
@@ -7,8 +7,6 @@ import (
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/opts"
    "github.com/docker/docker/errdefs"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

@@ -52,13 +50,9 @@ func runPrune(ctx context.Context, dockerCli command.Cli, options pruneOptions)
    pruneFilters := command.PruneFilters(dockerCli, options.filter.Value())

    if !options.force {
        r, err := command.PromptForConfirmation(ctx, dockerCli.In(), dockerCli.Out(), warning)
        if err != nil {
        if r, err := command.PromptForConfirmation(ctx, dockerCli.In(), dockerCli.Out(), warning); !r || err != nil {
            return "", err
        }
        if !r {
            return "", errdefs.Cancelled(errors.New("network prune cancelled has been cancelled"))
        }
    }

    report, err := dockerCli.Client().NetworksPrune(ctx, pruneFilters)
@@ -4,10 +4,12 @@ import (
    "context"
    "testing"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/internal/test"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
    "github.com/pkg/errors"
    "gotest.tools/v3/assert"
)

func TestNetworkPrunePromptTermination(t *testing.T) {
@@ -20,5 +22,8 @@ func TestNetworkPrunePromptTermination(t *testing.T) {
        },
    })
    cmd := NewPruneCommand(cli)
    test.TerminatePrompt(ctx, t, cmd, cli)
    test.TerminatePrompt(ctx, t, cmd, cli, func(t *testing.T, err error) {
        t.Helper()
        assert.ErrorIs(t, err, command.ErrPromptTerminated)
    })
}
@@ -5,6 +5,7 @@ import (
    "io"
    "testing"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/internal/test"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/errdefs"
@@ -114,5 +115,8 @@ func TestNetworkRemovePromptTermination(t *testing.T) {
    })
    cmd := newRemoveCommand(cli)
    cmd.SetArgs([]string{"existing-network"})
    test.TerminatePrompt(ctx, t, cmd, cli)
    test.TerminatePrompt(ctx, t, cmd, cli, func(t *testing.T, err error) {
        t.Helper()
        assert.ErrorIs(t, err, command.ErrPromptTerminated)
    })
}
@@ -1,2 +1,2 @@
Upgrading plugin foo/bar from localhost:5000/foo/bar:v0.1.0 to localhost:5000/foo/bar:v1.0.0
Plugin images do not match, are you sure? [y/N]
Plugin images do not match, are you sure? [y/N]
@@ -8,7 +8,6 @@ import (
    "github.com/distribution/reference"
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/docker/docker/errdefs"
    "github.com/docker/docker/pkg/jsonmessage"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
@@ -64,12 +63,11 @@ func runUpgrade(ctx context.Context, dockerCli command.Cli, opts pluginOptions)

    fmt.Fprintf(dockerCli.Out(), "Upgrading plugin %s from %s to %s\n", p.Name, reference.FamiliarString(old), reference.FamiliarString(remote))
    if !opts.skipRemoteCheck && remote.String() != old.String() {
        r, err := command.PromptForConfirmation(ctx, dockerCli.In(), dockerCli.Out(), "Plugin images do not match, are you sure?")
        if err != nil {
            return err
        }
        if !r {
            return errdefs.Cancelled(errors.New("plugin upgrade has been cancelled"))
        if r, err := command.PromptForConfirmation(ctx, dockerCli.In(), dockerCli.Out(), "Plugin images do not match, are you sure?"); !r || err != nil {
            if err != nil {
                return errors.Wrap(err, "canceling upgrade request")
            }
            return errors.New("canceling upgrade request")
        }
    }
@@ -5,9 +5,11 @@ import (
    "io"
    "testing"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/internal/test"
    "github.com/docker/docker/api/types"
    "github.com/pkg/errors"
    "gotest.tools/v3/assert"
    "gotest.tools/v3/golden"
)

@@ -32,6 +34,9 @@ func TestUpgradePromptTermination(t *testing.T) {
    // need to set a remote address that does not match the plugin
    // reference sent by the `pluginInspectFunc`
    cmd.SetArgs([]string{"foo/bar", "localhost:5000/foo/bar:v1.0.0"})
    test.TerminatePrompt(ctx, t, cmd, cli)
    test.TerminatePrompt(ctx, t, cmd, cli, func(t *testing.T, err error) {
        t.Helper()
        assert.ErrorIs(t, err, command.ErrPromptTerminated)
    })
    golden.Assert(t, cli.OutBuffer().String(), "plugin-upgrade-terminate.golden")
}
@@ -17,10 +17,8 @@ import (
    "github.com/docker/cli/cli/command/volume"
    "github.com/docker/cli/opts"
    "github.com/docker/docker/api/types/versions"
    "github.com/docker/docker/errdefs"
    "github.com/docker/go-units"
    "github.com/fvbommel/sortorder"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

@@ -77,13 +75,9 @@ func runPrune(ctx context.Context, dockerCli command.Cli, options pruneOptions)
        return fmt.Errorf(`ERROR: The "until" filter is not supported with "--volumes"`)
    }
    if !options.force {
        r, err := command.PromptForConfirmation(ctx, dockerCli.In(), dockerCli.Out(), confirmationMessage(dockerCli, options))
        if err != nil {
        if r, err := command.PromptForConfirmation(ctx, dockerCli.In(), dockerCli.Out(), confirmationMessage(dockerCli, options)); !r || err != nil {
            return err
        }
        if !r {
            return errdefs.Cancelled(errors.New("system prune has been cancelled"))
        }
    }
    pruneFuncs := []func(ctx context.Context, dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error){
        container.RunPrune,
@@ -4,6 +4,7 @@ import (
    "context"
    "testing"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/config/configfile"
    "github.com/docker/cli/internal/test"
    "github.com/docker/docker/api/types"
@@ -17,7 +18,7 @@ func TestPrunePromptPre131DoesNotIncludeBuildCache(t *testing.T) {
    cli := test.NewFakeCli(&fakeClient{version: "1.30"})
    cmd := newPruneCommand(cli)
    cmd.SetArgs([]string{})
    assert.ErrorContains(t, cmd.Execute(), "system prune has been cancelled")
    assert.NilError(t, cmd.Execute())
    expected := `WARNING! This will remove:
        - all stopped containers
        - all networks not used by at least one container
@@ -35,7 +36,7 @@ func TestPrunePromptFilters(t *testing.T) {
    cmd := newPruneCommand(cli)
    cmd.SetArgs([]string{"--filter", "until=24h", "--filter", "label=hello-world", "--filter", "label!=foo=bar", "--filter", "label=bar=baz"})

    assert.ErrorContains(t, cmd.Execute(), "system prune has been cancelled")
    assert.NilError(t, cmd.Execute())
    expected := `WARNING! This will remove:
        - all stopped containers
        - all networks not used by at least one container
@@ -68,5 +69,8 @@ func TestSystemPrunePromptTermination(t *testing.T) {
    })

    cmd := newPruneCommand(cli)
    test.TerminatePrompt(ctx, t, cmd, cli)
    test.TerminatePrompt(ctx, t, cmd, cli, func(t *testing.T, err error) {
        t.Helper()
        assert.ErrorIs(t, err, command.ErrPromptTerminated)
    })
}
@ -1,202 +0,0 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/uuid"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
const exportTimeout = 50 * time.Millisecond
|
||||
|
||||
// TracerProvider is an extension of the trace.TracerProvider interface for CLI programs.
|
||||
type TracerProvider interface {
|
||||
trace.TracerProvider
|
||||
ForceFlush(ctx context.Context) error
|
||||
Shutdown(ctx context.Context) error
|
||||
}
|
||||
|
||||
// MeterProvider is an extension of the metric.MeterProvider interface for CLI programs.
|
||||
type MeterProvider interface {
|
||||
metric.MeterProvider
|
||||
ForceFlush(ctx context.Context) error
|
||||
Shutdown(ctx context.Context) error
|
||||
}
|
||||
|
||||
// TelemetryClient provides the methods for using OTEL tracing or metrics.
|
||||
type TelemetryClient interface {
|
||||
// Resource returns the OTEL Resource configured with this TelemetryClient.
|
||||
// This resource may be created lazily, but the resource should be the same
|
||||
// each time this function is invoked.
|
||||
Resource() *resource.Resource
|
||||
|
||||
// TracerProvider returns a TracerProvider. This TracerProvider will be configured
|
||||
// with the default tracing components for a CLI program along with any options given
|
||||
// for the SDK.
|
||||
TracerProvider(ctx context.Context, opts ...sdktrace.TracerProviderOption) TracerProvider
|
||||
|
||||
// MeterProvider returns a MeterProvider. This MeterProvider will be configured
|
||||
// with the default metric components for a CLI program along with any options given
|
||||
// for the SDK.
|
||||
MeterProvider(ctx context.Context, opts ...sdkmetric.Option) MeterProvider
|
||||
}
|
||||
|
||||
func (cli *DockerCli) Resource() *resource.Resource {
|
||||
return cli.res.Get()
|
||||
}
|
||||
|
||||
func (cli *DockerCli) TracerProvider(ctx context.Context, opts ...sdktrace.TracerProviderOption) TracerProvider {
|
||||
allOpts := make([]sdktrace.TracerProviderOption, 0, len(opts)+2)
|
||||
allOpts = append(allOpts, sdktrace.WithResource(cli.Resource()))
|
||||
allOpts = append(allOpts, dockerSpanExporter(ctx, cli)...)
|
||||
allOpts = append(allOpts, opts...)
|
||||
return sdktrace.NewTracerProvider(allOpts...)
|
||||
}
|
||||
|
||||
func (cli *DockerCli) MeterProvider(ctx context.Context, opts ...sdkmetric.Option) MeterProvider {
|
||||
allOpts := make([]sdkmetric.Option, 0, len(opts)+2)
|
||||
allOpts = append(allOpts, sdkmetric.WithResource(cli.Resource()))
|
||||
allOpts = append(allOpts, dockerMetricExporter(ctx, cli)...)
|
||||
allOpts = append(allOpts, opts...)
|
||||
return sdkmetric.NewMeterProvider(allOpts...)
|
||||
}
|
||||
|
||||
// WithResourceOptions configures additional options for the default resource. The default
|
||||
// resource will continue to include its default options.
|
||||
func WithResourceOptions(opts ...resource.Option) CLIOption {
|
||||
return func(cli *DockerCli) error {
|
||||
cli.res.AppendOptions(opts...)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithResource overwrites the default resource and prevents its creation.
|
||||
func WithResource(res *resource.Resource) CLIOption {
|
||||
return func(cli *DockerCli) error {
|
||||
cli.res.Set(res)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
type telemetryResource struct {
|
||||
res *resource.Resource
|
||||
opts []resource.Option
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
func (r *telemetryResource) Set(res *resource.Resource) {
|
||||
r.res = res
|
||||
}
|
||||
|
||||
func (r *telemetryResource) Get() *resource.Resource {
|
||||
r.once.Do(r.init)
|
||||
return r.res
|
||||
}
|
||||
|
||||
func (r *telemetryResource) init() {
|
||||
if r.res != nil {
|
||||
r.opts = nil
|
||||
return
|
||||
}
|
||||
|
||||
opts := append(defaultResourceOptions(), r.opts...)
|
||||
res, err := resource.New(context.Background(), opts...)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
r.res = res
|
||||
|
||||
// Clear the resource options since they'll never be used again and to allow
|
||||
// the garbage collector to retrieve that memory.
|
||||
r.opts = nil
|
||||
}
|
||||
|
||||
func defaultResourceOptions() []resource.Option {
|
||||
return []resource.Option{
|
||||
resource.WithDetectors(serviceNameDetector{}),
|
||||
resource.WithAttributes(
|
||||
// Use a unique instance id so OTEL knows that each invocation
|
||||
// of the CLI is its own instance. Without this, downstream
|
||||
// OTEL processors may think the same process is restarting
|
||||
// continuously.
|
||||
semconv.ServiceInstanceID(uuid.Generate().String()),
|
||||
),
|
||||
resource.WithFromEnv(),
|
||||
resource.WithTelemetrySDK(),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *telemetryResource) AppendOptions(opts ...resource.Option) {
|
||||
if r.res != nil {
|
||||
return
|
||||
}
|
||||
r.opts = append(r.opts, opts...)
|
||||
}
|
||||
|
||||
type serviceNameDetector struct{}
|
||||
|
||||
func (serviceNameDetector) Detect(ctx context.Context) (*resource.Resource, error) {
|
||||
return resource.StringDetector(
|
||||
semconv.SchemaURL,
|
||||
semconv.ServiceNameKey,
|
||||
func() (string, error) {
|
||||
return filepath.Base(os.Args[0]), nil
|
||||
},
|
||||
).Detect(ctx)
|
||||
}
|
||||
|
||||
// cliReader is an implementation of Reader that will automatically
|
||||
// report to a designated Exporter when Shutdown is called.
|
||||
type cliReader struct {
|
||||
sdkmetric.Reader
|
||||
exporter sdkmetric.Exporter
|
||||
}
|
||||
|
||||
func newCLIReader(exp sdkmetric.Exporter) sdkmetric.Reader {
|
||||
reader := sdkmetric.NewManualReader(
|
||||
sdkmetric.WithTemporalitySelector(deltaTemporality),
|
||||
)
|
||||
return &cliReader{
|
||||
Reader: reader,
|
||||
exporter: exp,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *cliReader) Shutdown(ctx context.Context) error {
|
||||
var rm metricdata.ResourceMetrics
|
||||
if err := r.Reader.Collect(ctx, &rm); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Place a pretty tight constraint on the actual reporting.
|
||||
// We don't want CLI metrics to prevent the CLI from exiting
|
||||
// so if there's some kind of issue we need to abort pretty
|
||||
// quickly.
|
||||
ctx, cancel := context.WithTimeout(ctx, exportTimeout)
|
||||
defer cancel()
|
||||
|
||||
return r.exporter.Export(ctx, &rm)
|
||||
}
|
||||
|
||||
// deltaTemporality sets the Temporality of every instrument to delta.
|
||||
//
|
||||
// This isn't really needed since we create a unique resource on each invocation,
|
||||
// but it can help with cardinality concerns for downstream processors since they can
|
||||
// perform aggregation for a time interval and then discard the data once that time
|
||||
// period has passed. Cumulative temporality would imply to the downstream processor
|
||||
// that they might receive a successive point and they may unnecessarily keep state
|
||||
// they really shouldn't.
|
||||
func deltaTemporality(_ sdkmetric.InstrumentKind) metricdata.Temporality {
|
||||
return metricdata.DeltaTemporality
|
||||
}
|
||||
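Note: the removed `telemetry.go` above wires a manual OTEL metric reader that only exports once, when the CLI shuts down, with delta temporality so downstream processors can discard data after aggregating it. A minimal sketch of the same idea using the public OTEL SDK (names here are illustrative, not part of the patch):

```go
package main

import (
	"context"
	"time"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// flushOnExit collects everything a manual reader has seen and exports it
// once, under a short timeout, mirroring cliReader.Shutdown above.
func flushOnExit(ctx context.Context, reader *sdkmetric.ManualReader, exp sdkmetric.Exporter) error {
	var rm metricdata.ResourceMetrics
	if err := reader.Collect(ctx, &rm); err != nil {
		return err
	}
	// keep the export on a tight deadline so it cannot delay CLI exit
	ctx, cancel := context.WithTimeout(ctx, 50*time.Millisecond)
	defer cancel()
	return exp.Export(ctx, &rm)
}
```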
@ -1,138 +0,0 @@
|
||||
// FIXME(jsternberg): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.19
|
||||
|
||||
package command
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
|
||||
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
|
||||
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||
)
|
||||
|
||||
const (
|
||||
otelContextFieldName string = "otel"
|
||||
otelExporterOTLPEndpoint string = "OTEL_EXPORTER_OTLP_ENDPOINT"
|
||||
debugEnvVarPrefix string = "DOCKER_CLI_"
|
||||
)
|
||||
|
||||
// dockerExporterOTLPEndpoint retrieves the OTLP endpoint used for the docker reporter
|
||||
// from the current context.
|
||||
func dockerExporterOTLPEndpoint(cli Cli) (endpoint string, secure bool) {
|
||||
meta, err := cli.ContextStore().GetMetadata(cli.CurrentContext())
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
return "", false
|
||||
}
|
||||
|
||||
var otelCfg any
|
||||
switch m := meta.Metadata.(type) {
|
||||
case DockerContext:
|
||||
otelCfg = m.AdditionalFields[otelContextFieldName]
|
||||
case map[string]any:
|
||||
otelCfg = m[otelContextFieldName]
|
||||
}
|
||||
|
||||
if otelCfg != nil {
|
||||
otelMap, ok := otelCfg.(map[string]any)
|
||||
if !ok {
|
||||
otel.Handle(errors.Errorf(
|
||||
"unexpected type for field %q: %T (expected: %T)",
|
||||
otelContextFieldName,
|
||||
otelCfg,
|
||||
otelMap,
|
||||
))
|
||||
}
|
||||
// keys from https://opentelemetry.io/docs/concepts/sdk-configuration/otlp-exporter-configuration/
|
||||
endpoint, _ = otelMap[otelExporterOTLPEndpoint].(string)
|
||||
}
|
||||
|
||||
// Override with env var value if it exists AND IS SET
|
||||
// (ignore otel defaults for this override when the key exists but is empty)
|
||||
if override := os.Getenv(debugEnvVarPrefix + otelExporterOTLPEndpoint); override != "" {
|
||||
endpoint = override
|
||||
}
|
||||
|
||||
if endpoint == "" {
|
||||
return "", false
|
||||
}
|
||||
|
||||
// Parse the endpoint. The docker config expects the endpoint to be
|
||||
// in the form of a URL to match the environment variable, but this
|
||||
// option doesn't correspond directly to WithEndpoint.
|
||||
//
|
||||
// We pretend we're the same as the environment reader.
|
||||
u, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
otel.Handle(errors.Errorf("docker otel endpoint is invalid: %s", err))
|
||||
return "", false
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "unix":
|
||||
// Unix sockets are a bit weird. OTEL seems to imply they
|
||||
// can be used as an environment variable and are handled properly,
|
||||
// but they don't seem to be as the behavior of the environment variable
|
||||
// is to strip the scheme from the endpoint, but the underlying implementation
|
||||
// needs the scheme to use the correct resolver.
|
||||
//
|
||||
// We'll just handle this in a special way and add the unix:// back to the endpoint.
|
||||
endpoint = fmt.Sprintf("unix://%s", path.Join(u.Host, u.Path))
|
||||
case "https":
|
||||
secure = true
|
||||
fallthrough
|
||||
case "http":
|
||||
endpoint = path.Join(u.Host, u.Path)
|
||||
}
|
||||
return endpoint, secure
|
||||
}
|
||||
|
||||
func dockerSpanExporter(ctx context.Context, cli Cli) []sdktrace.TracerProviderOption {
|
||||
endpoint, secure := dockerExporterOTLPEndpoint(cli)
|
||||
if endpoint == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
opts := []otlptracegrpc.Option{
|
||||
otlptracegrpc.WithEndpoint(endpoint),
|
||||
}
|
||||
if !secure {
|
||||
opts = append(opts, otlptracegrpc.WithInsecure())
|
||||
}
|
||||
|
||||
exp, err := otlptracegrpc.New(ctx, opts...)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
return nil
|
||||
}
|
||||
return []sdktrace.TracerProviderOption{sdktrace.WithBatcher(exp, sdktrace.WithExportTimeout(exportTimeout))}
|
||||
}
|
||||
|
||||
func dockerMetricExporter(ctx context.Context, cli Cli) []sdkmetric.Option {
|
||||
endpoint, secure := dockerExporterOTLPEndpoint(cli)
|
||||
if endpoint == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
opts := []otlpmetricgrpc.Option{
|
||||
otlpmetricgrpc.WithEndpoint(endpoint),
|
||||
}
|
||||
if !secure {
|
||||
opts = append(opts, otlpmetricgrpc.WithInsecure())
|
||||
}
|
||||
|
||||
exp, err := otlpmetricgrpc.New(ctx, opts...)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
return nil
|
||||
}
|
||||
return []sdkmetric.Option{sdkmetric.WithReader(newCLIReader(exp))}
|
||||
}
|
||||
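Note: the endpoint handling removed above normalizes the configured OTLP address: `unix://` sockets keep their scheme (the gRPC resolver needs it), while `http`/`https` URLs are reduced to `host/path` and only the scheme decides whether the connection is secure. A standalone sketch of that mapping (illustrative, not part of the patch; the `default` branch is an assumption):

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

// normalizeOTLPEndpoint mirrors the scheme handling in dockerExporterOTLPEndpoint above.
func normalizeOTLPEndpoint(raw string) (endpoint string, secure bool, err error) {
	u, err := url.Parse(raw)
	if err != nil {
		return "", false, err
	}
	switch u.Scheme {
	case "unix":
		// re-add the scheme so the gRPC resolver picks the unix transport
		endpoint = fmt.Sprintf("unix://%s", path.Join(u.Host, u.Path))
	case "https":
		secure = true
		fallthrough
	case "http":
		endpoint = path.Join(u.Host, u.Path)
	default:
		endpoint = raw
	}
	return endpoint, secure, nil
}
```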
@ -1,167 +0,0 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/cli/cli/version"
|
||||
"github.com/moby/term"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
)
|
||||
|
||||
// BaseCommandAttributes returns an attribute.Set containing attributes to attach to metrics/traces
|
||||
func BaseCommandAttributes(cmd *cobra.Command, streams Streams) []attribute.KeyValue {
|
||||
return append([]attribute.KeyValue{
|
||||
attribute.String("command.name", getCommandName(cmd)),
|
||||
}, stdioAttributes(streams)...)
|
||||
}
|
||||
|
||||
// InstrumentCobraCommands wraps all cobra commands' RunE funcs to set a command duration metric using otel.
|
||||
//
|
||||
// Note: this should be the last func to wrap/modify the PersistentRunE/RunE funcs before command execution.
|
||||
//
|
||||
// can also be used for spans!
|
||||
func (cli *DockerCli) InstrumentCobraCommands(cmd *cobra.Command, mp metric.MeterProvider) {
|
||||
meter := getDefaultMeter(mp)
|
||||
// If PersistentPreRunE is nil, make it execute PersistentPreRun and return nil by default
|
||||
ogPersistentPreRunE := cmd.PersistentPreRunE
|
||||
if ogPersistentPreRunE == nil {
|
||||
ogPersistentPreRun := cmd.PersistentPreRun
|
||||
//nolint:unparam // necessary because error will always be nil here
|
||||
ogPersistentPreRunE = func(cmd *cobra.Command, args []string) error {
|
||||
ogPersistentPreRun(cmd, args)
|
||||
return nil
|
||||
}
|
||||
cmd.PersistentPreRun = nil
|
||||
}
|
||||
|
||||
// wrap RunE in PersistentPreRunE so that this operation gets executed on all children commands
|
||||
cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
|
||||
// If RunE is nil, make it execute Run and return nil by default
|
||||
ogRunE := cmd.RunE
|
||||
if ogRunE == nil {
|
||||
ogRun := cmd.Run
|
||||
//nolint:unparam // necessary because error will always be nil here
|
||||
ogRunE = func(cmd *cobra.Command, args []string) error {
|
||||
ogRun(cmd, args)
|
||||
return nil
|
||||
}
|
||||
cmd.Run = nil
|
||||
}
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) error {
|
||||
// start the timer as the first step of every cobra command
|
||||
baseAttrs := BaseCommandAttributes(cmd, cli)
|
||||
stopCobraCmdTimer := startCobraCommandTimer(cmd, meter, baseAttrs)
|
||||
cmdErr := ogRunE(cmd, args)
|
||||
stopCobraCmdTimer(cmdErr)
|
||||
return cmdErr
|
||||
}
|
||||
|
||||
return ogPersistentPreRunE(cmd, args)
|
||||
}
|
||||
}
|
||||
|
||||
func startCobraCommandTimer(cmd *cobra.Command, meter metric.Meter, attrs []attribute.KeyValue) func(err error) {
|
||||
ctx := cmd.Context()
|
||||
durationCounter, _ := meter.Float64Counter(
|
||||
"command.time",
|
||||
metric.WithDescription("Measures the duration of the cobra command"),
|
||||
metric.WithUnit("ms"),
|
||||
)
|
||||
start := time.Now()
|
||||
|
||||
return func(err error) {
|
||||
duration := float64(time.Since(start)) / float64(time.Millisecond)
|
||||
cmdStatusAttrs := attributesFromError(err)
|
||||
durationCounter.Add(ctx, duration,
|
||||
metric.WithAttributes(attrs...),
|
||||
metric.WithAttributes(cmdStatusAttrs...),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func stdioAttributes(streams Streams) []attribute.KeyValue {
|
||||
// we don't wrap stderr, but we do wrap in/out
|
||||
_, stderrTty := term.GetFdInfo(streams.Err())
|
||||
return []attribute.KeyValue{
|
||||
attribute.Bool("command.stdin.isatty", streams.In().IsTerminal()),
|
||||
attribute.Bool("command.stdout.isatty", streams.Out().IsTerminal()),
|
||||
attribute.Bool("command.stderr.isatty", stderrTty),
|
||||
}
|
||||
}
|
||||
|
||||
func attributesFromError(err error) []attribute.KeyValue {
|
||||
attrs := []attribute.KeyValue{}
|
||||
exitCode := 0
|
||||
if err != nil {
|
||||
exitCode = 1
|
||||
if stderr, ok := err.(statusError); ok {
|
||||
// StatusError should only be used for errors, and all errors should
|
||||
// have a non-zero exit status, so only set this here if this value isn't 0
|
||||
if stderr.StatusCode != 0 {
|
||||
exitCode = stderr.StatusCode
|
||||
}
|
||||
}
|
||||
attrs = append(attrs, attribute.String("command.error.type", otelErrorType(err)))
|
||||
}
|
||||
attrs = append(attrs, attribute.Int("command.status.code", exitCode))
|
||||
|
||||
return attrs
|
||||
}
|
||||
|
||||
// otelErrorType returns an attribute for the error type based on the error category.
|
||||
func otelErrorType(err error) string {
|
||||
name := "generic"
|
||||
if errors.Is(err, context.Canceled) {
|
||||
name = "canceled"
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
// statusError reports an unsuccessful exit by a command.
|
||||
type statusError struct {
|
||||
Status string
|
||||
StatusCode int
|
||||
}
|
||||
|
||||
func (e statusError) Error() string {
|
||||
return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode)
|
||||
}
|
||||
|
||||
// getCommandName gets the cobra command name in the format
|
||||
// `... parentCommandName commandName` by traversing it's parent commands recursively.
|
||||
// until the root command is reached.
|
||||
//
|
||||
// Note: The root command's name is excluded. If cmd is the root cmd, return ""
|
||||
func getCommandName(cmd *cobra.Command) string {
|
||||
fullCmdName := getFullCommandName(cmd)
|
||||
i := strings.Index(fullCmdName, " ")
|
||||
if i == -1 {
|
||||
return ""
|
||||
}
|
||||
return fullCmdName[i+1:]
|
||||
}
|
||||
|
||||
// getFullCommandName gets the full cobra command name in the format
|
||||
// `... parentCommandName commandName` by traversing it's parent commands recursively
|
||||
// until the root command is reached.
|
||||
func getFullCommandName(cmd *cobra.Command) string {
|
||||
if cmd.HasParent() {
|
||||
return fmt.Sprintf("%s %s", getFullCommandName(cmd.Parent()), cmd.Name())
|
||||
}
|
||||
return cmd.Name()
|
||||
}
|
||||
|
||||
// getDefaultMeter gets the default metric.Meter for the application
|
||||
// using the given metric.MeterProvider
|
||||
func getDefaultMeter(mp metric.MeterProvider) metric.Meter {
|
||||
return mp.Meter(
|
||||
"github.com/docker/cli",
|
||||
metric.WithInstrumentationVersion(version.Version),
|
||||
)
|
||||
}
|
||||
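Note: `InstrumentCobraCommands` above works by wrapping each command's `RunE` inside `PersistentPreRunE`, so every subcommand gets timed without touching its own code. A minimal sketch of the wrapping idea (not the patch's code; `record` is a hypothetical stand-in for the OTEL counter's `Add`):

```go
package main

import (
	"time"

	"github.com/spf13/cobra"
)

// withTiming wraps a cobra RunE and reports its duration in milliseconds,
// the same shape of wrapper installed by InstrumentCobraCommands above.
func withTiming(run func(*cobra.Command, []string) error, record func(ms float64, err error)) func(*cobra.Command, []string) error {
	return func(cmd *cobra.Command, args []string) error {
		start := time.Now()
		err := run(cmd, args)
		record(float64(time.Since(start))/float64(time.Millisecond), err)
		return err
	}
}
```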
@ -1,189 +0,0 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/cli/streams"
|
||||
"github.com/spf13/cobra"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"gotest.tools/v3/assert"
|
||||
)
|
||||
|
||||
func setupCobraCommands() (*cobra.Command, *cobra.Command, *cobra.Command) {
|
||||
rootCmd := &cobra.Command{
|
||||
Use: "root [OPTIONS] COMMAND [ARG...]",
|
||||
}
|
||||
childCmd := &cobra.Command{
|
||||
Use: "child [OPTIONS] COMMAND [ARG...]",
|
||||
}
|
||||
grandchildCmd := &cobra.Command{
|
||||
Use: "grandchild [OPTIONS] COMMAND [ARG...]",
|
||||
}
|
||||
childCmd.AddCommand(grandchildCmd)
|
||||
rootCmd.AddCommand(childCmd)
|
||||
|
||||
return rootCmd, childCmd, grandchildCmd
|
||||
}
|
||||
|
||||
func TestGetFullCommandName(t *testing.T) {
|
||||
rootCmd, childCmd, grandchildCmd := setupCobraCommands()
|
||||
|
||||
t.Parallel()
|
||||
|
||||
for _, tc := range []struct {
|
||||
testName string
|
||||
cmd *cobra.Command
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
testName: "rootCmd",
|
||||
cmd: rootCmd,
|
||||
expected: "root",
|
||||
},
|
||||
{
|
||||
testName: "childCmd",
|
||||
cmd: childCmd,
|
||||
expected: "root child",
|
||||
},
|
||||
{
|
||||
testName: "grandChild",
|
||||
cmd: grandchildCmd,
|
||||
expected: "root child grandchild",
|
||||
},
|
||||
} {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
actual := getFullCommandName(tc.cmd)
|
||||
assert.Equal(t, actual, tc.expected)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCommandName(t *testing.T) {
|
||||
rootCmd, childCmd, grandchildCmd := setupCobraCommands()
|
||||
|
||||
t.Parallel()
|
||||
|
||||
for _, tc := range []struct {
|
||||
testName string
|
||||
cmd *cobra.Command
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
testName: "rootCmd",
|
||||
cmd: rootCmd,
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
testName: "childCmd",
|
||||
cmd: childCmd,
|
||||
expected: "child",
|
||||
},
|
||||
{
|
||||
testName: "grandchildCmd",
|
||||
cmd: grandchildCmd,
|
||||
expected: "child grandchild",
|
||||
},
|
||||
} {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
actual := getCommandName(tc.cmd)
|
||||
assert.Equal(t, actual, tc.expected)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStdioAttributes(t *testing.T) {
|
||||
outBuffer := new(bytes.Buffer)
|
||||
errBuffer := new(bytes.Buffer)
|
||||
t.Parallel()
|
||||
for _, tc := range []struct {
|
||||
test string
|
||||
stdinTty bool
|
||||
stdoutTty bool
|
||||
// TODO(laurazard): test stderr
|
||||
expected []attribute.KeyValue
|
||||
}{
|
||||
{
|
||||
test: "",
|
||||
expected: []attribute.KeyValue{
|
||||
attribute.Bool("command.stdin.isatty", false),
|
||||
attribute.Bool("command.stdout.isatty", false),
|
||||
attribute.Bool("command.stderr.isatty", false),
|
||||
},
|
||||
},
|
||||
{
|
||||
test: "",
|
||||
stdinTty: true,
|
||||
stdoutTty: true,
|
||||
expected: []attribute.KeyValue{
|
||||
attribute.Bool("command.stdin.isatty", true),
|
||||
attribute.Bool("command.stdout.isatty", true),
|
||||
attribute.Bool("command.stderr.isatty", false),
|
||||
},
|
||||
},
|
||||
} {
|
||||
tc := tc
|
||||
t.Run(tc.test, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
cli := &DockerCli{
|
||||
in: streams.NewIn(io.NopCloser(strings.NewReader(""))),
|
||||
out: streams.NewOut(outBuffer),
|
||||
err: errBuffer,
|
||||
}
|
||||
cli.In().SetIsTerminal(tc.stdinTty)
|
||||
cli.Out().SetIsTerminal(tc.stdoutTty)
|
||||
actual := stdioAttributes(cli)
|
||||
|
||||
assert.Check(t, reflect.DeepEqual(actual, tc.expected))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttributesFromError(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, tc := range []struct {
|
||||
testName string
|
||||
err error
|
||||
expected []attribute.KeyValue
|
||||
}{
|
||||
{
|
||||
testName: "no error",
|
||||
err: nil,
|
||||
expected: []attribute.KeyValue{
|
||||
attribute.Int("command.status.code", 0),
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "non-0 exit code",
|
||||
err: statusError{StatusCode: 127},
|
||||
expected: []attribute.KeyValue{
|
||||
attribute.String("command.error.type", "generic"),
|
||||
attribute.Int("command.status.code", 127),
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "canceled",
|
||||
err: context.Canceled,
|
||||
expected: []attribute.KeyValue{
|
||||
attribute.String("command.error.type", "canceled"),
|
||||
attribute.Int("command.status.code", 1),
|
||||
},
|
||||
},
|
||||
} {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
actual := attributesFromError(tc.err)
|
||||
assert.Check(t, reflect.DeepEqual(actual, tc.expected))
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -8,7 +8,6 @@ import (
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/image"
"github.com/docker/cli/cli/trust"
"github.com/docker/docker/errdefs"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/theupdateframework/notary/client"
@ -46,10 +45,12 @@ func revokeTrust(ctx context.Context, dockerCLI command.Cli, remote string, opti
if imgRefAndAuth.Tag() == "" && !options.forceYes {
deleteRemote, err := command.PromptForConfirmation(ctx, dockerCLI.In(), dockerCLI.Out(), fmt.Sprintf("Please confirm you would like to delete all signature data for %s?", remote))
if err != nil {
return err
fmt.Fprintf(dockerCLI.Out(), "\nAborting action.\n")
return errors.Wrap(err, "aborting action")
}
if !deleteRemote {
return errdefs.Cancelled(errors.New("trust revoke has been cancelled"))
fmt.Fprintf(dockerCLI.Out(), "\nAborting action.\n")
return nil
}
}
@ -5,6 +5,7 @@ import (
"io"
"testing"

"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/trust"
"github.com/docker/cli/internal/test"
"github.com/docker/cli/internal/test/notary"
@ -57,8 +58,6 @@ func TestTrustRevokeCommandErrors(t *testing.T) {
}

func TestTrustRevokeCommand(t *testing.T) {
revokeCancelledError := "trust revoke has been cancelled"

testCases := []struct {
doc string
notaryRepository func(trust.ImageRefAndAuth, []string) (client.Repository, error)
@ -70,8 +69,7 @@ func TestTrustRevokeCommand(t *testing.T) {
doc: "OfflineErrors_Confirm",
notaryRepository: notary.GetOfflineNotaryRepository,
args: []string{"reg-name.io/image"},
expectedMessage: "Please confirm you would like to delete all signature data for reg-name.io/image? [y/N] ",
expectedErr: revokeCancelledError,
expectedMessage: "Please confirm you would like to delete all signature data for reg-name.io/image? [y/N] \nAborting action.",
},
{
doc: "OfflineErrors_Offline",
@ -89,8 +87,7 @@ func TestTrustRevokeCommand(t *testing.T) {
doc: "UninitializedErrors_Confirm",
notaryRepository: notary.GetUninitializedNotaryRepository,
args: []string{"reg-name.io/image"},
expectedMessage: "Please confirm you would like to delete all signature data for reg-name.io/image? [y/N] ",
expectedErr: revokeCancelledError,
expectedMessage: "Please confirm you would like to delete all signature data for reg-name.io/image? [y/N] \nAborting action.",
},
{
doc: "UninitializedErrors_NoTrustData",
@ -108,8 +105,7 @@ func TestTrustRevokeCommand(t *testing.T) {
doc: "EmptyNotaryRepo_Confirm",
notaryRepository: notary.GetEmptyTargetsNotaryRepository,
args: []string{"reg-name.io/image"},
expectedMessage: "Please confirm you would like to delete all signature data for reg-name.io/image? [y/N] ",
expectedErr: revokeCancelledError,
expectedMessage: "Please confirm you would like to delete all signature data for reg-name.io/image? [y/N] \nAborting action.",
},
{
doc: "EmptyNotaryRepo_NoSignedTags",
@ -127,8 +123,7 @@ func TestTrustRevokeCommand(t *testing.T) {
doc: "AllSigConfirmation",
notaryRepository: notary.GetEmptyTargetsNotaryRepository,
args: []string{"alpine"},
expectedMessage: "Please confirm you would like to delete all signature data for alpine? [y/N] ",
expectedErr: revokeCancelledError,
expectedMessage: "Please confirm you would like to delete all signature data for alpine? [y/N] \nAborting action.",
},
}

@ -141,9 +136,9 @@ func TestTrustRevokeCommand(t *testing.T) {
cmd.SetOut(io.Discard)
if tc.expectedErr != "" {
assert.ErrorContains(t, cmd.Execute(), tc.expectedErr)
} else {
assert.NilError(t, cmd.Execute())
return
}
assert.NilError(t, cmd.Execute())
assert.Check(t, is.Contains(cli.OutBuffer().String(), tc.expectedMessage))
})
}
@ -164,6 +159,10 @@ func TestRevokeTrustPromptTermination(t *testing.T) {
cli := test.NewFakeCli(&fakeClient{})
cmd := newRevokeCommand(cli)
cmd.SetArgs([]string{"example/trust-demo"})
test.TerminatePrompt(ctx, t, cmd, cli)
test.TerminatePrompt(ctx, t, cmd, cli, func(t *testing.T, err error) {
t.Helper()
assert.ErrorIs(t, err, command.ErrPromptTerminated)
})
assert.Equal(t, cli.ErrBuffer().String(), "")
golden.Assert(t, cli.OutBuffer().String(), "trust-revoke-prompt-termination.golden")
}
@ -132,12 +132,10 @@ func removeSingleSigner(ctx context.Context, dockerCLI command.Cli, repoName, si
}

ok, err := maybePromptForSignerRemoval(ctx, dockerCLI, repoName, signerName, isLastSigner, forceYes)
if err != nil {
if err != nil || !ok {
fmt.Fprintf(dockerCLI.Out(), "\nAborting action.\n")
return false, err
}
if !ok {
return false, nil
}

if err := notaryRepo.RemoveDelegationKeys(releasesRoleTUFName, role.KeyIDs); err != nil {
return false, err

@ -1 +1,2 @@
Please confirm you would like to delete all signature data for example/trust-demo? [y/N]
Aborting action.
@ -19,7 +19,6 @@ import (
"github.com/docker/docker/api/types/filters"
mounttypes "github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/moby/sys/sequential"
"github.com/pkg/errors"
"github.com/spf13/pflag"
@ -76,7 +75,9 @@ func PrettyPrint(i any) string {
}
}

var ErrPromptTerminated = errdefs.Cancelled(errors.New("prompt terminated"))
type PromptError error

var ErrPromptTerminated = PromptError(errors.New("prompt terminated"))

// PromptForConfirmation requests and checks confirmation from the user.
// This will display the provided message followed by ' [y/N] '. If the user
@ -122,8 +123,6 @@ func PromptForConfirmation(ctx context.Context, ins io.Reader, outs io.Writer, m

select {
case <-notifyCtx.Done():
// print a newline on termination
_, _ = fmt.Fprintln(outs, "")
return false, ErrPromptTerminated
case r := <-result:
return r, nil
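Note: callers are expected to distinguish "the user answered no" from "the prompt was terminated" via `ErrPromptTerminated`. A minimal usage sketch, assuming a plain interrupt-cancelled context (not part of the patch):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"os"
	"os/signal"

	"github.com/docker/cli/cli/command"
)

// ask calls PromptForConfirmation with a context that is cancelled on Ctrl-C
// and reports termination separately from a plain "no" answer.
func ask(msg string) (bool, error) {
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()

	ok, err := command.PromptForConfirmation(ctx, os.Stdin, os.Stdout, msg)
	if errors.Is(err, command.ErrPromptTerminated) {
		fmt.Fprintln(os.Stderr, "prompt terminated")
		return false, err
	}
	return ok, err
}
```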
@ -106,68 +106,120 @@ func TestPromptForConfirmation(t *testing.T) {
|
||||
}()
|
||||
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
f func() error
|
||||
expected promptResult
|
||||
desc string
|
||||
f func(*testing.T, context.Context, chan promptResult)
|
||||
}{
|
||||
{"SIGINT", func() error {
|
||||
{"SIGINT", func(t *testing.T, ctx context.Context, c chan promptResult) {
|
||||
t.Helper()
|
||||
|
||||
syscall.Kill(syscall.Getpid(), syscall.SIGINT)
|
||||
return nil
|
||||
}, promptResult{false, command.ErrPromptTerminated}},
|
||||
{"no", func() error {
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Fatal("PromptForConfirmation did not return after SIGINT")
|
||||
case r := <-c:
|
||||
assert.Check(t, !r.result)
|
||||
assert.ErrorContains(t, r.err, "prompt terminated")
|
||||
}
|
||||
}},
|
||||
{"no", func(t *testing.T, ctx context.Context, c chan promptResult) {
|
||||
t.Helper()
|
||||
|
||||
_, err := fmt.Fprint(promptWriter, "n\n")
|
||||
return err
|
||||
}, promptResult{false, nil}},
|
||||
{"yes", func() error {
|
||||
assert.NilError(t, err)
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Fatal("PromptForConfirmation did not return after user input `n`")
|
||||
case r := <-c:
|
||||
assert.Check(t, !r.result)
|
||||
assert.NilError(t, r.err)
|
||||
}
|
||||
}},
|
||||
{"yes", func(t *testing.T, ctx context.Context, c chan promptResult) {
|
||||
t.Helper()
|
||||
|
||||
_, err := fmt.Fprint(promptWriter, "y\n")
|
||||
return err
|
||||
}, promptResult{true, nil}},
|
||||
{"any", func() error {
|
||||
assert.NilError(t, err)
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Fatal("PromptForConfirmation did not return after user input `y`")
|
||||
case r := <-c:
|
||||
assert.Check(t, r.result)
|
||||
assert.NilError(t, r.err)
|
||||
}
|
||||
}},
|
||||
{"any", func(t *testing.T, ctx context.Context, c chan promptResult) {
|
||||
t.Helper()
|
||||
|
||||
_, err := fmt.Fprint(promptWriter, "a\n")
|
||||
return err
|
||||
}, promptResult{false, nil}},
|
||||
{"with space", func() error {
|
||||
assert.NilError(t, err)
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Fatal("PromptForConfirmation did not return after user input `a`")
|
||||
case r := <-c:
|
||||
assert.Check(t, !r.result)
|
||||
assert.NilError(t, r.err)
|
||||
}
|
||||
}},
|
||||
{"with space", func(t *testing.T, ctx context.Context, c chan promptResult) {
|
||||
t.Helper()
|
||||
|
||||
_, err := fmt.Fprint(promptWriter, " y\n")
|
||||
return err
|
||||
}, promptResult{true, nil}},
|
||||
{"reader closed", func() error {
|
||||
return promptReader.Close()
|
||||
}, promptResult{false, nil}},
|
||||
assert.NilError(t, err)
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Fatal("PromptForConfirmation did not return after user input ` y`")
|
||||
case r := <-c:
|
||||
assert.Check(t, r.result)
|
||||
assert.NilError(t, r.err)
|
||||
}
|
||||
}},
|
||||
{"reader closed", func(t *testing.T, ctx context.Context, c chan promptResult) {
|
||||
t.Helper()
|
||||
|
||||
assert.NilError(t, promptReader.Close())
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Fatal("PromptForConfirmation did not return after promptReader was closed")
|
||||
case r := <-c:
|
||||
assert.Check(t, !r.result)
|
||||
assert.NilError(t, r.err)
|
||||
}
|
||||
}},
|
||||
} {
|
||||
t.Run("case="+tc.desc, func(t *testing.T) {
|
||||
buf.Reset()
|
||||
promptReader, promptWriter = io.Pipe()
|
||||
|
||||
wroteHook := make(chan struct{}, 1)
|
||||
defer close(wroteHook)
|
||||
promptOut := test.NewWriterWithHook(bufioWriter, func(p []byte) {
|
||||
wroteHook <- struct{}{}
|
||||
})
|
||||
|
||||
result := make(chan promptResult, 1)
|
||||
defer close(result)
|
||||
go func() {
|
||||
r, err := command.PromptForConfirmation(ctx, promptReader, promptOut, "")
|
||||
result <- promptResult{r, err}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
case <-wroteHook:
|
||||
}
|
||||
// wait for the Prompt to write to the buffer
|
||||
pollForPromptOutput(ctx, t, wroteHook)
|
||||
drainChannel(ctx, wroteHook)
|
||||
|
||||
assert.NilError(t, bufioWriter.Flush())
|
||||
assert.Equal(t, strings.TrimSpace(buf.String()), "Are you sure you want to proceed? [y/N]")
|
||||
|
||||
// wait for the Prompt to write to the buffer
|
||||
drainChannel(ctx, wroteHook)
|
||||
resultCtx, resultCancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
defer resultCancel()
|
||||
|
||||
assert.NilError(t, tc.f())
|
||||
|
||||
select {
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
t.Fatal("timeout waiting for prompt result")
|
||||
case r := <-result:
|
||||
assert.Equal(t, r, tc.expected)
|
||||
}
|
||||
tc.f(t, resultCtx, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -183,3 +235,20 @@ func drainChannel(ctx context.Context, ch <-chan struct{}) {
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func pollForPromptOutput(ctx context.Context, t *testing.T, wroteHook <-chan struct{}) {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Fatal("Prompt output was not written to before ctx was cancelled")
|
||||
return
|
||||
case <-wroteHook:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
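Note: the rewritten prompt tests above drive `PromptForConfirmation` through a pipe in a goroutine and read the outcome from a channel, so each case can assert on both result and error without blocking. A compact sketch of that pattern (illustrative only; helper names are hypothetical):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"testing"
	"time"

	"github.com/docker/cli/cli/command"
)

// promptResult mirrors the small result struct used in the test above.
type promptResult struct {
	result bool
	err    error
}

// runPromptCase feeds input to the prompt over an io.Pipe and returns what it decided.
func runPromptCase(t *testing.T, input string) promptResult {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	r, w := io.Pipe()
	results := make(chan promptResult, 1)
	go func() {
		ok, err := command.PromptForConfirmation(ctx, r, io.Discard, "proceed?")
		results <- promptResult{ok, err}
	}()
	fmt.Fprint(w, input)

	select {
	case <-ctx.Done():
		t.Fatal("prompt did not return")
		return promptResult{}
	case res := <-results:
		return res
	}
}
```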
@ -32,6 +32,10 @@ func NewPruneCommand(dockerCli command.Cli) *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
spaceReclaimed, output, err := runPrune(cmd.Context(), dockerCli, options)
if err != nil {
if errdefs.IsCancelled(err) {
fmt.Fprintln(dockerCli.Out(), output)
return nil
}
return err
}
if output != "" {
@ -77,12 +81,8 @@ func runPrune(ctx context.Context, dockerCli command.Cli, options pruneOptions)
warning = allVolumesWarning
}
if !options.force {
r, err := command.PromptForConfirmation(ctx, dockerCli.In(), dockerCli.Out(), warning)
if err != nil {
return 0, "", err
}
if !r {
return 0, "", errdefs.Cancelled(errors.New("volume prune has been cancelled"))
if r, err := command.PromptForConfirmation(ctx, dockerCli.In(), dockerCli.Out(), warning); !r || err != nil {
return 0, "", errdefs.Cancelled(errors.New("user cancelled operation"))
}
}
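Note: the `RunE` above treats a cancelled prompt as a normal exit rather than a failure: the accumulated output is still printed and `nil` is returned. A small sketch of that branch (illustrative, not part of the patch):

```go
package main

import (
	"fmt"
	"io"

	"github.com/docker/docker/errdefs"
)

// reportPrune shows the error handling above: a cancelled prune prints its
// output and succeeds, any other error propagates.
func reportPrune(out io.Writer, output string, err error) error {
	if err != nil {
		if errdefs.IsCancelled(err) {
			fmt.Fprintln(out, output)
			return nil
		}
		return err
	}
	if output != "" {
		fmt.Fprintln(out, output)
	}
	return nil
}
```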
@ -155,7 +155,6 @@ func TestVolumePrunePromptYes(t *testing.T) {

cli.SetIn(streams.NewIn(io.NopCloser(strings.NewReader(input))))
cmd := NewPruneCommand(cli)
cmd.SetArgs([]string{})
assert.NilError(t, cmd.Execute())
golden.Assert(t, cli.OutBuffer().String(), "volume-prune-yes.golden")
}
@ -172,8 +171,7 @@ func TestVolumePrunePromptNo(t *testing.T) {

cli.SetIn(streams.NewIn(io.NopCloser(strings.NewReader(input))))
cmd := NewPruneCommand(cli)
cmd.SetArgs([]string{})
assert.ErrorContains(t, cmd.Execute(), "volume prune has been cancelled")
assert.NilError(t, cmd.Execute())
golden.Assert(t, cli.OutBuffer().String(), "volume-prune-no.golden")
}
}
@ -198,7 +196,7 @@ func TestVolumePrunePromptTerminate(t *testing.T) {
})

cmd := NewPruneCommand(cli)
cmd.SetArgs([]string{})
test.TerminatePrompt(ctx, t, cmd, cli)
test.TerminatePrompt(ctx, t, cmd, cli, nil)

golden.Assert(t, cli.OutBuffer().String(), "volume-prune-terminate.golden")
}

@ -1,2 +1,2 @@
WARNING! This will remove anonymous local volumes not used by at least one container.
Are you sure you want to continue? [y/N]
Are you sure you want to continue? [y/N]
@ -16,16 +16,6 @@ func TestNamespaceScope(t *testing.T) {
assert.Check(t, is.Equal("foo_bar", scoped))
}

func TestNamespaceDescope(t *testing.T) {
descoped := Namespace{name: "foo"}.Descope("foo_bar")
assert.Check(t, is.Equal("bar", descoped))
}

func TestNamespaceName(t *testing.T) {
namespaceName := Namespace{name: "foo"}.Name()
assert.Check(t, is.Equal("foo", namespaceName))
}

func TestAddStackLabel(t *testing.T) {
labels := map[string]string{
"something": "labeled",

@ -61,15 +61,6 @@ func TestConvertEnvironment(t *testing.T) {
assert.Check(t, is.DeepEqual([]string{"foo=bar", "key=value"}, env))
}

func TestConvertEnvironmentWhenNilValueExists(t *testing.T) {
source := map[string]*string{
"key": strPtr("value"),
"keyWithNoValue": nil,
}
env := convertEnvironment(source)
assert.Check(t, is.DeepEqual([]string{"key=value", "keyWithNoValue"}, env))
}

func TestConvertExtraHosts(t *testing.T) {
source := composetypes.HostsList{
"zulu:127.0.0.2",
@ -9,55 +9,6 @@ import (
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
)
|
||||
|
||||
func TestVolumesWithMultipleServiceVolumeConfigs(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
|
||||
serviceVolumes := []composetypes.ServiceVolumeConfig{
|
||||
{
|
||||
Type: "volume",
|
||||
Target: "/foo",
|
||||
},
|
||||
{
|
||||
Type: "volume",
|
||||
Target: "/foo/bar",
|
||||
},
|
||||
}
|
||||
|
||||
expected := []mount.Mount{
|
||||
{
|
||||
Type: "volume",
|
||||
Target: "/foo",
|
||||
},
|
||||
{
|
||||
Type: "volume",
|
||||
Target: "/foo/bar",
|
||||
},
|
||||
}
|
||||
|
||||
mnt, err := Volumes(serviceVolumes, volumes{}, namespace)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(expected, mnt))
|
||||
}
|
||||
|
||||
func TestVolumesWithMultipleServiceVolumeConfigsWithUndefinedVolumeConfig(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
|
||||
serviceVolumes := []composetypes.ServiceVolumeConfig{
|
||||
{
|
||||
Type: "volume",
|
||||
Source: "foo",
|
||||
Target: "/foo",
|
||||
},
|
||||
{
|
||||
Type: "volume",
|
||||
Target: "/foo/bar",
|
||||
},
|
||||
}
|
||||
|
||||
_, err := Volumes(serviceVolumes, volumes{}, namespace)
|
||||
assert.Error(t, err, "undefined volume \"foo\"")
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountAnonymousVolume(t *testing.T) {
|
||||
config := composetypes.ServiceVolumeConfig{
|
||||
Type: "volume",
|
||||
@ -153,49 +104,6 @@ func TestConvertVolumeToMountConflictingOptionsTmpfsInBind(t *testing.T) {
|
||||
assert.Error(t, err, "tmpfs options are incompatible with type bind")
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountConflictingOptionsClusterInVolume(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
|
||||
config := composetypes.ServiceVolumeConfig{
|
||||
Type: "volume",
|
||||
Target: "/target",
|
||||
Cluster: &composetypes.ServiceVolumeCluster{},
|
||||
}
|
||||
_, err := convertVolumeToMount(config, volumes{}, namespace)
|
||||
assert.Error(t, err, "cluster options are incompatible with type volume")
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountConflictingOptionsClusterInBind(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
|
||||
config := composetypes.ServiceVolumeConfig{
|
||||
Type: "bind",
|
||||
Source: "/foo",
|
||||
Target: "/target",
|
||||
Bind: &composetypes.ServiceVolumeBind{
|
||||
Propagation: "slave",
|
||||
},
|
||||
Cluster: &composetypes.ServiceVolumeCluster{},
|
||||
}
|
||||
_, err := convertVolumeToMount(config, volumes{}, namespace)
|
||||
assert.Error(t, err, "cluster options are incompatible with type bind")
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountConflictingOptionsClusterInTmpfs(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
|
||||
config := composetypes.ServiceVolumeConfig{
|
||||
Type: "tmpfs",
|
||||
Target: "/target",
|
||||
Tmpfs: &composetypes.ServiceVolumeTmpfs{
|
||||
Size: 1000,
|
||||
},
|
||||
Cluster: &composetypes.ServiceVolumeCluster{},
|
||||
}
|
||||
_, err := convertVolumeToMount(config, volumes{}, namespace)
|
||||
assert.Error(t, err, "cluster options are incompatible with type tmpfs")
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountConflictingOptionsBindInTmpfs(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
|
||||
@ -224,50 +132,6 @@ func TestConvertVolumeToMountConflictingOptionsVolumeInTmpfs(t *testing.T) {
|
||||
assert.Error(t, err, "volume options are incompatible with type tmpfs")
|
||||
}
|
||||
|
||||
func TestHandleNpipeToMountAnonymousNpipe(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
|
||||
config := composetypes.ServiceVolumeConfig{
|
||||
Type: "npipe",
|
||||
Target: "/target",
|
||||
Volume: &composetypes.ServiceVolumeVolume{
|
||||
NoCopy: true,
|
||||
},
|
||||
}
|
||||
_, err := convertVolumeToMount(config, volumes{}, namespace)
|
||||
assert.Error(t, err, "invalid npipe source, source cannot be empty")
|
||||
}
|
||||
|
||||
func TestHandleNpipeToMountConflictingOptionsTmpfsInNpipe(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
|
||||
config := composetypes.ServiceVolumeConfig{
|
||||
Type: "npipe",
|
||||
Source: "/foo",
|
||||
Target: "/target",
|
||||
Tmpfs: &composetypes.ServiceVolumeTmpfs{
|
||||
Size: 1000,
|
||||
},
|
||||
}
|
||||
_, err := convertVolumeToMount(config, volumes{}, namespace)
|
||||
assert.Error(t, err, "tmpfs options are incompatible with type npipe")
|
||||
}
|
||||
|
||||
func TestHandleNpipeToMountConflictingOptionsVolumeInNpipe(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
|
||||
config := composetypes.ServiceVolumeConfig{
|
||||
Type: "npipe",
|
||||
Source: "/foo",
|
||||
Target: "/target",
|
||||
Volume: &composetypes.ServiceVolumeVolume{
|
||||
NoCopy: true,
|
||||
},
|
||||
}
|
||||
_, err := convertVolumeToMount(config, volumes{}, namespace)
|
||||
assert.Error(t, err, "volume options are incompatible with type npipe")
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountNamedVolume(t *testing.T) {
|
||||
stackVolumes := volumes{
|
||||
"normal": composetypes.VolumeConfig{
|
||||
@ -480,27 +344,6 @@ func TestConvertTmpfsToMountVolumeWithSource(t *testing.T) {
|
||||
assert.Error(t, err, "invalid tmpfs source, source must be empty")
|
||||
}
|
||||
|
||||
func TestHandleNpipeToMountBind(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
expected := mount.Mount{
|
||||
Type: mount.TypeNamedPipe,
|
||||
Source: "/bar",
|
||||
Target: "/foo",
|
||||
ReadOnly: true,
|
||||
BindOptions: &mount.BindOptions{Propagation: mount.PropagationShared},
|
||||
}
|
||||
config := composetypes.ServiceVolumeConfig{
|
||||
Type: "npipe",
|
||||
Source: "/bar",
|
||||
Target: "/foo",
|
||||
ReadOnly: true,
|
||||
Bind: &composetypes.ServiceVolumeBind{Propagation: "shared"},
|
||||
}
|
||||
mnt, err := convertVolumeToMount(config, volumes{}, namespace)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(expected, mnt))
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountAnonymousNpipe(t *testing.T) {
|
||||
config := composetypes.ServiceVolumeConfig{
|
||||
Type: "npipe",
|
||||
@ -584,98 +427,3 @@ func TestConvertVolumeMountClusterGroup(t *testing.T) {
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(expected, mnt))
|
||||
}
|
||||
|
||||
func TestHandleClusterToMountAnonymousCluster(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
|
||||
config := composetypes.ServiceVolumeConfig{
|
||||
Type: "cluster",
|
||||
Target: "/target",
|
||||
Volume: &composetypes.ServiceVolumeVolume{
|
||||
NoCopy: true,
|
||||
},
|
||||
}
|
||||
_, err := convertVolumeToMount(config, volumes{}, namespace)
|
||||
assert.Error(t, err, "invalid cluster source, source cannot be empty")
|
||||
}
|
||||
|
||||
func TestHandleClusterToMountConflictingOptionsTmpfsInCluster(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
|
||||
config := composetypes.ServiceVolumeConfig{
|
||||
Type: "cluster",
|
||||
Source: "/foo",
|
||||
Target: "/target",
|
||||
Tmpfs: &composetypes.ServiceVolumeTmpfs{
|
||||
Size: 1000,
|
||||
},
|
||||
}
|
||||
_, err := convertVolumeToMount(config, volumes{}, namespace)
|
||||
assert.Error(t, err, "tmpfs options are incompatible with type cluster")
|
||||
}
|
||||
|
||||
func TestHandleClusterToMountConflictingOptionsVolumeInCluster(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
|
||||
config := composetypes.ServiceVolumeConfig{
|
||||
Type: "cluster",
|
||||
Source: "/foo",
|
||||
Target: "/target",
|
||||
Volume: &composetypes.ServiceVolumeVolume{
|
||||
NoCopy: true,
|
||||
},
|
||||
}
|
||||
_, err := convertVolumeToMount(config, volumes{}, namespace)
|
||||
assert.Error(t, err, "volume options are incompatible with type cluster")
|
||||
}
|
||||
|
||||
func TestHandleClusterToMountWithUndefinedVolumeConfig(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
|
||||
config := composetypes.ServiceVolumeConfig{
|
||||
Type: "cluster",
|
||||
Source: "foo",
|
||||
Target: "/srv",
|
||||
}
|
||||
|
||||
_, err := convertVolumeToMount(config, volumes{}, namespace)
|
||||
assert.Error(t, err, "undefined volume \"foo\"")
|
||||
}
|
||||
|
||||
func TestHandleClusterToMountWithVolumeConfigName(t *testing.T) {
|
||||
stackVolumes := volumes{
|
||||
"foo": composetypes.VolumeConfig{
|
||||
Name: "bar",
|
||||
},
|
||||
}
|
||||
|
||||
config := composetypes.ServiceVolumeConfig{
|
||||
Type: "cluster",
|
||||
Source: "foo",
|
||||
Target: "/srv",
|
||||
}
|
||||
|
||||
expected := mount.Mount{
|
||||
Type: mount.TypeCluster,
|
||||
Source: "bar",
|
||||
Target: "/srv",
|
||||
ClusterOptions: &mount.ClusterOptions{},
|
||||
}
|
||||
|
||||
mnt, err := convertVolumeToMount(config, stackVolumes, NewNamespace("foo"))
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(expected, mnt))
|
||||
}
|
||||
|
||||
func TestHandleClusterToMountBind(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
config := composetypes.ServiceVolumeConfig{
|
||||
Type: "cluster",
|
||||
Source: "/bar",
|
||||
Target: "/foo",
|
||||
ReadOnly: true,
|
||||
Bind: &composetypes.ServiceVolumeBind{Propagation: "shared"},
|
||||
}
|
||||
_, err := convertVolumeToMount(config, volumes{}, namespace)
|
||||
assert.Error(t, err, "bind options are incompatible with type cluster")
|
||||
}
|
||||
|
||||
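Note: the cluster-mount tests above expect a compose service volume whose stack-level config renames it to resolve into a `mount.Mount` with `ClusterOptions` set. Shown here only to make that mapping concrete (values taken from the test above; constructing the struct directly is my own illustration):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	// expected result for a "cluster" volume with source "foo" mapped to name "bar"
	expected := mount.Mount{
		Type:           mount.TypeCluster,
		Source:         "bar",
		Target:         "/srv",
		ClusterOptions: &mount.ClusterOptions{},
	}
	fmt.Printf("%+v\n", expected)
}
```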
@ -41,7 +41,6 @@ type ConfigFile struct {
CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"`
Plugins map[string]map[string]string `json:"plugins,omitempty"`
Aliases map[string]string `json:"aliases,omitempty"`
Features map[string]string `json:"features,omitempty"`
}

// ProxyConfig contains proxy configuration settings

@ -4,7 +4,6 @@ import (
"os"

"github.com/sirupsen/logrus"
"go.opentelemetry.io/otel"
)

// Enable sets the DEBUG env var to true
@ -25,13 +24,3 @@ func Disable() {
func IsEnabled() bool {
return os.Getenv("DEBUG") != ""
}

// OTELErrorHandler is an error handler for OTEL that
// uses the CLI debug package to log messages when an error
// occurs.
//
// The default is to log to the debug level which is only
// enabled when debugging is enabled.
var OTELErrorHandler otel.ErrorHandler = otel.ErrorHandlerFunc(func(err error) {
	logrus.WithError(err).Debug("otel error")
})
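Note: `OTELErrorHandler` is only useful once it is registered as the global handler, which the `cmd/docker` changes below do at startup. A minimal sketch of that wiring (the function name here is hypothetical):

```go
package main

import (
	"github.com/docker/cli/cli/debug"
	"go.opentelemetry.io/otel"
)

// setupOTELErrors routes OTEL's internal errors through the CLI debug logger,
// so they only surface when debugging is enabled.
func setupOTELErrors() {
	otel.SetErrorHandler(debug.OTELErrorHandler)
}
```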
@ -1,8 +1,8 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
@ -14,30 +14,25 @@ import (
|
||||
"github.com/docker/cli/cli-plugins/socket"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/commands"
|
||||
"github.com/docker/cli/cli/debug"
|
||||
cliflags "github.com/docker/cli/cli/flags"
|
||||
"github.com/docker/cli/cli/version"
|
||||
platformsignals "github.com/docker/cli/cmd/docker/internal/signals"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"go.opentelemetry.io/otel"
|
||||
)
|
||||
|
||||
func main() {
|
||||
ctx := context.Background()
|
||||
dockerCli, err := command.NewDockerCli(command.WithBaseContext(ctx))
|
||||
dockerCli, err := command.NewDockerCli()
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
logrus.SetOutput(dockerCli.Err())
|
||||
otel.SetErrorHandler(debug.OTELErrorHandler)
|
||||
|
||||
if err := runDocker(ctx, dockerCli); err != nil {
|
||||
if err := runDocker(dockerCli); err != nil {
|
||||
if sterr, ok := err.(cli.StatusError); ok {
|
||||
if sterr.Status != "" {
|
||||
fmt.Fprintln(dockerCli.Err(), sterr.Status)
|
||||
@ -49,9 +44,6 @@ func main() {
|
||||
}
|
||||
os.Exit(sterr.StatusCode)
|
||||
}
|
||||
if errdefs.IsCancelled(err) {
|
||||
os.Exit(0)
|
||||
}
|
||||
fmt.Fprintln(dockerCli.Err(), err)
|
||||
os.Exit(1)
|
||||
}
|
||||
@ -229,46 +221,38 @@ func tryPluginRun(dockerCli command.Cli, cmd *cobra.Command, subcommand string,
|
||||
return err
|
||||
}
|
||||
|
||||
// Establish the plugin socket, adding it to the environment under a
|
||||
// well-known key if successful.
|
||||
srv, err := socket.NewPluginServer(nil)
|
||||
// Establish the plugin socket, adding it to the environment under a well-known key if successful.
|
||||
var conn *net.UnixConn
|
||||
listener, err := socket.SetupConn(&conn)
|
||||
if err == nil {
|
||||
plugincmd.Env = append(plugincmd.Env, socket.EnvKey+"="+srv.Addr().String())
|
||||
envs = append(envs, socket.EnvKey+"="+listener.Addr().String())
|
||||
defer listener.Close()
|
||||
}
|
||||
|
||||
// Set additional environment variables specified by the caller.
|
||||
plugincmd.Env = append(plugincmd.Env, envs...)
|
||||
plugincmd.Env = append(envs, plugincmd.Env...)
|
||||
|
||||
// Background signal handling logic: block on the signals channel, and
|
||||
// notify the plugin via the PluginServer (or signal) as appropriate.
|
||||
const exitLimit = 3
|
||||
|
||||
signals := make(chan os.Signal, exitLimit)
|
||||
signal.Notify(signals, platformsignals.TerminationSignals...)
|
||||
// signal handling goroutine: listen on signals channel, and if conn is
|
||||
// non-nil, attempt to close it to let the plugin know to exit. Regardless
|
||||
// of whether we successfully signal the plugin or not, after 3 SIGINTs,
|
||||
// we send a SIGKILL to the plugin process and exit
|
||||
go func() {
|
||||
retries := 0
|
||||
for range signals {
|
||||
// If stdin is a TTY, the kernel will forward
|
||||
// signals to the subprocess because the shared
|
||||
// pgid makes the TTY a controlling terminal.
|
||||
//
|
||||
// The plugin should have it's own copy of this
|
||||
// termination logic, and exit after 3 retries
|
||||
// on it's own.
|
||||
if dockerCli.Out().IsTerminal() {
|
||||
// running attached to a terminal, so the plugin will already
|
||||
// receive signals due to sharing a pgid with the parent CLI
|
||||
continue
|
||||
}
|
||||
|
||||
// Terminate the plugin server, which will
|
||||
// close all connections with plugin
|
||||
// subprocesses, and signal them to exit.
|
||||
//
|
||||
// Repeated invocations will result in EINVAL,
|
||||
// or EBADF; but that is fine for our purposes.
|
||||
_ = srv.Close()
|
||||
|
||||
// If we're still running after 3 interruptions
|
||||
// (SIGINT/SIGTERM), send a SIGKILL to the plugin as a
|
||||
// final attempt to terminate, and exit.
|
||||
if conn != nil {
|
||||
if err := conn.Close(); err != nil {
|
||||
_, _ = fmt.Fprintf(dockerCli.Err(), "failed to signal plugin to close: %v\n", err)
|
||||
}
|
||||
conn = nil
|
||||
}
|
||||
retries++
|
||||
if retries >= exitLimit {
|
||||
_, _ = fmt.Fprintf(dockerCli.Err(), "got %d SIGTERM/SIGINTs, forcefully exiting\n", retries)
|
||||
@ -294,8 +278,7 @@ func tryPluginRun(dockerCli command.Cli, cmd *cobra.Command, subcommand string,
|
||||
return nil
|
||||
}
|
||||
|
||||
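Note: the signal-handling goroutine above tolerates up to three interrupts: each one tells the plugin to shut down (by closing the server or connection), and the third forcefully kills the plugin process and exits. A trimmed sketch of that loop (illustrative; `notify` and `killPlugin` are hypothetical stand-ins for closing the plugin server and `plugincmd.Process.Kill`):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// watchSignals forwards termination signals to the plugin and gives up after three.
func watchSignals(notify func(), killPlugin func()) {
	const exitLimit = 3
	signals := make(chan os.Signal, exitLimit)
	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)

	retries := 0
	for range signals {
		notify() // ask the plugin to exit
		retries++
		if retries >= exitLimit {
			fmt.Fprintf(os.Stderr, "got %d SIGTERM/SIGINTs, forcefully exiting\n", retries)
			killPlugin()
			os.Exit(1)
		}
	}
}
```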
//nolint:gocyclo
|
||||
func runDocker(ctx context.Context, dockerCli *command.DockerCli) error {
|
||||
func runDocker(dockerCli *command.DockerCli) error {
|
||||
tcmd := newDockerCommand(dockerCli)
|
||||
|
||||
cmd, args, err := tcmd.HandleGlobalFlags()
|
||||
@ -307,11 +290,6 @@ func runDocker(ctx context.Context, dockerCli *command.DockerCli) error {
|
||||
return err
|
||||
}
|
||||
|
||||
mp := dockerCli.MeterProvider(ctx)
|
||||
defer mp.Shutdown(ctx)
|
||||
otel.SetMeterProvider(mp)
|
||||
dockerCli.InstrumentCobraCommands(cmd, mp)
|
||||
|
||||
var envs []string
|
||||
args, os.Args, envs, err = processAliases(dockerCli, cmd, args, os.Args)
|
||||
if err != nil {
|
||||
@ -328,43 +306,23 @@ func runDocker(ctx context.Context, dockerCli *command.DockerCli) error {
|
||||
}
|
||||
}
|
||||
|
||||
var subCommand *cobra.Command
|
||||
if len(args) > 0 {
|
||||
ccmd, _, err := cmd.Find(args)
|
||||
subCommand = ccmd
|
||||
if err != nil || pluginmanager.IsPluginCommand(ccmd) {
|
||||
err := tryPluginRun(dockerCli, cmd, args[0], envs)
|
||||
if err == nil {
|
||||
if dockerCli.HooksEnabled() && dockerCli.Out().IsTerminal() && ccmd != nil {
|
||||
pluginmanager.RunPluginHooks(dockerCli, cmd, ccmd, args)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if !pluginmanager.IsNotFound(err) {
|
||||
// For plugin not found we fall through to
|
||||
// cmd.Execute() which deals with reporting
|
||||
// "command not found" in a consistent way.
|
||||
return err
|
||||
}
|
||||
// For plugin not found we fall through to
|
||||
// cmd.Execute() which deals with reporting
|
||||
// "command not found" in a consistent way.
|
||||
}
|
||||
}
|
||||
|
||||
// We've parsed global args already, so reset args to those
|
||||
// which remain.
|
||||
cmd.SetArgs(args)
|
||||
err = cmd.Execute()
|
||||
|
||||
// If the command is being executed in an interactive terminal
|
||||
// and hook are enabled, run the plugin hooks.
|
||||
if dockerCli.HooksEnabled() && dockerCli.Out().IsTerminal() && subCommand != nil {
|
||||
var errMessage string
|
||||
if err != nil {
|
||||
errMessage = err.Error()
|
||||
}
|
||||
pluginmanager.RunCLICommandHooks(dockerCli, cmd, subCommand, errMessage)
|
||||
}
|
||||
|
||||
return err
|
||||
return cmd.Execute()
|
||||
}
|
||||
|
||||
type versionDetails interface {
|
||||
|
||||
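For orientation, here is a minimal sketch of how the new context-aware `runDocker(ctx, dockerCli)` entrypoint might be invoked. Only `runDocker`, `command.DockerCli`, and `dockerCli.MeterProvider(ctx)` are taken from the diff above; the `main` wiring shown here is an illustrative assumption, not the actual `cmd/docker` code (which also maps errors to exit statuses).

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/docker/cli/cli/command"
)

// Illustrative wiring only: pass a context into runDocker so it can create the
// OTel meter provider and shut it down when the command finishes.
func main() {
	ctx := context.Background()

	dockerCli, err := command.NewDockerCli()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	if err := runDocker(ctx, dockerCli); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```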
@ -1,5 +1,5 @@
variable "GO_VERSION" {
  default = "1.21.10"
  default = "1.21.9"
}
variable "VERSION" {
  default = ""

@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.21.10
ARG GO_VERSION=1.21.9
ARG ALPINE_VERSION=3.18

ARG BUILDX_VERSION=0.12.1

@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.21.10
ARG GO_VERSION=1.21.9
ARG ALPINE_VERSION=3.18
ARG GOLANGCI_LINT_VERSION=v1.55.2

@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.21.10
ARG GO_VERSION=1.21.9
ARG ALPINE_VERSION=3.18
ARG MODOUTDATED_VERSION=v0.8.0
@ -1363,7 +1363,6 @@ in the image, or `SIGTERM` if the image has no `STOPSIGNAL` defined.
| `--security-opt="seccomp=unconfined"` | Turn off seccomp confinement for the container |
| `--security-opt="seccomp=builtin"` | Use the default (built-in) seccomp profile for the container. This can be used to enable seccomp for a container running on a daemon with a custom default profile set, or with seccomp disabled ("unconfined"). |
| `--security-opt="seccomp=profile.json"` | White-listed syscalls seccomp Json file to be used as a seccomp filter |
| `--security-opt="systempaths=unconfined"` | Turn off confinement for system paths (masked paths, read-only paths) for the container |

The `--security-opt` flag lets you override the default labeling scheme for a
container. Specifying the level in the following command allows you to share

@ -1236,15 +1236,6 @@ The list of feature options include:
  snapshotters instead of the classic storage drivers for storing image and
  container data. For more information, see
  [containerd storage](https://docs.docker.com/storage/containerd/).
- `windows-dns-proxy`: when set to `true`, the daemon's internal DNS resolver
  will forward requests to external servers. Without this, most applications
  running in the container will still be able to use secondary DNS servers
  configured in the container itself, but `nslookup` won't be able to resolve
  external names. The current default is `false`, it will change to `true` in
  a future release. This option is only allowed on Windows.

  > **Warning**
  > The `windows-dns-proxy` feature flag will be removed in a future release.

#### Configuration reload behavior
@ -4,7 +4,6 @@ import (
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/docker/cli/e2e/internal/fixtures"
	"github.com/docker/cli/internal/test/environment"
@ -35,22 +34,6 @@ func TestRunAttachedFromRemoteImageAndRemove(t *testing.T) {
	golden.Assert(t, result.Stderr(), "run-attached-from-remote-and-remove.golden")
}

// Regression test for https://github.com/docker/cli/issues/5053
func TestRunInvalidEntrypointWithAutoremove(t *testing.T) {
	environment.SkipIfDaemonNotLinux(t)

	result := make(chan *icmd.Result)
	go func() {
		result <- icmd.RunCommand("docker", "run", "--rm", fixtures.AlpineImage, "invalidcommand")
	}()
	select {
	case r := <-result:
		r.Assert(t, icmd.Expected{ExitCode: 127})
	case <-time.After(4 * time.Second):
		t.Fatal("test took too long, shouldn't hang")
	}
}

func TestRunWithContentTrust(t *testing.T) {
	skip.If(t, environment.RemoteDaemon())
@ -1,25 +1,14 @@
package global

import (
	"bufio"
	"bytes"
	"context"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
	"syscall"
	"testing"
	"time"

	"github.com/docker/cli/e2e/internal/fixtures"
	"github.com/docker/cli/e2e/testutils"
	"github.com/docker/cli/internal/test"
	"github.com/docker/cli/internal/test/environment"
	"github.com/docker/docker/api/types/versions"
	"gotest.tools/v3/assert"
	"gotest.tools/v3/icmd"
	"gotest.tools/v3/poll"
	"gotest.tools/v3/skip"
)

@ -76,185 +65,3 @@ func TestTCPSchemeUsesHTTPProxyEnv(t *testing.T) {
	assert.Equal(t, strings.TrimSpace(result.Stdout()), "99.99.9")
	assert.Equal(t, received, "docker.acme.example.com:2376")
}

// Test that the prompt command exits with 0
// when the user sends SIGINT/SIGTERM to the process
func TestPromptExitCode(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)

	dir := fixtures.SetupConfigFile(t)
	t.Cleanup(dir.Remove)

	defaultCmdOpts := []icmd.CmdOp{
		fixtures.WithConfig(dir.Path()),
		fixtures.WithNotary,
	}

	testCases := []struct {
		name string
		run  func(t *testing.T) icmd.Cmd
	}{
		{
			name: "volume prune",
			run: func(t *testing.T) icmd.Cmd {
				t.Helper()
				return icmd.Command("docker", "volume", "prune")
			},
		},
		{
			name: "network prune",
			run: func(t *testing.T) icmd.Cmd {
				t.Helper()
				return icmd.Command("docker", "network", "prune")
			},
		},
		{
			name: "container prune",
			run: func(t *testing.T) icmd.Cmd {
				t.Helper()
				return icmd.Command("docker", "container", "prune")
			},
		},
		{
			name: "image prune",
			run: func(t *testing.T) icmd.Cmd {
				t.Helper()
				return icmd.Command("docker", "image", "prune")
			},
		},
		{
			name: "system prune",
			run: func(t *testing.T) icmd.Cmd {
				t.Helper()
				return icmd.Command("docker", "system", "prune")
			},
		},
		{
			name: "revoke trust",
			run: func(t *testing.T) icmd.Cmd {
				t.Helper()
				return icmd.Command("docker", "trust", "revoke", "example/trust-demo")
			},
		},
		{
			name: "plugin install",
			run: func(t *testing.T) icmd.Cmd {
				t.Helper()
				skip.If(t, versions.LessThan(environment.DaemonAPIVersion(t), "1.44"))

				pluginDir := testutils.SetupPlugin(t, ctx)
				t.Cleanup(pluginDir.Remove)

				plugin := "registry:5000/plugin-content-trust-install:latest"

				icmd.RunCommand("docker", "plugin", "create", plugin, pluginDir.Path()).Assert(t, icmd.Success)
				icmd.RunCmd(icmd.Command("docker", "plugin", "push", plugin), defaultCmdOpts...).Assert(t, icmd.Success)
				icmd.RunCmd(icmd.Command("docker", "plugin", "rm", "-f", plugin), defaultCmdOpts...).Assert(t, icmd.Success)
				return icmd.Command("docker", "plugin", "install", plugin)
			},
		},
		{
			name: "plugin upgrade",
			run: func(t *testing.T) icmd.Cmd {
				t.Helper()
				skip.If(t, versions.LessThan(environment.DaemonAPIVersion(t), "1.44"))

				pluginLatestDir := testutils.SetupPlugin(t, ctx)
				t.Cleanup(pluginLatestDir.Remove)
				pluginNextDir := testutils.SetupPlugin(t, ctx)
				t.Cleanup(pluginNextDir.Remove)

				plugin := "registry:5000/plugin-content-trust-upgrade"

				icmd.RunCommand("docker", "plugin", "create", plugin+":latest", pluginLatestDir.Path()).Assert(t, icmd.Success)
				icmd.RunCommand("docker", "plugin", "create", plugin+":next", pluginNextDir.Path()).Assert(t, icmd.Success)
				icmd.RunCmd(icmd.Command("docker", "plugin", "push", plugin+":latest"), defaultCmdOpts...).Assert(t, icmd.Success)
				icmd.RunCmd(icmd.Command("docker", "plugin", "push", plugin+":next"), defaultCmdOpts...).Assert(t, icmd.Success)
				icmd.RunCmd(icmd.Command("docker", "plugin", "rm", "-f", plugin+":latest"), defaultCmdOpts...).Assert(t, icmd.Success)
				icmd.RunCmd(icmd.Command("docker", "plugin", "rm", "-f", plugin+":next"), defaultCmdOpts...).Assert(t, icmd.Success)
				icmd.RunCmd(icmd.Command("docker", "plugin", "install", "--disable", "--grant-all-permissions", plugin+":latest"), defaultCmdOpts...).Assert(t, icmd.Success)
				return icmd.Command("docker", "plugin", "upgrade", plugin+":latest", plugin+":next")
			},
		},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run("case="+tc.name, func(t *testing.T) {
			t.Parallel()

			buf := new(bytes.Buffer)
			bufioWriter := bufio.NewWriter(buf)

			writeDone := make(chan struct{})
			w := test.NewWriterWithHook(bufioWriter, func(p []byte) {
				writeDone <- struct{}{}
			})

			drainChCtx, drainChCtxCancel := context.WithCancel(ctx)
			t.Cleanup(drainChCtxCancel)

			drainChannel(drainChCtx, writeDone)

			r, _ := io.Pipe()
			defer r.Close()
			result := icmd.StartCmd(tc.run(t),
				append(defaultCmdOpts,
					icmd.WithStdout(w),
					icmd.WithStderr(w),
					icmd.WithStdin(r))...)

			poll.WaitOn(t, func(t poll.LogT) poll.Result {
				select {
				case <-ctx.Done():
					return poll.Error(ctx.Err())
				default:

					if err := bufioWriter.Flush(); err != nil {
						return poll.Continue(err.Error())
					}
					if strings.Contains(buf.String(), "[y/N]") {
						return poll.Success()
					}

					return poll.Continue("command did not prompt for confirmation, instead prompted:\n%s\n", buf.String())
				}
			}, poll.WithDelay(100*time.Millisecond), poll.WithTimeout(1*time.Second))

			drainChCtxCancel()

			assert.NilError(t, result.Cmd.Process.Signal(syscall.SIGINT))

			proc, err := result.Cmd.Process.Wait()
			assert.NilError(t, err)
			assert.Equal(t, proc.ExitCode(), 0, "expected exit code to be 0, got %d", proc.ExitCode())

			processCtx, processCtxCancel := context.WithTimeout(ctx, time.Second)
			t.Cleanup(processCtxCancel)

			select {
			case <-processCtx.Done():
				t.Fatal("timed out waiting for new line after process exit")
			case <-writeDone:
				buf.Reset()
				assert.NilError(t, bufioWriter.Flush())
				assert.Equal(t, buf.String(), "\n", "expected a new line after the process exits from SIGINT")
			}
		})
	}
}

func drainChannel(ctx context.Context, ch <-chan struct{}) {
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case <-ch:
			}
		}
	}()
}
@ -9,11 +9,6 @@ import (
	"time"
)

// set by the compile flags to get around content sha being the same
var (
	UNIQUEME string
)

func main() {
	p, err := filepath.Abs(filepath.Join("run", "docker", "plugins"))
	if err != nil {

@ -1,14 +1,20 @@
package plugin

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"testing"

	"github.com/docker/cli/e2e/internal/fixtures"
	"github.com/docker/cli/e2e/testutils"
	"github.com/docker/cli/internal/test/environment"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/versions"
	"github.com/pkg/errors"
	"gotest.tools/v3/assert"
	"gotest.tools/v3/fs"
	"gotest.tools/v3/icmd"
	"gotest.tools/v3/skip"
)
@ -25,11 +31,8 @@ func TestInstallWithContentTrust(t *testing.T) {
	dir := fixtures.SetupConfigFile(t)
	defer dir.Remove()

	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)

	pluginDir := testutils.SetupPlugin(t, ctx)
	t.Cleanup(pluginDir.Remove)
	pluginDir := preparePluginDir(t)
	defer pluginDir.Remove()

	icmd.RunCommand("docker", "plugin", "create", pluginName, pluginDir.Path()).Assert(t, icmd.Success)
	result := icmd.RunCmd(icmd.Command("docker", "plugin", "push", pluginName),
@ -70,3 +73,46 @@ func TestInstallWithContentTrustUntrusted(t *testing.T) {
		Err: "Error: remote trust data does not exist",
	})
}

func preparePluginDir(t *testing.T) *fs.Dir {
	t.Helper()
	p := &types.PluginConfig{
		Interface: types.PluginConfigInterface{
			Socket: "basic.sock",
			Types:  []types.PluginInterfaceType{{Capability: "docker.dummy/1.0"}},
		},
		Entrypoint: []string{"/basic"},
	}
	configJSON, err := json.Marshal(p)
	assert.NilError(t, err)

	binPath, err := ensureBasicPluginBin()
	assert.NilError(t, err)

	dir := fs.NewDir(t, "plugin_test",
		fs.WithFile("config.json", string(configJSON), fs.WithMode(0o644)),
		fs.WithDir("rootfs", fs.WithMode(0o755)),
	)
	icmd.RunCommand("/bin/cp", binPath, dir.Join("rootfs", p.Entrypoint[0])).Assert(t, icmd.Success)
	return dir
}

func ensureBasicPluginBin() (string, error) {
	name := "docker-basic-plugin"
	p, err := exec.LookPath(name)
	if err == nil {
		return p, nil
	}

	goBin, err := exec.LookPath("/usr/local/go/bin/go")
	if err != nil {
		return "", err
	}
	installPath := filepath.Join(os.Getenv("GOPATH"), "bin", name)
	cmd := exec.Command(goBin, "build", "-o", installPath, "./basic")
	cmd.Env = append(os.Environ(), "CGO_ENABLED=0")
	if out, err := cmd.CombinedOutput(); err != nil {
		return "", errors.Wrapf(err, "error building basic plugin bin: %s", string(out))
	}
	return installPath, nil
}
2 e2e/testdata/Dockerfile.gencerts vendored

@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.21.10
ARG GO_VERSION=1.21.9

FROM golang:${GO_VERSION}-alpine AS generated
ENV GOTOOLCHAIN=local
@ -1,102 +0,0 @@
package testutils

import (
	"context"
	"crypto/rand"
	"embed"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"testing"

	"github.com/docker/docker/api/types"
	"github.com/pkg/errors"
	"gotest.tools/v3/assert"
	"gotest.tools/v3/fs"
	"gotest.tools/v3/icmd"
)

//go:embed plugins/*
var plugins embed.FS

// SetupPlugin builds a plugin and creates a temporary
// directory with the plugin's config.json and rootfs.
func SetupPlugin(t *testing.T, ctx context.Context) *fs.Dir {
	t.Helper()

	p := &types.PluginConfig{
		Linux: types.PluginConfigLinux{
			Capabilities: []string{"CAP_SYS_ADMIN"},
		},
		Interface: types.PluginConfigInterface{
			Socket: "basic.sock",
			Types:  []types.PluginInterfaceType{{Capability: "docker.dummy/1.0"}},
		},
		Entrypoint: []string{"/basic"},
	}
	configJSON, err := json.Marshal(p)
	assert.NilError(t, err)

	binPath, err := buildPlugin(t, ctx)
	assert.NilError(t, err)

	dir := fs.NewDir(t, "plugin_test",
		fs.WithFile("config.json", string(configJSON), fs.WithMode(0o644)),
		fs.WithDir("rootfs", fs.WithMode(0o755)),
	)

	icmd.RunCommand("/bin/cp", binPath, dir.Join("rootfs", p.Entrypoint[0])).Assert(t, icmd.Success)
	return dir
}

// buildPlugin uses Go to build a plugin from one of the source files in the plugins directory.
// It returns the path to the built plugin binary.
// To allow for multiple plugins to be built in parallel, the plugin is compiled with a unique
// identifier in the binary. This is done by setting a linker flag with the -ldflags option.
func buildPlugin(t *testing.T, ctx context.Context) (string, error) {
	t.Helper()

	randomName, err := randomString()
	if err != nil {
		return "", err
	}

	goBin, err := exec.LookPath("/usr/local/go/bin/go")
	if err != nil {
		return "", err
	}
	installPath := filepath.Join(os.Getenv("GOPATH"), "bin", randomName)

	pluginContent, err := plugins.ReadFile("plugins/basic.go")
	if err != nil {
		return "", err
	}
	dir := fs.NewDir(t, "plugin_build")
	if err := os.WriteFile(dir.Join("main.go"), pluginContent, 0o644); err != nil {
		return "", err
	}
	defer dir.Remove()

	cmd := exec.CommandContext(ctx, goBin, "build", "-ldflags",
		fmt.Sprintf("-X 'main.UNIQUEME=%s'", randomName),
		"-o", installPath, dir.Join("main.go"))

	cmd.Env = append(os.Environ(), "CGO_ENABLED=0")

	if out, err := cmd.CombinedOutput(); err != nil {
		return "", errors.Wrapf(err, "error building basic plugin bin: %s", string(out))
	}

	return installPath, nil
}

func randomString() (string, error) {
	b := make([]byte, 8)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(b), nil
}
@ -7,13 +7,12 @@ import (
	"testing"
	"time"

	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/streams"
	"github.com/spf13/cobra"
	"gotest.tools/v3/assert"
)

func TerminatePrompt(ctx context.Context, t *testing.T, cmd *cobra.Command, cli *FakeCli) {
func TerminatePrompt(ctx context.Context, t *testing.T, cmd *cobra.Command, cli *FakeCli, assertFunc func(*testing.T, error)) {
	t.Helper()

	errChan := make(chan error)
@ -74,6 +73,11 @@ func TerminatePrompt(ctx context.Context, t *testing.T, cmd *cobra.Command, cli
		t.Logf("command stderr:\n%s\n", cli.ErrBuffer().String())
		t.Fatalf("command %s did not return after SIGINT", cmd.Name())
	case err := <-errChan:
		assert.ErrorIs(t, err, command.ErrPromptTerminated)
		if assertFunc != nil {
			assertFunc(t, err)
		} else {
			assert.NilError(t, err)
			assert.Equal(t, cli.ErrBuffer().String(), "")
		}
	}
}
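A hedged sketch of how a test could use the new `assertFunc` parameter. Only the `TerminatePrompt` signature, `FakeCli`, and `command.ErrPromptTerminated` come from the diff; the test name, `fakeClient`, and `newPruneCommand` are hypothetical stand-ins, and the `internal/test` package path for the caller is an assumption.

```go
func TestVolumePrunePromptTermination(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)

	cli := test.NewFakeCli(&fakeClient{}) // fakeClient: hypothetical stub API client
	cmd := newPruneCommand(cli)           // hypothetical command under test

	test.TerminatePrompt(ctx, t, cmd, cli, func(t *testing.T, err error) {
		// With a custom assertFunc the caller owns the checks; here we only
		// require that the prompt was terminated rather than answered.
		assert.ErrorIs(t, err, command.ErrPromptTerminated)
	})
}
```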
@ -4,22 +4,27 @@ import (
	"io"
)

type writerWithHook struct {
// WriterWithHook is an io.Writer that calls a hook function
// after every write.
// This is useful in testing to wait for a write to complete,
// or to check what was written.
// To create a WriterWithHook use the NewWriterWithHook function.
type WriterWithHook struct {
	actualWriter io.Writer
	hook         func([]byte)
}

func (w *writerWithHook) Write(p []byte) (n int, err error) {
// Write writes p to the actual writer and then calls the hook function.
func (w *WriterWithHook) Write(p []byte) (n int, err error) {
	defer w.hook(p)
	return w.actualWriter.Write(p)
}

var _ io.Writer = (*writerWithHook)(nil)
var _ io.Writer = (*WriterWithHook)(nil)

// NewWriterWithHook returns a io.Writer that still
// writes to the actualWriter but also calls the hook function
// after every write. It is useful to use this function when
// you need to wait for a writer to complete writing inside a test.
func NewWriterWithHook(actualWriter io.Writer, hook func([]byte)) *writerWithHook {
	return &writerWithHook{actualWriter: actualWriter, hook: hook}
// NewWriterWithHook returns a new WriterWithHook that still writes to the actualWriter
// but also calls the hook function after every write.
// The hook function is useful for testing, or waiting for a write to complete.
func NewWriterWithHook(actualWriter io.Writer, hook func([]byte)) *WriterWithHook {
	return &WriterWithHook{actualWriter: actualWriter, hook: hook}
}
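As a usage illustration, a test can block until output has gone through the hooked writer instead of sleeping; this mirrors what `TestPromptExitCode` above does with a drain goroutine. The test name and buffered channel are my own choices here, and the imports (`bytes`, `fmt`, `testing`, `gotest.tools/v3/assert`, the `internal/test` package) are assumed.

```go
func TestHookedWriter(t *testing.T) {
	buf := new(bytes.Buffer)
	writeDone := make(chan struct{}, 1) // buffered so the hook never blocks the write

	w := test.NewWriterWithHook(buf, func([]byte) {
		writeDone <- struct{}{}
	})

	fmt.Fprintln(w, "hello")

	<-writeDone // the hook fires after the underlying Write, so buf is complete here
	assert.Equal(t, buf.String(), "hello\n")
}
```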
@ -135,18 +135,3 @@ func TestNetworkOptAdvancedSyntaxInvalid(t *testing.T) {
		})
	}
}

func TestNetworkOptStringNetOptString(t *testing.T) {
	networkOpt := &NetworkOpt{}
	result := networkOpt.String()
	assert.Check(t, is.Equal("", result))
	if result != "" {
		t.Errorf("Expected an empty string, got %s", result)
	}
}

func TestNetworkOptTypeNetOptType(t *testing.T) {
	networkOpt := &NetworkOpt{}
	result := networkOpt.Type()
	assert.Check(t, is.Equal("network", result))
}

@ -465,18 +465,3 @@ func TestParseLink(t *testing.T) {
		t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err)
	}
}

func TestGetAllOrEmptyReturnsNilOrValue(t *testing.T) {
	opts := NewListOpts(nil)
	assert.Check(t, is.DeepEqual(opts.GetAllOrEmpty(), []string{}))
	opts.Set("foo")
	assert.Check(t, is.DeepEqual(opts.GetAllOrEmpty(), []string{"foo"}))
}

func TestParseCPUsReturnZeroOnInvalidValues(t *testing.T) {
	resValue, _ := ParseCPUs("foo")
	var z1 int64 = 0
	assert.Equal(t, z1, resValue)
	resValue, _ = ParseCPUs("1e-32")
	assert.Equal(t, z1, resValue)
}

@ -48,7 +48,7 @@ if [ -z "$CGO_ENABLED" ]; then
	case "$(go env GOOS)" in
		linux)
			case "$(go env GOARCH)" in
				amd64|arm64|arm|s390x|riscv64)
				amd64|arm64|arm|s390x)
					CGO_ENABLED=1
					;;
				*)
23 vendor.mod
@ -8,11 +8,11 @@ go 1.21
|
||||
|
||||
require (
|
||||
dario.cat/mergo v1.0.0
|
||||
github.com/containerd/containerd v1.7.15
|
||||
github.com/containerd/containerd v1.7.14
|
||||
github.com/creack/pty v1.1.21
|
||||
github.com/distribution/reference v0.5.0
|
||||
github.com/docker/distribution v2.8.3+incompatible
|
||||
github.com/docker/docker v26.1.2-0.20240508085902-ef1912d8b6ae+incompatible
|
||||
github.com/docker/docker v26.0.2-0.20240418155034-7cef0d9cd1cf+incompatible
|
||||
github.com/docker/docker-credential-helpers v0.8.1
|
||||
github.com/docker/go-connections v0.5.0
|
||||
github.com/docker/go-units v0.5.0
|
||||
@ -23,7 +23,7 @@ require (
|
||||
github.com/mattn/go-runewidth v0.0.15
|
||||
github.com/mitchellh/mapstructure v1.5.0
|
||||
github.com/moby/patternmatcher v0.6.0
|
||||
github.com/moby/swarmkit/v2 v2.0.0-20240227173239-911c97650f2e
|
||||
github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261
|
||||
github.com/moby/sys/sequential v0.5.0
|
||||
github.com/moby/sys/signal v0.7.0
|
||||
github.com/moby/term v0.5.0
|
||||
@ -38,12 +38,6 @@ require (
|
||||
github.com/tonistiigi/go-rosetta v0.0.0-20200727161949-f79598599c5d
|
||||
github.com/xeipuuv/gojsonschema v1.2.0
|
||||
go.opentelemetry.io/otel v1.21.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
|
||||
go.opentelemetry.io/otel/metric v1.21.0
|
||||
go.opentelemetry.io/otel/sdk v1.21.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.21.0
|
||||
go.opentelemetry.io/otel/trace v1.21.0
|
||||
golang.org/x/sync v0.6.0
|
||||
golang.org/x/sys v0.18.0
|
||||
golang.org/x/term v0.18.0
|
||||
@ -58,18 +52,16 @@ require (
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/Microsoft/hcsshim v0.11.4 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/logr v1.3.0 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/klauspost/compress v1.17.4 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
@ -86,15 +78,14 @@ require (
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.21.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.21.0 // indirect
|
||||
golang.org/x/crypto v0.21.0 // indirect
|
||||
golang.org/x/mod v0.14.0 // indirect
|
||||
golang.org/x/net v0.23.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.16.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect
|
||||
google.golang.org/grpc v1.60.1 // indirect
|
||||
google.golang.org/grpc v1.59.0 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
)
|
||||
|
||||
47 vendor.sum
@ -39,8 +39,8 @@ github.com/cloudflare/cfssl v1.6.4/go.mod h1:8b3CQMxfWPAeom3zBnGJ6sd+G1NkL5TXqmD
|
||||
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
|
||||
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
|
||||
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
|
||||
github.com/containerd/containerd v1.7.15 h1:afEHXdil9iAm03BmhjzKyXnnEBtjaLJefdU7DV0IFes=
|
||||
github.com/containerd/containerd v1.7.15/go.mod h1:ISzRRTMF8EXNpJlTzyr2XMhN+j9K302C21/+cr3kUnY=
|
||||
github.com/containerd/containerd v1.7.14 h1:H/XLzbnGuenZEGK+v0RkwTdv2u1QFAruMe5N0GNPJwA=
|
||||
github.com/containerd/containerd v1.7.14/go.mod h1:YMC9Qt5yzNqXx/fO4j/5yYVIHXSRrlB3H7sxkUTvspg=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
@ -57,8 +57,8 @@ github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v26.1.2-0.20240508085902-ef1912d8b6ae+incompatible h1:PcRQNw8eAMTjdnD7+y3IeJcsCKqxHmlT0MmqfNs9Jc4=
|
||||
github.com/docker/docker v26.1.2-0.20240508085902-ef1912d8b6ae+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v26.0.2-0.20240418155034-7cef0d9cd1cf+incompatible h1:RfKz/iws/BQwbCPPTRz0QXFRwvPAH3kYGwjaVKlh6jc=
|
||||
github.com/docker/docker v26.0.2-0.20240418155034-7cef0d9cd1cf+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=
|
||||
github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
|
||||
@ -87,8 +87,8 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
||||
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
|
||||
@ -101,8 +101,6 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
|
||||
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
@ -176,8 +174,8 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
|
||||
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||
github.com/moby/swarmkit/v2 v2.0.0-20240227173239-911c97650f2e h1:4FRRm/5kOaCc+ssRBPmmcQM7b0KHdOgqKob93VnvHPs=
|
||||
github.com/moby/swarmkit/v2 v2.0.0-20240227173239-911c97650f2e/go.mod h1:kNy225f/gWAnF8wPftteMc5nbAHhrH+HUfvyjmhFjeQ=
|
||||
github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261 h1:mjLf2jYrqtIS4LvLzg0gNyJR4rMXS4X5Bg1A4hOhVMs=
|
||||
github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261/go.mod h1:oRJU1d0hrkkwCtouwfQGcIAKcVEkclMYoLWocqrg6gI=
|
||||
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
|
||||
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
|
||||
github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI=
|
||||
@ -237,8 +235,8 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
@ -293,27 +291,19 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJ
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
|
||||
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
|
||||
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 h1:jd0+5t/YynESZqsSyPz+7PAFdEop0dlN0+PkyHYo8oI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
|
||||
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
|
||||
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q=
|
||||
go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
|
||||
go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
|
||||
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
|
||||
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
@ -382,14 +372,13 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a h1:fwgW9j3vHirt4ObdHoYNwuO24BEZjSzbh+zPaNWoiY8=
|
||||
google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb80Dq1hhioy0sOsY9jCE46YDgHlJ7fWVUWRE=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc=
|
||||
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
|
||||
google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
|
||||
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
|
||||
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
|
||||
25 vendor/github.com/cenkalti/backoff/v4/.gitignore generated vendored
@ -1,25 +0,0 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
|
||||
# IDEs
|
||||
.idea/
|
||||
20 vendor/github.com/cenkalti/backoff/v4/LICENSE generated vendored
@ -1,20 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Cenk Altı
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
32 vendor/github.com/cenkalti/backoff/v4/README.md generated vendored
@ -1,32 +0,0 @@
|
||||
# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
|
||||
|
||||
This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
|
||||
|
||||
[Exponential backoff][exponential backoff wiki]
|
||||
is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
|
||||
in order to gradually find an acceptable rate.
|
||||
The retries exponentially increase and stop increasing when a certain threshold is met.
|
||||
|
||||
## Usage
|
||||
|
||||
Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
|
||||
|
||||
Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
|
||||
|
||||
## Contributing
|
||||
|
||||
* I would like to keep this library as small as possible.
|
||||
* Please don't send a PR without opening an issue and discussing it first.
|
||||
* If proposed change is not a common use case, I will probably not accept it.
|
||||
|
||||
[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
|
||||
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
|
||||
[travis]: https://travis-ci.org/cenkalti/backoff
|
||||
[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
|
||||
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
|
||||
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
|
||||
|
||||
[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
|
||||
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
|
||||
|
||||
[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
|
||||
66 vendor/github.com/cenkalti/backoff/v4/backoff.go generated vendored
@ -1,66 +0,0 @@
|
||||
// Package backoff implements backoff algorithms for retrying operations.
|
||||
//
|
||||
// Use Retry function for retrying operations that may fail.
|
||||
// If Retry does not meet your needs,
|
||||
// copy/paste the function into your project and modify as you wish.
|
||||
//
|
||||
// There is also Ticker type similar to time.Ticker.
|
||||
// You can use it if you need to work with channels.
|
||||
//
|
||||
// See Examples section below for usage examples.
|
||||
package backoff
|
||||
|
||||
import "time"
|
||||
|
||||
// BackOff is a backoff policy for retrying an operation.
|
||||
type BackOff interface {
|
||||
// NextBackOff returns the duration to wait before retrying the operation,
|
||||
// or backoff. Stop to indicate that no more retries should be made.
|
||||
//
|
||||
// Example usage:
|
||||
//
|
||||
// duration := backoff.NextBackOff();
|
||||
// if (duration == backoff.Stop) {
|
||||
// // Do not retry operation.
|
||||
// } else {
|
||||
// // Sleep for duration and retry operation.
|
||||
// }
|
||||
//
|
||||
NextBackOff() time.Duration
|
||||
|
||||
// Reset to initial state.
|
||||
Reset()
|
||||
}
|
||||
|
||||
// Stop indicates that no more retries should be made for use in NextBackOff().
|
||||
const Stop time.Duration = -1
|
||||
|
||||
// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
|
||||
// meaning that the operation is retried immediately without waiting, indefinitely.
|
||||
type ZeroBackOff struct{}
|
||||
|
||||
func (b *ZeroBackOff) Reset() {}
|
||||
|
||||
func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
|
||||
|
||||
// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
|
||||
// NextBackOff(), meaning that the operation should never be retried.
|
||||
type StopBackOff struct{}
|
||||
|
||||
func (b *StopBackOff) Reset() {}
|
||||
|
||||
func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
|
||||
|
||||
// ConstantBackOff is a backoff policy that always returns the same backoff delay.
|
||||
// This is in contrast to an exponential backoff policy,
|
||||
// which returns a delay that grows longer as you call NextBackOff() over and over again.
|
||||
type ConstantBackOff struct {
|
||||
Interval time.Duration
|
||||
}
|
||||
|
||||
func (b *ConstantBackOff) Reset() {}
|
||||
func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
|
||||
|
||||
func NewConstantBackOff(d time.Duration) *ConstantBackOff {
|
||||
return &ConstantBackOff{Interval: d}
|
||||
}
|
||||
62 vendor/github.com/cenkalti/backoff/v4/context.go generated vendored
@ -1,62 +0,0 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BackOffContext is a backoff policy that stops retrying after the context
|
||||
// is canceled.
|
||||
type BackOffContext interface { // nolint: golint
|
||||
BackOff
|
||||
Context() context.Context
|
||||
}
|
||||
|
||||
type backOffContext struct {
|
||||
BackOff
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// WithContext returns a BackOffContext with context ctx
|
||||
//
|
||||
// ctx must not be nil
|
||||
func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
|
||||
if ctx == nil {
|
||||
panic("nil context")
|
||||
}
|
||||
|
||||
if b, ok := b.(*backOffContext); ok {
|
||||
return &backOffContext{
|
||||
BackOff: b.BackOff,
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
return &backOffContext{
|
||||
BackOff: b,
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
func getContext(b BackOff) context.Context {
|
||||
if cb, ok := b.(BackOffContext); ok {
|
||||
return cb.Context()
|
||||
}
|
||||
if tb, ok := b.(*backOffTries); ok {
|
||||
return getContext(tb.delegate)
|
||||
}
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func (b *backOffContext) Context() context.Context {
|
||||
return b.ctx
|
||||
}
|
||||
|
||||
func (b *backOffContext) NextBackOff() time.Duration {
|
||||
select {
|
||||
case <-b.ctx.Done():
|
||||
return Stop
|
||||
default:
|
||||
return b.BackOff.NextBackOff()
|
||||
}
|
||||
}
|
||||
161 vendor/github.com/cenkalti/backoff/v4/exponential.go generated vendored
@ -1,161 +0,0 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
/*
|
||||
ExponentialBackOff is a backoff implementation that increases the backoff
|
||||
period for each retry attempt using a randomization function that grows exponentially.
|
||||
|
||||
NextBackOff() is calculated using the following formula:
|
||||
|
||||
randomized interval =
|
||||
RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
|
||||
|
||||
In other words NextBackOff() will range between the randomization factor
|
||||
percentage below and above the retry interval.
|
||||
|
||||
For example, given the following parameters:
|
||||
|
||||
RetryInterval = 2
|
||||
RandomizationFactor = 0.5
|
||||
Multiplier = 2
|
||||
|
||||
the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
|
||||
multiplied by the exponential, that is, between 2 and 6 seconds.
|
||||
|
||||
Note: MaxInterval caps the RetryInterval and not the randomized interval.
|
||||
|
||||
If the time elapsed since an ExponentialBackOff instance is created goes past the
|
||||
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
|
||||
|
||||
The elapsed time can be reset by calling Reset().
|
||||
|
||||
Example: Given the following default arguments, for 10 tries the sequence will be,
|
||||
and assuming we go over the MaxElapsedTime on the 10th try:
|
||||
|
||||
Request # RetryInterval (seconds) Randomized Interval (seconds)
|
||||
|
||||
1 0.5 [0.25, 0.75]
|
||||
2 0.75 [0.375, 1.125]
|
||||
3 1.125 [0.562, 1.687]
|
||||
4 1.687 [0.8435, 2.53]
|
||||
5 2.53 [1.265, 3.795]
|
||||
6 3.795 [1.897, 5.692]
|
||||
7 5.692 [2.846, 8.538]
|
||||
8 8.538 [4.269, 12.807]
|
||||
9 12.807 [6.403, 19.210]
|
||||
10 19.210 backoff.Stop
|
||||
|
||||
Note: Implementation is not thread-safe.
|
||||
*/
|
||||
type ExponentialBackOff struct {
|
||||
InitialInterval time.Duration
|
||||
RandomizationFactor float64
|
||||
Multiplier float64
|
||||
MaxInterval time.Duration
|
||||
// After MaxElapsedTime the ExponentialBackOff returns Stop.
|
||||
// It never stops if MaxElapsedTime == 0.
|
||||
MaxElapsedTime time.Duration
|
||||
Stop time.Duration
|
||||
Clock Clock
|
||||
|
||||
currentInterval time.Duration
|
||||
startTime time.Time
|
||||
}
|
||||
|
||||
// Clock is an interface that returns current time for BackOff.
|
||||
type Clock interface {
|
||||
Now() time.Time
|
||||
}
|
||||
|
||||
// Default values for ExponentialBackOff.
|
||||
const (
|
||||
DefaultInitialInterval = 500 * time.Millisecond
|
||||
DefaultRandomizationFactor = 0.5
|
||||
DefaultMultiplier = 1.5
|
||||
DefaultMaxInterval = 60 * time.Second
|
||||
DefaultMaxElapsedTime = 15 * time.Minute
|
||||
)
|
||||
|
||||
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
|
||||
func NewExponentialBackOff() *ExponentialBackOff {
|
||||
b := &ExponentialBackOff{
|
||||
InitialInterval: DefaultInitialInterval,
|
||||
RandomizationFactor: DefaultRandomizationFactor,
|
||||
Multiplier: DefaultMultiplier,
|
||||
MaxInterval: DefaultMaxInterval,
|
||||
MaxElapsedTime: DefaultMaxElapsedTime,
|
||||
Stop: Stop,
|
||||
Clock: SystemClock,
|
||||
}
|
||||
b.Reset()
|
||||
return b
|
||||
}
|
||||
|
||||
type systemClock struct{}
|
||||
|
||||
func (t systemClock) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// SystemClock implements Clock interface that uses time.Now().
|
||||
var SystemClock = systemClock{}
|
||||
|
||||
// Reset the interval back to the initial retry interval and restarts the timer.
|
||||
// Reset must be called before using b.
|
||||
func (b *ExponentialBackOff) Reset() {
|
||||
b.currentInterval = b.InitialInterval
|
||||
b.startTime = b.Clock.Now()
|
||||
}
|
||||
|
||||
// NextBackOff calculates the next backoff interval using the formula:
|
||||
// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
|
||||
func (b *ExponentialBackOff) NextBackOff() time.Duration {
|
||||
// Make sure we have not gone over the maximum elapsed time.
|
||||
elapsed := b.GetElapsedTime()
|
||||
next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
|
||||
b.incrementCurrentInterval()
|
||||
if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
|
||||
return b.Stop
|
||||
}
|
||||
return next
|
||||
}
|
||||
|
||||
// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
|
||||
// is created and is reset when Reset() is called.
|
||||
//
|
||||
// The elapsed time is computed using time.Now().UnixNano(). It is
|
||||
// safe to call even while the backoff policy is used by a running
|
||||
// ticker.
|
||||
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
|
||||
return b.Clock.Now().Sub(b.startTime)
|
||||
}
|
||||
|
||||
// Increments the current interval by multiplying it with the multiplier.
|
||||
func (b *ExponentialBackOff) incrementCurrentInterval() {
|
||||
// Check for overflow, if overflow is detected set the current interval to the max interval.
|
||||
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
|
||||
b.currentInterval = b.MaxInterval
|
||||
} else {
|
||||
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a random value from the following interval:
|
||||
// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
|
||||
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
|
||||
if randomizationFactor == 0 {
|
||||
return currentInterval // make sure no randomness is used when randomizationFactor is 0.
|
||||
}
|
||||
var delta = randomizationFactor * float64(currentInterval)
|
||||
var minInterval = float64(currentInterval) - delta
|
||||
var maxInterval = float64(currentInterval) + delta
|
||||
|
||||
// Get a random value from the range [minInterval, maxInterval].
|
||||
// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
|
||||
// we want a 33% chance for selecting either 1, 2 or 3.
|
||||
return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
|
||||
}
|
||||
146 vendor/github.com/cenkalti/backoff/v4/retry.go generated vendored
@ -1,146 +0,0 @@
package backoff

import (
    "errors"
    "time"
)

// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData().
// The operation will be retried using a backoff policy if it returns an error.
type OperationWithData[T any] func() (T, error)

// An Operation is executing by Retry() or RetryNotify().
// The operation will be retried using a backoff policy if it returns an error.
type Operation func() error

func (o Operation) withEmptyData() OperationWithData[struct{}] {
    return func() (struct{}, error) {
        return struct{}{}, o()
    }
}

// Notify is a notify-on-error function. It receives an operation error and
// backoff delay if the operation failed (with an error).
//
// NOTE that if the backoff policy stated to stop retrying,
// the notify function isn't called.
type Notify func(error, time.Duration)

// Retry the operation o until it does not return error or BackOff stops.
// o is guaranteed to be run at least once.
//
// If o returns a *PermanentError, the operation is not retried, and the
// wrapped error is returned.
//
// Retry sleeps the goroutine for the duration returned by BackOff after a
// failed operation returns.
func Retry(o Operation, b BackOff) error {
    return RetryNotify(o, b, nil)
}

// RetryWithData is like Retry but returns data in the response too.
func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
    return RetryNotifyWithData(o, b, nil)
}

// RetryNotify calls notify function with the error and wait duration
// for each failed attempt before sleep.
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
    return RetryNotifyWithTimer(operation, b, notify, nil)
}

// RetryNotifyWithData is like RetryNotify but returns data in the response too.
func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
    return doRetryNotify(operation, b, notify, nil)
}

// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
// for each failed attempt before sleep.
// A default timer that uses system timer is used when nil is passed.
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
    _, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
    return err
}

// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
    return doRetryNotify(operation, b, notify, t)
}

func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
    var (
        err  error
        next time.Duration
        res  T
    )
    if t == nil {
        t = &defaultTimer{}
    }

    defer func() {
        t.Stop()
    }()

    ctx := getContext(b)

    b.Reset()
    for {
        res, err = operation()
        if err == nil {
            return res, nil
        }

        var permanent *PermanentError
        if errors.As(err, &permanent) {
            return res, permanent.Err
        }

        if next = b.NextBackOff(); next == Stop {
            if cerr := ctx.Err(); cerr != nil {
                return res, cerr
            }

            return res, err
        }

        if notify != nil {
            notify(err, next)
        }

        t.Start(next)

        select {
        case <-ctx.Done():
            return res, ctx.Err()
        case <-t.C():
        }
    }
}

// PermanentError signals that the operation should not be retried.
type PermanentError struct {
    Err error
}

func (e *PermanentError) Error() string {
    return e.Err.Error()
}

func (e *PermanentError) Unwrap() error {
    return e.Err
}

func (e *PermanentError) Is(target error) bool {
    _, ok := target.(*PermanentError)
    return ok
}

// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) error {
    if err == nil {
        return nil
    }
    return &PermanentError{
        Err: err,
    }
}
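As context for the retry API shown above, a minimal usage sketch; the HTTP endpoint and the failure condition are illustrative, while Retry and NewExponentialBackOff come from the package itself.

package main

import (
    "fmt"
    "net/http"

    "github.com/cenkalti/backoff/v4"
)

func main() {
    op := func() error {
        // Illustrative operation: any error or 5xx answer triggers a retry.
        resp, err := http.Get("https://example.com/healthz")
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode >= 500 {
            return fmt.Errorf("server not ready: %d", resp.StatusCode)
        }
        return nil
    }

    // Retry sleeps between attempts according to the BackOff policy and stops
    // on success, on a *PermanentError, or when the policy returns Stop.
    if err := backoff.Retry(op, backoff.NewExponentialBackOff()); err != nil {
        fmt.Println("giving up:", err)
    }
}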
97
vendor/github.com/cenkalti/backoff/v4/ticker.go
generated
vendored
97
vendor/github.com/cenkalti/backoff/v4/ticker.go
generated
vendored
@ -1,97 +0,0 @@
package backoff

import (
    "context"
    "sync"
    "time"
)

// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
//
// Ticks will continue to arrive when the previous operation is still running,
// so operations that take a while to fail could run in quick succession.
type Ticker struct {
    C        <-chan time.Time
    c        chan time.Time
    b        BackOff
    ctx      context.Context
    timer    Timer
    stop     chan struct{}
    stopOnce sync.Once
}

// NewTicker returns a new Ticker containing a channel that will send
// the time at times specified by the BackOff argument. Ticker is
// guaranteed to tick at least once. The channel is closed when Stop
// method is called or BackOff stops. It is not safe to manipulate the
// provided backoff policy (notably calling NextBackOff or Reset)
// while the ticker is running.
func NewTicker(b BackOff) *Ticker {
    return NewTickerWithTimer(b, &defaultTimer{})
}

// NewTickerWithTimer returns a new Ticker with a custom timer.
// A default timer that uses system timer is used when nil is passed.
func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
    if timer == nil {
        timer = &defaultTimer{}
    }
    c := make(chan time.Time)
    t := &Ticker{
        C:     c,
        c:     c,
        b:     b,
        ctx:   getContext(b),
        timer: timer,
        stop:  make(chan struct{}),
    }
    t.b.Reset()
    go t.run()
    return t
}

// Stop turns off a ticker. After Stop, no more ticks will be sent.
func (t *Ticker) Stop() {
    t.stopOnce.Do(func() { close(t.stop) })
}

func (t *Ticker) run() {
    c := t.c
    defer close(c)

    // Ticker is guaranteed to tick at least once.
    afterC := t.send(time.Now())

    for {
        if afterC == nil {
            return
        }

        select {
        case tick := <-afterC:
            afterC = t.send(tick)
        case <-t.stop:
            t.c = nil // Prevent future ticks from being sent to the channel.
            return
        case <-t.ctx.Done():
            return
        }
    }
}

func (t *Ticker) send(tick time.Time) <-chan time.Time {
    select {
    case t.c <- tick:
    case <-t.stop:
        return nil
    }

    next := t.b.NextBackOff()
    if next == Stop {
        t.Stop()
        return nil
    }

    t.timer.Start(next)
    return t.timer.C()
}
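A minimal sketch of how the Ticker above is typically consumed; the operation that fails twice before succeeding is invented for illustration.

package main

import (
    "errors"
    "fmt"

    "github.com/cenkalti/backoff/v4"
)

func main() {
    ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
    defer ticker.Stop()

    attempt := 0
    for range ticker.C {
        attempt++
        // Illustrative operation: pretend it succeeds on the third try.
        if attempt < 3 {
            fmt.Println("attempt", attempt, "failed:", errors.New("not ready"))
            continue
        }
        fmt.Println("succeeded on attempt", attempt)
        break
    }
}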
35
vendor/github.com/cenkalti/backoff/v4/timer.go
generated
vendored
35
vendor/github.com/cenkalti/backoff/v4/timer.go
generated
vendored
@ -1,35 +0,0 @@
package backoff

import "time"

type Timer interface {
    Start(duration time.Duration)
    Stop()
    C() <-chan time.Time
}

// defaultTimer implements Timer interface using time.Timer
type defaultTimer struct {
    timer *time.Timer
}

// C returns the timers channel which receives the current time when the timer fires.
func (t *defaultTimer) C() <-chan time.Time {
    return t.timer.C
}

// Start starts the timer to fire after the given duration
func (t *defaultTimer) Start(duration time.Duration) {
    if t.timer == nil {
        t.timer = time.NewTimer(duration)
    } else {
        t.timer.Reset(duration)
    }
}

// Stop is called when the timer is not used anymore and resources may be freed.
func (t *defaultTimer) Stop() {
    if t.timer != nil {
        t.timer.Stop()
    }
}
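The Timer interface above is what RetryNotifyWithTimer accepts, so callers can substitute their own clock. A sketch of a custom timer follows; the manualTimer type is hypothetical and mainly useful for keeping tests fast.

package main

import (
    "fmt"
    "time"

    "github.com/cenkalti/backoff/v4"
)

// manualTimer satisfies backoff.Timer but fires immediately instead of sleeping.
type manualTimer struct {
    c chan time.Time
}

func (t *manualTimer) Start(time.Duration) { t.c <- time.Now() } // ignore the duration
func (t *manualTimer) Stop()               {}
func (t *manualTimer) C() <-chan time.Time { return t.c }

func main() {
    attempts := 0
    op := func() error {
        attempts++
        if attempts < 3 {
            return fmt.Errorf("attempt %d failed", attempts)
        }
        return nil
    }
    // With the manual timer the retries run back-to-back.
    t := &manualTimer{c: make(chan time.Time, 1)}
    err := backoff.RetryNotifyWithTimer(op, backoff.NewExponentialBackOff(), nil, t)
    fmt.Println(attempts, err)
}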
38
vendor/github.com/cenkalti/backoff/v4/tries.go
generated
vendored
38
vendor/github.com/cenkalti/backoff/v4/tries.go
generated
vendored
@ -1,38 +0,0 @@
package backoff

import "time"

/*
WithMaxRetries creates a wrapper around another BackOff, which will
return Stop if NextBackOff() has been called too many times since
the last time Reset() was called

Note: Implementation is not thread-safe.
*/
func WithMaxRetries(b BackOff, max uint64) BackOff {
    return &backOffTries{delegate: b, maxTries: max}
}

type backOffTries struct {
    delegate BackOff
    maxTries uint64
    numTries uint64
}

func (b *backOffTries) NextBackOff() time.Duration {
    if b.maxTries == 0 {
        return Stop
    }
    if b.maxTries > 0 {
        if b.maxTries <= b.numTries {
            return Stop
        }
        b.numTries++
    }
    return b.delegate.NextBackOff()
}

func (b *backOffTries) Reset() {
    b.numTries = 0
    b.delegate.Reset()
}
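A short usage sketch for WithMaxRetries; the constant 100ms delay and the always-failing operation are illustrative.

package main

import (
    "errors"
    "fmt"
    "time"

    "github.com/cenkalti/backoff/v4"
)

func main() {
    // One initial attempt plus at most 4 retries, 100ms apart.
    policy := backoff.WithMaxRetries(backoff.NewConstantBackOff(100*time.Millisecond), 4)

    err := backoff.Retry(func() error {
        return errors.New("still failing")
    }, policy)

    // After the retries are exhausted, the last error is returned.
    fmt.Println(err)
}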
9
vendor/github.com/docker/docker/AUTHORS
generated
vendored
9
vendor/github.com/docker/docker/AUTHORS
generated
vendored
@ -669,7 +669,6 @@ Erik Hollensbe <github@hollensbe.org>
|
||||
Erik Inge Bolsø <knan@redpill-linpro.com>
|
||||
Erik Kristensen <erik@erikkristensen.com>
|
||||
Erik Sipsma <erik@sipsma.dev>
|
||||
Erik Sjölund <erik.sjolund@gmail.com>
|
||||
Erik St. Martin <alakriti@gmail.com>
|
||||
Erik Weathers <erikdw@gmail.com>
|
||||
Erno Hopearuoho <erno.hopearuoho@gmail.com>
|
||||
@ -732,7 +731,6 @@ Feroz Salam <feroz.salam@sourcegraph.com>
|
||||
Ferran Rodenas <frodenas@gmail.com>
|
||||
Filipe Brandenburger <filbranden@google.com>
|
||||
Filipe Oliveira <contato@fmoliveira.com.br>
|
||||
Filipe Pina <hzlu1ot0@duck.com>
|
||||
Flavio Castelli <fcastelli@suse.com>
|
||||
Flavio Crisciani <flavio.crisciani@docker.com>
|
||||
Florian <FWirtz@users.noreply.github.com>
|
||||
@ -877,8 +875,6 @@ Hsing-Yu (David) Chen <davidhsingyuchen@gmail.com>
|
||||
hsinko <21551195@zju.edu.cn>
|
||||
Hu Keping <hukeping@huawei.com>
|
||||
Hu Tao <hutao@cn.fujitsu.com>
|
||||
Huajin Tong <fliterdashen@gmail.com>
|
||||
huang-jl <1046678590@qq.com>
|
||||
HuanHuan Ye <logindaveye@gmail.com>
|
||||
Huanzhong Zhang <zhanghuanzhong90@gmail.com>
|
||||
Huayi Zhang <irachex@gmail.com>
|
||||
@ -973,7 +969,6 @@ Jannick Fahlbusch <git@jf-projects.de>
|
||||
Januar Wayong <januar@gmail.com>
|
||||
Jared Biel <jared.biel@bolderthinking.com>
|
||||
Jared Hocutt <jaredh@netapp.com>
|
||||
Jaroslav Jindrak <dzejrou@gmail.com>
|
||||
Jaroslaw Zabiello <hipertracker@gmail.com>
|
||||
Jasmine Hegman <jasmine@jhegman.com>
|
||||
Jason A. Donenfeld <Jason@zx2c4.com>
|
||||
@ -1017,7 +1012,6 @@ Jeffrey Bolle <jeffreybolle@gmail.com>
|
||||
Jeffrey Morgan <jmorganca@gmail.com>
|
||||
Jeffrey van Gogh <jvg@google.com>
|
||||
Jenny Gebske <jennifer@gebske.de>
|
||||
Jeongseok Kang <piono623@naver.com>
|
||||
Jeremy Chambers <jeremy@thehipbot.com>
|
||||
Jeremy Grosser <jeremy@synack.me>
|
||||
Jeremy Huntwork <jhuntwork@lightcubesolutions.com>
|
||||
@ -1035,7 +1029,6 @@ Jezeniel Zapanta <jpzapanta22@gmail.com>
|
||||
Jhon Honce <jhonce@redhat.com>
|
||||
Ji.Zhilong <zhilongji@gmail.com>
|
||||
Jian Liao <jliao@alauda.io>
|
||||
Jian Zeng <anonymousknight96@gmail.com>
|
||||
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
|
||||
Jiang Jinyang <jjyruby@gmail.com>
|
||||
Jianyong Wu <jianyong.wu@arm.com>
|
||||
@ -1974,7 +1967,6 @@ Sergey Evstifeev <sergey.evstifeev@gmail.com>
|
||||
Sergii Kabashniuk <skabashnyuk@codenvy.com>
|
||||
Sergio Lopez <slp@redhat.com>
|
||||
Serhat Gülçiçek <serhat25@gmail.com>
|
||||
Serhii Nakon <serhii.n@thescimus.com>
|
||||
SeungUkLee <lsy931106@gmail.com>
|
||||
Sevki Hasirci <s@sevki.org>
|
||||
Shane Canon <scanon@lbl.gov>
|
||||
@ -2261,7 +2253,6 @@ VladimirAus <v_roudakov@yahoo.com>
|
||||
Vladislav Kolesnikov <vkolesnikov@beget.ru>
|
||||
Vlastimil Zeman <vlastimil.zeman@diffblue.com>
|
||||
Vojtech Vitek (V-Teq) <vvitek@redhat.com>
|
||||
voloder <110066198+voloder@users.noreply.github.com>
|
||||
Walter Leibbrandt <github@wrl.co.za>
|
||||
Walter Stanish <walter@pratyeka.org>
|
||||
Wang Chao <chao.wang@ucloud.cn>
|
||||
|
||||
15
vendor/github.com/docker/docker/pkg/system/stat_illumos.go
generated
vendored
15
vendor/github.com/docker/docker/pkg/system/stat_illumos.go
generated
vendored
@ -1,15 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import "syscall"

// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
    return &StatT{
        size: s.Size,
        mode: uint32(s.Mode),
        uid:  s.Uid,
        gid:  s.Gid,
        rdev: uint64(s.Rdev),
        mtim: s.Mtim,
    }, nil
}
67
vendor/github.com/go-logr/logr/README.md
generated
vendored
67
vendor/github.com/go-logr/logr/README.md
generated
vendored
@ -91,12 +91,11 @@ logr design but also left out some parts and changed others:
| Adding a name to a logger | `WithName` | no API |
| Modify verbosity of log entries in a call chain | `V` | no API |
| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
| Pass context for extracting additional values | no API | API variants like `InfoCtx` |

The high-level slog API is explicitly meant to be one of many different APIs
that can be layered on top of a shared `slog.Handler`. logr is one such
alternative API, with [interoperability](#slog-interoperability) provided by
some conversion functions.
alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
package.

### Inspiration

@ -146,24 +145,24 @@ There are implementations for the following logging libraries:

## slog interoperability

Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
slog API.
slog API. `slogr` itself leaves that to the caller.

### Using a `logr.LogSink` as backend for slog
## Using a `logr.Sink` as backend for slog

Ideally, a logr sink implementation should support both logr and slog by
implementing both the normal logr interface(s) and `SlogSink`. Because
implementing both the normal logr interface(s) and `slogr.SlogSink`. Because
of a conflict in the parameters of the common `Enabled` method, it is [not
possible to implement both slog.Handler and logr.Sink in the same
type](https://github.com/golang/go/issues/59110).

If both are supported, log calls can go from the high-level APIs to the backend
without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
convert back and forth without adding additional wrappers, with one exception:
when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
log calls.

Such an implementation should also support values that implement specific
@ -188,13 +187,13 @@ Not supporting slog has several drawbacks:
These drawbacks are severe enough that applications using a mixture of slog and
logr should switch to a different backend.

### Using a `slog.Handler` as backend for logr
## Using a `slog.Handler` as backend for logr

Using a plain `slog.Handler` without support for logr works better than the
other direction:
- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
  by negating them.
- Stack unwinding is done by the `SlogSink` and the resulting program
- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
  counter is passed to the `slog.Handler`.
- Names added via `Logger.WithName` are gathered and recorded in an additional
  attribute with `logger` as key and the names separated by slash as value.
@ -206,39 +205,27 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
with logr implementations without slog support is not important, then
`slog.Valuer` is sufficient.

### Context support for slog
## Context support for slog

Storing a logger in a `context.Context` is not supported by
slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
used to fill this gap. They store and retrieve a `slog.Logger` pointer
under the same context key that is also used by `NewContext` and
`FromContext` for `logr.Logger` value.
slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
to fill this gap:

When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
automatically convert the `slog.Logger` to a
`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
    func HandlerFromContext(ctx context.Context) slog.Handler {
        logger, err := logr.FromContext(ctx)
        if err == nil {
            return slogr.NewSlogHandler(logger)
        }
        return slog.Default().Handler()
    }

With this approach, binaries which use either slog or logr are as efficient as
possible with no unnecessary allocations. This is also why the API stores a
`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
on retrieval would need to allocate one.
    func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
        return logr.NewContext(ctx, slogr.NewLogr(handler))
    }

The downside is that switching back and forth needs more allocations. Because
logr is the API that is already in use by different packages, in particular
Kubernetes, the recommendation is to use the `logr.Logger` API in code which
uses contextual logging.

An alternative to adding values to a logger and storing that logger in the
context is to store the values in the context and to configure a logging
backend to extract those values when emitting log entries. This only works when
log calls are passed the context, which is not supported by the logr API.

With the slog API, it is possible, but not
required. https://github.com/veqryn/slog-context is a package for slog which
provides additional support code for this approach. It also contains wrappers
for the context functions in logr, so developers who prefer to not use the logr
APIs directly can use those instead and the resulting code will still be
interoperable with logr.
The downside is that storing and retrieving a `slog.Handler` needs more
allocations compared to using a `logr.Logger`. Therefore the recommendation is
to use the `logr.Logger` API in code which uses contextual logging.

## FAQ

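A compact sketch of the two conversion directions discussed in the README text above, assuming Go 1.21+ and a go-logr release that ships FromSlogHandler and ToSlogHandler (the newer API referenced in this diff); the JSON handler and messages are illustrative.

package main

import (
    "log/slog"
    "os"

    "github.com/go-logr/logr"
)

func main() {
    // slog.Handler -> logr.Logger
    handler := slog.NewJSONHandler(os.Stdout, nil)
    logger := logr.FromSlogHandler(handler)
    logger.Info("hello from logr", "key", "value")

    // logr.Logger -> slog.Logger, for code written against the slog API.
    slogger := slog.New(logr.ToSlogHandler(logger.V(1)))
    slogger.Info("hello from slog", "key", "value")
}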
33
vendor/github.com/go-logr/logr/context.go
generated
vendored
33
vendor/github.com/go-logr/logr/context.go
generated
vendored
@ -1,33 +0,0 @@
/*
Copyright 2023 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package logr

// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
// the value is always a Logger value. With Go >= 1.21, the value can be a
// Logger value or a slog.Logger pointer.
type contextKey struct{}

// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}

func (notFoundError) Error() string {
    return "no logr.Logger was present"
}

func (notFoundError) IsNotFound() bool {
    return true
}
49
vendor/github.com/go-logr/logr/context_noslog.go
generated
vendored
49
vendor/github.com/go-logr/logr/context_noslog.go
generated
vendored
@ -1,49 +0,0 @@
|
||||
//go:build !go1.21
|
||||
// +build !go1.21
|
||||
|
||||
/*
|
||||
Copyright 2019 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// FromContext returns a Logger from ctx or an error if no Logger is found.
|
||||
func FromContext(ctx context.Context) (Logger, error) {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
return Logger{}, notFoundError{}
|
||||
}
|
||||
|
||||
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
|
||||
// returns a Logger that discards all log messages.
|
||||
func FromContextOrDiscard(ctx context.Context) Logger {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v
|
||||
}
|
||||
|
||||
return Discard()
|
||||
}
|
||||
|
||||
// NewContext returns a new Context, derived from ctx, which carries the
|
||||
// provided Logger.
|
||||
func NewContext(ctx context.Context, logger Logger) context.Context {
|
||||
return context.WithValue(ctx, contextKey{}, logger)
|
||||
}
|
||||
83
vendor/github.com/go-logr/logr/context_slog.go
generated
vendored
83
vendor/github.com/go-logr/logr/context_slog.go
generated
vendored
@ -1,83 +0,0 @@
|
||||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2019 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
// FromContext returns a Logger from ctx or an error if no Logger is found.
|
||||
func FromContext(ctx context.Context) (Logger, error) {
|
||||
v := ctx.Value(contextKey{})
|
||||
if v == nil {
|
||||
return Logger{}, notFoundError{}
|
||||
}
|
||||
|
||||
switch v := v.(type) {
|
||||
case Logger:
|
||||
return v, nil
|
||||
case *slog.Logger:
|
||||
return FromSlogHandler(v.Handler()), nil
|
||||
default:
|
||||
// Not reached.
|
||||
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
|
||||
}
|
||||
}
|
||||
|
||||
// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
|
||||
func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
|
||||
v := ctx.Value(contextKey{})
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch v := v.(type) {
|
||||
case Logger:
|
||||
return slog.New(ToSlogHandler(v))
|
||||
case *slog.Logger:
|
||||
return v
|
||||
default:
|
||||
// Not reached.
|
||||
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
|
||||
}
|
||||
}
|
||||
|
||||
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
|
||||
// returns a Logger that discards all log messages.
|
||||
func FromContextOrDiscard(ctx context.Context) Logger {
|
||||
if logger, err := FromContext(ctx); err == nil {
|
||||
return logger
|
||||
}
|
||||
return Discard()
|
||||
}
|
||||
|
||||
// NewContext returns a new Context, derived from ctx, which carries the
|
||||
// provided Logger.
|
||||
func NewContext(ctx context.Context, logger Logger) context.Context {
|
||||
return context.WithValue(ctx, contextKey{}, logger)
|
||||
}
|
||||
|
||||
// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
|
||||
// provided slog.Logger.
|
||||
func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
|
||||
return context.WithValue(ctx, contextKey{}, logger)
|
||||
}
|
||||
185
vendor/github.com/go-logr/logr/funcr/funcr.go
generated
vendored
185
vendor/github.com/go-logr/logr/funcr/funcr.go
generated
vendored
@ -100,11 +100,6 @@ type Options struct {
|
||||
// details, see docs for Go's time.Layout.
|
||||
TimestampFormat string
|
||||
|
||||
// LogInfoLevel tells funcr what key to use to log the info level.
|
||||
// If not specified, the info level will be logged as "level".
|
||||
// If this is set to "", the info level will not be logged at all.
|
||||
LogInfoLevel *string
|
||||
|
||||
// Verbosity tells funcr which V logs to produce. Higher values enable
|
||||
// more logs. Info logs at or below this level will be written, while logs
|
||||
// above this level will be discarded.
|
||||
@ -218,10 +213,6 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
|
||||
if opts.MaxLogDepth == 0 {
|
||||
opts.MaxLogDepth = defaultMaxLogDepth
|
||||
}
|
||||
if opts.LogInfoLevel == nil {
|
||||
opts.LogInfoLevel = new(string)
|
||||
*opts.LogInfoLevel = "level"
|
||||
}
|
||||
f := Formatter{
|
||||
outputFormat: outfmt,
|
||||
prefix: "",
|
||||
@ -236,15 +227,12 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
|
||||
// implementation. It should be constructed with NewFormatter. Some of
|
||||
// its methods directly implement logr.LogSink.
|
||||
type Formatter struct {
|
||||
outputFormat outputFormat
|
||||
prefix string
|
||||
values []any
|
||||
valuesStr string
|
||||
parentValuesStr string
|
||||
depth int
|
||||
opts *Options
|
||||
group string // for slog groups
|
||||
groupDepth int
|
||||
outputFormat outputFormat
|
||||
prefix string
|
||||
values []any
|
||||
valuesStr string
|
||||
depth int
|
||||
opts *Options
|
||||
}
|
||||
|
||||
// outputFormat indicates which outputFormat to use.
|
||||
@ -265,62 +253,33 @@ func (f Formatter) render(builtins, args []any) string {
|
||||
// Empirically bytes.Buffer is faster than strings.Builder for this.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('{') // for the whole line
|
||||
buf.WriteByte('{')
|
||||
}
|
||||
|
||||
vals := builtins
|
||||
if hook := f.opts.RenderBuiltinsHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
|
||||
continuing := len(builtins) > 0
|
||||
|
||||
if f.parentValuesStr != "" {
|
||||
if len(f.valuesStr) > 0 {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.parentValuesStr)
|
||||
continuing = true
|
||||
}
|
||||
|
||||
groupDepth := f.groupDepth
|
||||
if f.group != "" {
|
||||
if f.valuesStr != "" || len(args) != 0 {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(',')
|
||||
} else {
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteByte('{') // for the group
|
||||
continuing = false
|
||||
} else {
|
||||
// The group was empty
|
||||
groupDepth--
|
||||
}
|
||||
}
|
||||
|
||||
if f.valuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.valuesStr)
|
||||
continuing = true
|
||||
buf.WriteString(f.valuesStr)
|
||||
}
|
||||
|
||||
vals = args
|
||||
if hook := f.opts.RenderArgsHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
f.flatten(buf, vals, continuing, true) // escape user-provided keys
|
||||
|
||||
for i := 0; i < groupDepth; i++ {
|
||||
buf.WriteByte('}') // for the groups
|
||||
}
|
||||
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('}') // for the whole line
|
||||
buf.WriteByte('}')
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
@ -339,16 +298,9 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
|
||||
if len(kvList)%2 != 0 {
|
||||
kvList = append(kvList, noValue)
|
||||
}
|
||||
copied := false
|
||||
for i := 0; i < len(kvList); i += 2 {
|
||||
k, ok := kvList[i].(string)
|
||||
if !ok {
|
||||
if !copied {
|
||||
newList := make([]any, len(kvList))
|
||||
copy(newList, kvList)
|
||||
kvList = newList
|
||||
copied = true
|
||||
}
|
||||
k = f.nonStringKey(kvList[i])
|
||||
kvList[i] = k
|
||||
}
|
||||
@ -356,7 +308,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
|
||||
|
||||
if i > 0 || continuing {
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(f.comma())
|
||||
buf.WriteByte(',')
|
||||
} else {
|
||||
// In theory the format could be something we don't understand. In
|
||||
// practice, we control it, so it won't be.
|
||||
@ -364,35 +316,24 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString(f.quoted(k, escapeKeys))
|
||||
buf.WriteByte(f.colon())
|
||||
if escapeKeys {
|
||||
buf.WriteString(prettyString(k))
|
||||
} else {
|
||||
// this is faster
|
||||
buf.WriteByte('"')
|
||||
buf.WriteString(k)
|
||||
buf.WriteByte('"')
|
||||
}
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(':')
|
||||
} else {
|
||||
buf.WriteByte('=')
|
||||
}
|
||||
buf.WriteString(f.pretty(v))
|
||||
}
|
||||
return kvList
|
||||
}
|
||||
|
||||
func (f Formatter) quoted(str string, escape bool) string {
|
||||
if escape {
|
||||
return prettyString(str)
|
||||
}
|
||||
// this is faster
|
||||
return `"` + str + `"`
|
||||
}
|
||||
|
||||
func (f Formatter) comma() byte {
|
||||
if f.outputFormat == outputJSON {
|
||||
return ','
|
||||
}
|
||||
return ' '
|
||||
}
|
||||
|
||||
func (f Formatter) colon() byte {
|
||||
if f.outputFormat == outputJSON {
|
||||
return ':'
|
||||
}
|
||||
return '='
|
||||
}
|
||||
|
||||
func (f Formatter) pretty(value any) string {
|
||||
return f.prettyWithFlags(value, 0, 0)
|
||||
}
|
||||
@ -466,12 +407,12 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
||||
}
|
||||
for i := 0; i < len(v); i += 2 {
|
||||
if i > 0 {
|
||||
buf.WriteByte(f.comma())
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
k, _ := v[i].(string) // sanitize() above means no need to check success
|
||||
// arbitrary keys might need escaping
|
||||
buf.WriteString(prettyString(k))
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
|
||||
}
|
||||
if flags&flagRawStruct == 0 {
|
||||
@ -540,7 +481,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
||||
continue
|
||||
}
|
||||
if printComma {
|
||||
buf.WriteByte(f.comma())
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
printComma = true // if we got here, we are rendering a field
|
||||
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
|
||||
@ -551,8 +492,10 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
||||
name = fld.Name
|
||||
}
|
||||
// field names can't contain characters which need escaping
|
||||
buf.WriteString(f.quoted(name, false))
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteByte('"')
|
||||
buf.WriteString(name)
|
||||
buf.WriteByte('"')
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
|
||||
}
|
||||
if flags&flagRawStruct == 0 {
|
||||
@ -577,7 +520,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
||||
buf.WriteByte('[')
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if i > 0 {
|
||||
buf.WriteByte(f.comma())
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
e := v.Index(i)
|
||||
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
|
||||
@ -591,7 +534,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
||||
i := 0
|
||||
for it.Next() {
|
||||
if i > 0 {
|
||||
buf.WriteByte(f.comma())
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
// If a map key supports TextMarshaler, use it.
|
||||
keystr := ""
|
||||
@ -613,7 +556,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
||||
}
|
||||
}
|
||||
buf.WriteString(keystr)
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
|
||||
i++
|
||||
}
|
||||
@ -763,53 +706,6 @@ func (f Formatter) sanitize(kvList []any) []any {
|
||||
return kvList
|
||||
}
|
||||
|
||||
// startGroup opens a new group scope (basically a sub-struct), which locks all
|
||||
// the current saved values and starts them anew. This is needed to satisfy
|
||||
// slog.
|
||||
func (f *Formatter) startGroup(group string) {
|
||||
// Unnamed groups are just inlined.
|
||||
if group == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// Any saved values can no longer be changed.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
continuing := false
|
||||
|
||||
if f.parentValuesStr != "" {
|
||||
buf.WriteString(f.parentValuesStr)
|
||||
continuing = true
|
||||
}
|
||||
|
||||
if f.group != "" && f.valuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteByte('{') // for the group
|
||||
continuing = false
|
||||
}
|
||||
|
||||
if f.valuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.valuesStr)
|
||||
}
|
||||
|
||||
// NOTE: We don't close the scope here - that's done later, when a log line
|
||||
// is actually rendered (because we have N scopes to close).
|
||||
|
||||
f.parentValuesStr = buf.String()
|
||||
|
||||
// Start collecting new values.
|
||||
f.group = group
|
||||
f.groupDepth++
|
||||
f.valuesStr = ""
|
||||
f.values = nil
|
||||
}
|
||||
|
||||
// Init configures this Formatter from runtime info, such as the call depth
|
||||
// imposed by logr itself.
|
||||
// Note that this receiver is a pointer, so depth can be saved.
|
||||
@ -844,10 +740,7 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, args
|
||||
if policy := f.opts.LogCaller; policy == All || policy == Info {
|
||||
args = append(args, "caller", f.caller())
|
||||
}
|
||||
if key := *f.opts.LogInfoLevel; key != "" {
|
||||
args = append(args, key, level)
|
||||
}
|
||||
args = append(args, "msg", msg)
|
||||
args = append(args, "level", level, "msg", msg)
|
||||
return prefix, f.render(args, kvList)
|
||||
}
|
||||
|
||||
|
||||
105
vendor/github.com/go-logr/logr/funcr/slogsink.go
generated
vendored
105
vendor/github.com/go-logr/logr/funcr/slogsink.go
generated
vendored
@ -1,105 +0,0 @@
|
||||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package funcr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
var _ logr.SlogSink = &fnlogger{}
|
||||
|
||||
const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
|
||||
|
||||
func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
|
||||
kvList := make([]any, 0, 2*record.NumAttrs())
|
||||
record.Attrs(func(attr slog.Attr) bool {
|
||||
kvList = attrToKVs(attr, kvList)
|
||||
return true
|
||||
})
|
||||
|
||||
if record.Level >= slog.LevelError {
|
||||
l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
|
||||
} else {
|
||||
level := l.levelFromSlog(record.Level)
|
||||
l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
|
||||
kvList := make([]any, 0, 2*len(attrs))
|
||||
for _, attr := range attrs {
|
||||
kvList = attrToKVs(attr, kvList)
|
||||
}
|
||||
l.AddValues(kvList)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l fnlogger) WithGroup(name string) logr.SlogSink {
|
||||
l.startGroup(name)
|
||||
return &l
|
||||
}
|
||||
|
||||
// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups
|
||||
// and other details of slog.
|
||||
func attrToKVs(attr slog.Attr, kvList []any) []any {
|
||||
attrVal := attr.Value.Resolve()
|
||||
if attrVal.Kind() == slog.KindGroup {
|
||||
groupVal := attrVal.Group()
|
||||
grpKVs := make([]any, 0, 2*len(groupVal))
|
||||
for _, attr := range groupVal {
|
||||
grpKVs = attrToKVs(attr, grpKVs)
|
||||
}
|
||||
if attr.Key == "" {
|
||||
// slog says we have to inline these
|
||||
kvList = append(kvList, grpKVs...)
|
||||
} else {
|
||||
kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
|
||||
}
|
||||
} else if attr.Key != "" {
|
||||
kvList = append(kvList, attr.Key, attrVal.Any())
|
||||
}
|
||||
|
||||
return kvList
|
||||
}
|
||||
|
||||
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
|
||||
// It ensures that the result is >= 0. This is necessary because the result is
|
||||
// passed to a LogSink and that API did not historically document whether
|
||||
// levels could be negative or what that meant.
|
||||
//
|
||||
// Some example usage:
|
||||
//
|
||||
// logrV0 := getMyLogger()
|
||||
// logrV2 := logrV0.V(2)
|
||||
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
|
||||
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
|
||||
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
|
||||
// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
|
||||
func (l fnlogger) levelFromSlog(level slog.Level) int {
|
||||
result := -level
|
||||
if result < 0 {
|
||||
result = 0 // because LogSink doesn't expect negative V levels
|
||||
}
|
||||
return int(result)
|
||||
}
|
||||
43
vendor/github.com/go-logr/logr/logr.go
generated
vendored
43
vendor/github.com/go-logr/logr/logr.go
generated
vendored
@ -207,6 +207,10 @@ limitations under the License.
// those.
package logr

import (
    "context"
)

// New returns a new Logger instance. This is primarily used by libraries
// implementing LogSink, rather than end users. Passing a nil sink will create
// a Logger which discards all log lines.
@ -406,6 +410,45 @@ func (l Logger) IsZero() bool {
    return l.sink == nil
}

// contextKey is how we find Loggers in a context.Context.
type contextKey struct{}

// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
    if v, ok := ctx.Value(contextKey{}).(Logger); ok {
        return v, nil
    }

    return Logger{}, notFoundError{}
}

// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}

func (notFoundError) Error() string {
    return "no logr.Logger was present"
}

func (notFoundError) IsNotFound() bool {
    return true
}

// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
    if v, ok := ctx.Value(contextKey{}).(Logger); ok {
        return v
    }

    return Discard()
}

// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
    return context.WithValue(ctx, contextKey{}, logger)
}

// RuntimeInfo holds information that the logr "core" library knows which
// LogSinks might want to know.
type RuntimeInfo struct {

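A minimal sketch of the context helpers shown above, using logr's own funcr package as the backend; the print function is illustrative.

package main

import (
    "context"
    "fmt"

    "github.com/go-logr/logr"
    "github.com/go-logr/logr/funcr"
)

func handle(ctx context.Context) {
    // Retrieve the request-scoped logger, falling back to a no-op logger.
    log := logr.FromContextOrDiscard(ctx)
    log.Info("handling request")
}

func main() {
    // funcr wraps a simple print function in a logr.Logger.
    logger := funcr.New(func(prefix, args string) {
        fmt.Println(prefix, args)
    }, funcr.Options{})

    ctx := logr.NewContext(context.Background(), logger)
    handle(ctx)
}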
192
vendor/github.com/go-logr/logr/sloghandler.go
generated
vendored
192
vendor/github.com/go-logr/logr/sloghandler.go
generated
vendored
@ -1,192 +0,0 @@
|
||||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
type slogHandler struct {
|
||||
// May be nil, in which case all logs get discarded.
|
||||
sink LogSink
|
||||
// Non-nil if sink is non-nil and implements SlogSink.
|
||||
slogSink SlogSink
|
||||
|
||||
// groupPrefix collects values from WithGroup calls. It gets added as
|
||||
// prefix to value keys when handling a log record.
|
||||
groupPrefix string
|
||||
|
||||
// levelBias can be set when constructing the handler to influence the
|
||||
// slog.Level of log records. A positive levelBias reduces the
|
||||
// slog.Level value. slog has no API to influence this value after the
|
||||
// handler got created, so it can only be set indirectly through
|
||||
// Logger.V.
|
||||
levelBias slog.Level
|
||||
}
|
||||
|
||||
var _ slog.Handler = &slogHandler{}
|
||||
|
||||
// groupSeparator is used to concatenate WithGroup names and attribute keys.
|
||||
const groupSeparator = "."
|
||||
|
||||
// GetLevel is used for black box unit testing.
|
||||
func (l *slogHandler) GetLevel() slog.Level {
|
||||
return l.levelBias
|
||||
}
|
||||
|
||||
func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
|
||||
return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
|
||||
}
|
||||
|
||||
func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
|
||||
if l.slogSink != nil {
|
||||
// Only adjust verbosity level of log entries < slog.LevelError.
|
||||
if record.Level < slog.LevelError {
|
||||
record.Level -= l.levelBias
|
||||
}
|
||||
return l.slogSink.Handle(ctx, record)
|
||||
}
|
||||
|
||||
// No need to check for nil sink here because Handle will only be called
|
||||
// when Enabled returned true.
|
||||
|
||||
kvList := make([]any, 0, 2*record.NumAttrs())
|
||||
record.Attrs(func(attr slog.Attr) bool {
|
||||
kvList = attrToKVs(attr, l.groupPrefix, kvList)
|
||||
return true
|
||||
})
|
||||
if record.Level >= slog.LevelError {
|
||||
l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
|
||||
} else {
|
||||
level := l.levelFromSlog(record.Level)
|
||||
l.sinkWithCallDepth().Info(level, record.Message, kvList...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
|
||||
// are called by Handle, code in slog gets skipped.
|
||||
//
|
||||
// This offset currently (Go 1.21.0) works for calls through
|
||||
// slog.New(ToSlogHandler(...)). There's no guarantee that the call
|
||||
// chain won't change. Wrapping the handler will also break unwinding. It's
|
||||
// still better than not adjusting at all....
|
||||
//
|
||||
// This cannot be done when constructing the handler because FromSlogHandler needs
|
||||
// access to the original sink without this adjustment. A second copy would
|
||||
// work, but then WithAttrs would have to be called for both of them.
|
||||
func (l *slogHandler) sinkWithCallDepth() LogSink {
|
||||
if sink, ok := l.sink.(CallDepthLogSink); ok {
|
||||
return sink.WithCallDepth(2)
|
||||
}
|
||||
return l.sink
|
||||
}
|
||||
|
||||
func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
|
||||
if l.sink == nil || len(attrs) == 0 {
|
||||
return l
|
||||
}
|
||||
|
||||
clone := *l
|
||||
if l.slogSink != nil {
|
||||
clone.slogSink = l.slogSink.WithAttrs(attrs)
|
||||
clone.sink = clone.slogSink
|
||||
} else {
|
||||
kvList := make([]any, 0, 2*len(attrs))
|
||||
for _, attr := range attrs {
|
||||
kvList = attrToKVs(attr, l.groupPrefix, kvList)
|
||||
}
|
||||
clone.sink = l.sink.WithValues(kvList...)
|
||||
}
|
||||
return &clone
|
||||
}
|
||||
|
||||
func (l *slogHandler) WithGroup(name string) slog.Handler {
|
||||
if l.sink == nil {
|
||||
return l
|
||||
}
|
||||
if name == "" {
|
||||
// slog says to inline empty groups
|
||||
return l
|
||||
}
|
||||
clone := *l
|
||||
if l.slogSink != nil {
|
||||
clone.slogSink = l.slogSink.WithGroup(name)
|
||||
clone.sink = clone.slogSink
|
||||
} else {
|
||||
clone.groupPrefix = addPrefix(clone.groupPrefix, name)
|
||||
}
|
||||
return &clone
|
||||
}
|
||||
|
||||
// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups
|
||||
// and other details of slog.
|
||||
func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
|
||||
attrVal := attr.Value.Resolve()
|
||||
if attrVal.Kind() == slog.KindGroup {
|
||||
groupVal := attrVal.Group()
|
||||
grpKVs := make([]any, 0, 2*len(groupVal))
|
||||
prefix := groupPrefix
|
||||
if attr.Key != "" {
|
||||
prefix = addPrefix(groupPrefix, attr.Key)
|
||||
}
|
||||
for _, attr := range groupVal {
|
||||
grpKVs = attrToKVs(attr, prefix, grpKVs)
|
||||
}
|
||||
kvList = append(kvList, grpKVs...)
|
||||
} else if attr.Key != "" {
|
||||
kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
|
||||
}
|
||||
|
||||
return kvList
|
||||
}
|
||||
|
||||
func addPrefix(prefix, name string) string {
|
||||
if prefix == "" {
|
||||
return name
|
||||
}
|
||||
if name == "" {
|
||||
return prefix
|
||||
}
|
||||
return prefix + groupSeparator + name
|
||||
}
|
||||
|
||||
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
|
||||
// It ensures that the result is >= 0. This is necessary because the result is
|
||||
// passed to a LogSink and that API did not historically document whether
|
||||
// levels could be negative or what that meant.
|
||||
//
|
||||
// Some example usage:
|
||||
//
|
||||
// logrV0 := getMyLogger()
|
||||
// logrV2 := logrV0.V(2)
|
||||
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
|
||||
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
|
||||
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
|
||||
// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
|
||||
func (l *slogHandler) levelFromSlog(level slog.Level) int {
|
||||
result := -level
|
||||
result += l.levelBias // in case the original Logger had a V level
|
||||
if result < 0 {
|
||||
result = 0 // because LogSink doesn't expect negative V levels
|
||||
}
|
||||
return int(result)
|
||||
}
|
||||
100
vendor/github.com/go-logr/logr/slogr.go
generated
vendored
100
vendor/github.com/go-logr/logr/slogr.go
generated
vendored
@ -1,100 +0,0 @@
|
||||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
// FromSlogHandler returns a Logger which writes to the slog.Handler.
|
||||
//
|
||||
// The logr verbosity level is mapped to slog levels such that V(0) becomes
|
||||
// slog.LevelInfo and V(4) becomes slog.LevelDebug.
|
||||
func FromSlogHandler(handler slog.Handler) Logger {
|
||||
if handler, ok := handler.(*slogHandler); ok {
|
||||
if handler.sink == nil {
|
||||
return Discard()
|
||||
}
|
||||
return New(handler.sink).V(int(handler.levelBias))
|
||||
}
|
||||
return New(&slogSink{handler: handler})
|
||||
}
|
||||
|
||||
// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
|
||||
//
|
||||
// The returned logger writes all records with level >= slog.LevelError as
|
||||
// error log entries with LogSink.Error, regardless of the verbosity level of
|
||||
// the Logger:
|
||||
//
|
||||
// logger := <some Logger with 0 as verbosity level>
|
||||
// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
|
||||
//
|
||||
// The level of all other records gets reduced by the verbosity
|
||||
// level of the Logger and the result is negated. If it happens
|
||||
// to be negative, then it gets replaced by zero because a LogSink
|
||||
// is not expected to handled negative levels:
|
||||
//
|
||||
// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
|
||||
// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
|
||||
// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
|
||||
// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
|
||||
func ToSlogHandler(logger Logger) slog.Handler {
|
||||
if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
|
||||
return sink.handler
|
||||
}
|
||||
|
||||
handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
|
||||
if slogSink, ok := handler.sink.(SlogSink); ok {
|
||||
handler.slogSink = slogSink
|
||||
}
|
||||
return handler
|
||||
}
|
||||
|
||||
// SlogSink is an optional interface that a LogSink can implement to support
|
||||
// logging through the slog.Logger or slog.Handler APIs better. It then should
|
||||
// also support special slog values like slog.Group. When used as a
|
||||
// slog.Handler, the advantages are:
|
||||
//
|
||||
// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
|
||||
// as intended by slog
|
||||
// - proper grouping of key/value pairs via WithGroup
|
||||
// - verbosity levels > slog.LevelInfo can be recorded
|
||||
// - less overhead
|
||||
//
|
||||
// Both APIs (Logger and slog.Logger/Handler) then are supported equally
|
||||
// well. Developers can pick whatever API suits them better and/or mix
|
||||
// packages which use either API in the same binary with a common logging
|
||||
// implementation.
|
||||
//
|
||||
// This interface is necessary because the type implementing the LogSink
|
||||
// interface cannot also implement the slog.Handler interface due to the
|
||||
// different prototype of the common Enabled method.
|
||||
//
|
||||
// An implementation could support both interfaces in two different types, but then
|
||||
// additional interfaces would be needed to convert between those types in FromSlogHandler
|
||||
// and ToSlogHandler.
|
||||
type SlogSink interface {
|
||||
LogSink
|
||||
|
||||
Handle(ctx context.Context, record slog.Record) error
|
||||
WithAttrs(attrs []slog.Attr) SlogSink
|
||||
WithGroup(name string) SlogSink
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.