Compare commits
24 Commits
| SHA1 |
|---|
| f52814d454 |
| 0f03c31ab2 |
| 1e259062fc |
| 4d6fc331b9 |
| 09a46645a0 |
| 0d88411f1b |
| b315983898 |
| 150a25b9ff |
| 67f5e3413b |
| d96b7869af |
| 15de6ce8f7 |
| 815be4418f |
| ca0fb174cf |
| 5c406f5ee4 |
| a6335c4226 |
| 91d44d6caf |
| 49021ad987 |
| 1a1a4fc478 |
| 6f75c0c8e2 |
| 9c10a9c9ac |
| 65cf8762d3 |
| 9dfe779abb |
| dfa98d33ea |
| c81e05eed8 |
@@ -94,6 +94,8 @@ linters:
          desc: Use github.com/google/uuid instead.
        - pkg: "io/ioutil"
          desc: The io/ioutil package has been deprecated, see https://go.dev/doc/go1.16#ioutil
        - pkg: "gopkg.in/yaml.v3"
          desc: Use go.yaml.in/yaml/v3 instead.

    forbidigo:
      forbid:
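The depguard rule above steers code away from gopkg.in/yaml.v3 toward go.yaml.in/yaml/v3. A minimal sketch of the swap, assuming (as the rule and the vendor.mod change below imply) that go.yaml.in/yaml/v3 is an API-compatible drop-in where only the import path changes:

```go
package main

import (
	"fmt"

	// Drop-in replacement for gopkg.in/yaml.v3; the package API is the same,
	// only the module path differs (assumption based on this change set).
	yaml "go.yaml.in/yaml/v3"
)

func main() {
	var cfg struct {
		Name     string   `yaml:"name"`
		Replicas int      `yaml:"replicas"`
		Tags     []string `yaml:"tags"`
	}
	data := []byte("name: web\nreplicas: 3\ntags: [a, b]\n")
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}
```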
@@ -232,3 +232,7 @@ func (f *fakeClient) ContainerPause(ctx context.Context, containerID string, opt
	return client.ContainerPauseResult{}, nil
}

func (*fakeClient) Ping(_ context.Context, _ client.PingOptions) (client.PingResult, error) {
	return client.PingResult{}, nil
}
@@ -112,7 +112,12 @@ func runCreate(ctx context.Context, dockerCli command.Cli, flags *pflag.FlagSet,
		}
	}
	copts.env = *opts.NewListOptsRef(&newEnv, nil)
	containerCfg, err := parse(flags, copts, dockerCli.ServerInfo().OSType)
	serverInfo, err := dockerCli.Client().Ping(ctx, client.PingOptions{})
	if err != nil {
		return err
	}

	containerCfg, err := parse(flags, copts, serverInfo.OSType)
	if err != nil {
		return cli.StatusError{
			Status: withHelp(err, "create").Error(),
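The hunk above replaces the cached `dockerCli.ServerInfo().OSType` with a live `Ping` to the daemon. A minimal sketch of that pattern against a hand-rolled interface; the `pingResult` type and its `OSType` field are stand-ins inferred from the diff, not the actual github.com/moby/moby/client definitions:

```go
package main

import (
	"context"
	"fmt"
)

// pingResult mirrors only the field of the client's ping response used here.
type pingResult struct {
	OSType string
}

// pinger is the narrow slice of the API client this sketch needs.
type pinger interface {
	Ping(ctx context.Context) (pingResult, error)
}

// serverOSType asks the daemon for its OS type at call time instead of
// relying on a value cached at CLI start-up.
func serverOSType(ctx context.Context, c pinger) (string, error) {
	info, err := c.Ping(ctx)
	if err != nil {
		return "", err
	}
	return info.OSType, nil
}

type fakeDaemon struct{}

func (fakeDaemon) Ping(context.Context) (pingResult, error) {
	return pingResult{OSType: "linux"}, nil
}

func main() {
	osType, err := serverOSType(context.Background(), fakeDaemon{})
	fmt.Println(osType, err)
}
```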
@@ -426,6 +426,9 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
		return nil, err
	}

	// short syntax ([ip:]public:private[/proto])
	//
	// TODO(thaJeztah): we need an equivalent that handles the "ip-address" part without depending on the nat package.
	ports, natPortBindings, err := nat.ParsePortSpecs(convertedOpts)
	if err != nil {
		return nil, err
@@ -101,7 +101,12 @@ func runRun(ctx context.Context, dockerCli command.Cli, flags *pflag.FlagSet, ro
		}
	}
	copts.env = *opts.NewListOptsRef(&newEnv, nil)
	containerCfg, err := parse(flags, copts, dockerCli.ServerInfo().OSType)
	serverInfo, err := dockerCli.Client().Ping(ctx, client.PingOptions{})
	if err != nil {
		return err
	}

	containerCfg, err := parse(flags, copts, serverInfo.OSType)
	// just in case the parse does not exit
	if err != nil {
		return cli.StatusError{
@@ -109,7 +109,7 @@ func runImages(ctx context.Context, dockerCLI command.Cli, options imagesOptions

	images := res.Items
	if !options.all {
		if _, ok := filters["dangling"]; !ok {
		if dangling, ok := filters["dangling"]; !ok || dangling["false"] {
			images = slices.DeleteFunc(images, isDangling)
		}
	}
@@ -127,7 +127,6 @@ func runImages(ctx context.Context, dockerCLI command.Cli, options imagesOptions
	if useTree {
		return runTree(ctx, dockerCLI, treeOptions{
			images:   images,
			all:      options.all,
			filters:  filters,
			expanded: options.tree,
		})
@@ -177,7 +176,7 @@ func shouldUseTree(options imagesOptions) (bool, error) {

// isDangling is a copy of [formatter.isDangling].
func isDangling(img image.Summary) bool {
	if len(img.RepoTags) == 0 && len(img.RepoDigests) == 0 {
	if len(img.RepoTags) == 0 {
		return true
	}
	return len(img.RepoTags) == 1 && img.RepoTags[0] == "<none>:<none>" && len(img.RepoDigests) == 1 && img.RepoDigests[0] == "<none>@<none>"
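The list change above drops dangling images client-side with `slices.DeleteFunc` unless `dangling=false` or `--all` asks for them. A self-contained sketch of that predicate-based filtering, using a simplified summary type in place of the real `image.Summary`:

```go
package main

import (
	"fmt"
	"slices"
)

// summary is a pared-down stand-in for image.Summary; only the fields the
// dangling check needs are included.
type summary struct {
	RepoTags    []string
	RepoDigests []string
}

// isDangling mirrors the check in the diff: an image is dangling when it has
// no repo tags, or only the "<none>" placeholder tag and digest.
func isDangling(img summary) bool {
	if len(img.RepoTags) == 0 {
		return true
	}
	return len(img.RepoTags) == 1 && img.RepoTags[0] == "<none>:<none>" &&
		len(img.RepoDigests) == 1 && img.RepoDigests[0] == "<none>@<none>"
}

func main() {
	images := []summary{
		{RepoTags: []string{"myimage:latest"}}, // tagged, kept
		{},                                     // untagged, dangling
		{RepoTags: []string{"<none>:<none>"}, RepoDigests: []string{"<none>@<none>"}}, // placeholder-tagged, dangling
	}
	// DeleteFunc removes every element for which the predicate returns true.
	images = slices.DeleteFunc(images, isDangling)
	fmt.Println(len(images)) // 1
}
```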
@@ -4,10 +4,12 @@ import (
	"errors"
	"fmt"
	"io"
	"slices"
	"testing"

	"github.com/docker/cli/cli/config/configfile"
	"github.com/docker/cli/internal/test"
	"github.com/moby/moby/api/types/image"
	"github.com/moby/moby/client"
	"gotest.tools/v3/assert"
	"gotest.tools/v3/golden"
@@ -116,6 +118,86 @@ func TestNewListCommandAmbiguous(t *testing.T) {
	golden.Assert(t, cli.ErrBuffer().String(), "list-command-ambiguous.golden")
}

func TestImagesFilterDangling(t *testing.T) {
	// Create test images with different states
	items := []image.Summary{
		{
			ID:          "sha256:87428fc522803d31065e7bce3cf03fe475096631e5e07bbd7a0fde60c4cf25c7",
			RepoTags:    []string{"myimage:latest"},
			RepoDigests: []string{"myimage@sha256:abc123"},
		},
		{
			ID:          "sha256:0263829989b6fd954f72baaf2fc64bc2e2f01d692d4de72986ea808f6e99813f",
			RepoTags:    []string{},
			RepoDigests: []string{},
		},
		{
			ID:          "sha256:a3a5e715f0cc574a73c3f9bebb6bc24f32ffd5b67b387244c2c909da779a1478",
			RepoTags:    []string{},
			RepoDigests: []string{"image@sha256:a3a5e715f0cc574a73c3f9bebb6bc24f32ffd5b67b387244c2c909da779a1478"},
		},
	}

	testCases := []struct {
		name          string
		args          []string
		imageListFunc func(options client.ImageListOptions) (client.ImageListResult, error)
	}{
		{
			name: "dangling-true",
			args: []string{"-f", "dangling=true"},
			imageListFunc: func(options client.ImageListOptions) (client.ImageListResult, error) {
				// Verify the filter is passed to the API
				assert.Check(t, options.Filters["dangling"]["true"])
				// dangling=true is handled on the server side and returns only dangling images
				return client.ImageListResult{Items: []image.Summary{items[1], items[2]}}, nil
			},
		},
		{
			name: "dangling-false",
			args: []string{"-f", "dangling=false"},
			imageListFunc: func(options client.ImageListOptions) (client.ImageListResult, error) {
				// Verify the filter is passed to the API
				assert.Check(t, options.Filters["dangling"]["false"])
				// Return all images including dangling
				return client.ImageListResult{Items: slices.Clone(items)}, nil
			},
		},
		{
			name: "no-dangling-filter",
			args: []string{},
			imageListFunc: func(options client.ImageListOptions) (client.ImageListResult, error) {
				// Verify no dangling filter is passed to the API
				_, exists := options.Filters["dangling"]
				assert.Check(t, !exists)
				// Return all images including dangling
				return client.ImageListResult{Items: slices.Clone(items)}, nil
			},
		},
		{
			name: "all-flag",
			args: []string{"--all"},
			imageListFunc: func(options client.ImageListOptions) (client.ImageListResult, error) {
				// Verify the All flag is set
				assert.Check(t, options.All)
				// Return all images including dangling
				return client.ImageListResult{Items: slices.Clone(items)}, nil
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			cli := test.NewFakeCli(&fakeClient{imageListFunc: tc.imageListFunc})
			cmd := newImagesCommand(cli)
			cmd.SetArgs(tc.args)
			err := cmd.Execute()
			assert.NilError(t, err)
			golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("list-command-filter-dangling.%s.golden", tc.name))
		})
	}
}

func nilToEmptySlice[T any](s []T) []T {
	if s == nil {
		return []T{}
cli/command/image/testdata/list-command-filter-dangling.all-flag.golden (vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
IMAGE            ID             DISK USAGE   CONTENT SIZE   EXTRA
myimage:latest   87428fc52280   0B           0B
<untagged>       0263829989b6   0B           0B
<untagged>       a3a5e715f0cc   0B           0B

cli/command/image/testdata/list-command-filter-dangling.dangling-false.golden (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
IMAGE            ID             DISK USAGE   CONTENT SIZE   EXTRA
myimage:latest   87428fc52280   0B           0B

cli/command/image/testdata/list-command-filter-dangling.dangling-true.golden (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
IMAGE        ID             DISK USAGE   CONTENT SIZE   EXTRA
<untagged>   0263829989b6   0B           0B
<untagged>   a3a5e715f0cc   0B           0B

cli/command/image/testdata/list-command-filter-dangling.no-dangling-filter.golden (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
IMAGE            ID             DISK USAGE   CONTENT SIZE   EXTRA
myimage:latest   87428fc52280   0B           0B

cli/command/image/testdata/tree-command-success.expanded-view-with-platforms.golden (vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
IMAGE                  ID             DISK USAGE   CONTENT SIZE   EXTRA
multiplatform:latest   aaaaaaaaaaaa   25.5 MB      20.2 MB        U
├─ linux/amd64         bbbbbbbbbbbb   12.1 MB      10.0 MB
└─ linux/arm64         cccccccccccc   13.4 MB      10.2 MB        U

cli/command/image/testdata/tree-command-success.mixed-tagged-untagged-with-children.golden (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
IMAGE             ID             DISK USAGE   CONTENT SIZE   EXTRA
app:v1
app:latest        101010101010   30.5 MB      25.2 MB        U
└─ linux/amd64    202020202020   15.2 MB      12.6 MB        U

<untagged>        303030303030   12.3 MB      10.1 MB
└─ linux/arm/v7   404040404040   6.1 MB       5.0 MB

base:alpine       505050505050   5.5 MB       5.5 MB

cli/command/image/testdata/tree-command-success.untagged-with-platforms.golden (vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
IMAGE            ID             DISK USAGE   CONTENT SIZE   EXTRA
<untagged>       dddddddddddd   18.5 MB      15.2 MB
├─ linux/amd64   eeeeeeeeeeee   9.2 MB       7.6 MB
└─ linux/arm64   ffffffffffff   9.3 MB       7.6 MB

cli/command/image/testdata/tree-command-success.width-calculation-untagged.golden (vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
IMAGE        ID             DISK USAGE   CONTENT SIZE   EXTRA
a:1          111111111111   5.5 MB       2.5 MB
<untagged>   222222222222   3.2 MB       1.6 MB
short:v1     333333333333   7.1 MB       3.5 MB         U
@@ -22,9 +22,10 @@ import (
	"github.com/opencontainers/go-digest"
)

const untaggedName = "<untagged>"

type treeOptions struct {
	images   []imagetypes.Summary
	all      bool
	filters  client.Filters
	expanded bool
}
@@ -111,7 +112,7 @@ func runTree(ctx context.Context, dockerCLI command.Cli, opts treeOptions) (int,
			continue
		}

		if opts.all && len(sortedTags) == 0 {
		if len(sortedTags) == 0 {
			view.images = append(view.images, topImage{
				Details:  topDetails,
				Children: children,
@@ -433,7 +434,7 @@ func printChildren(out tui.Output, headers []imgColumn, img topImage, normalColo

func printNames(out tui.Output, headers []imgColumn, img topImage, color, untaggedColor aec.ANSI) {
	if len(img.Names) == 0 {
		_, _ = fmt.Fprint(out, headers[0].Print(untaggedColor, "<untagged>"))
		_, _ = fmt.Fprint(out, headers[0].Print(untaggedColor, untaggedName))
	}

	for nameIdx, name := range img.Names {
@@ -545,7 +546,11 @@ func (h imgColumn) PrintR(clr aec.ANSI, s string) string {
func widestFirstColumnValue(headers []imgColumn, images []topImage) int {
	width := len(headers[0].Title)
	for _, img := range images {
		for _, name := range img.Names {
		names := img.Names
		if len(names) == 0 {
			names = []string{untaggedName}
		}
		for _, name := range names {
			if len(name) > width {
				width = len(name)
			}
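The width-calculation fix above substitutes the "<untagged>" placeholder when an image has no names, so the first column is never narrower than the placeholder. A stripped-down sketch of the same idea, operating on plain name slices rather than the real imgColumn/topImage types:

```go
package main

import "fmt"

const untaggedName = "<untagged>"

// widestName returns the widest string that will be printed in the first
// column: one of the image's names or, when it has none, the "<untagged>"
// placeholder — the case the fix above adds.
func widestName(title string, imageNames [][]string) int {
	width := len(title)
	for _, names := range imageNames {
		if len(names) == 0 {
			names = []string{untaggedName}
		}
		for _, name := range names {
			if len(name) > width {
				width = len(name)
			}
		}
	}
	return width
}

func main() {
	images := [][]string{
		{"a:1"},
		{}, // untagged; previously ignored, now counted as "<untagged>"
		{"short:v1"},
	}
	fmt.Println(widestName("IMAGE", images)) // 10 == len("<untagged>")
}
```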
@ -1,11 +1,13 @@
|
||||
package image
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/internal/test"
|
||||
"gotest.tools/v3/assert"
|
||||
"gotest.tools/v3/golden"
|
||||
)
|
||||
|
||||
func TestPrintImageTreeAnsiTty(t *testing.T) {
|
||||
@ -154,3 +156,200 @@ func TestPrintImageTreeAnsiTty(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrintImageTreeGolden(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
view treeView
|
||||
expanded bool
|
||||
}{
|
||||
{
|
||||
name: "width-calculation-untagged",
|
||||
expanded: false,
|
||||
view: treeView{
|
||||
images: []topImage{
|
||||
{
|
||||
Names: []string{"a:1"},
|
||||
Details: imageDetails{
|
||||
ID: "sha256:1111111111111111111111111111111111111111111111111111111111111111",
|
||||
DiskUsage: "5.5 MB",
|
||||
InUse: false,
|
||||
ContentSize: "2.5 MB",
|
||||
},
|
||||
},
|
||||
{
|
||||
// Untagged image name is longer than "a:1"
|
||||
Names: []string{},
|
||||
Details: imageDetails{
|
||||
ID: "sha256:2222222222222222222222222222222222222222222222222222222222222222",
|
||||
DiskUsage: "3.2 MB",
|
||||
InUse: false,
|
||||
ContentSize: "1.6 MB",
|
||||
},
|
||||
},
|
||||
{
|
||||
Names: []string{"short:v1"},
|
||||
Details: imageDetails{
|
||||
ID: "sha256:3333333333333333333333333333333333333333333333333333333333333333",
|
||||
DiskUsage: "7.1 MB",
|
||||
InUse: true,
|
||||
ContentSize: "3.5 MB",
|
||||
},
|
||||
},
|
||||
},
|
||||
imageSpacing: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "expanded-view-with-platforms",
|
||||
expanded: false,
|
||||
view: treeView{
|
||||
images: []topImage{
|
||||
{
|
||||
Names: []string{"multiplatform:latest"},
|
||||
Details: imageDetails{
|
||||
ID: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||
DiskUsage: "25.5 MB",
|
||||
InUse: true,
|
||||
ContentSize: "20.2 MB",
|
||||
},
|
||||
Children: []subImage{
|
||||
{
|
||||
Platform: "linux/amd64",
|
||||
Available: true,
|
||||
Details: imageDetails{
|
||||
ID: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
DiskUsage: "12.1 MB",
|
||||
InUse: false,
|
||||
ContentSize: "10.0 MB",
|
||||
},
|
||||
},
|
||||
{
|
||||
Platform: "linux/arm64",
|
||||
Available: true,
|
||||
Details: imageDetails{
|
||||
ID: "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
|
||||
DiskUsage: "13.4 MB",
|
||||
InUse: true,
|
||||
ContentSize: "10.2 MB",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
imageSpacing: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "untagged-with-platforms",
|
||||
expanded: false,
|
||||
view: treeView{
|
||||
images: []topImage{
|
||||
{
|
||||
Names: []string{},
|
||||
Details: imageDetails{
|
||||
ID: "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd",
|
||||
DiskUsage: "18.5 MB",
|
||||
InUse: false,
|
||||
ContentSize: "15.2 MB",
|
||||
},
|
||||
Children: []subImage{
|
||||
{
|
||||
Platform: "linux/amd64",
|
||||
Available: true,
|
||||
Details: imageDetails{
|
||||
ID: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
|
||||
DiskUsage: "9.2 MB",
|
||||
InUse: false,
|
||||
ContentSize: "7.6 MB",
|
||||
},
|
||||
},
|
||||
{
|
||||
Platform: "linux/arm64",
|
||||
Available: false,
|
||||
Details: imageDetails{
|
||||
ID: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
|
||||
DiskUsage: "9.3 MB",
|
||||
InUse: false,
|
||||
ContentSize: "7.6 MB",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
imageSpacing: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mixed-tagged-untagged-with-children",
|
||||
expanded: false,
|
||||
view: treeView{
|
||||
images: []topImage{
|
||||
{
|
||||
Names: []string{"app:v1", "app:latest"},
|
||||
Details: imageDetails{
|
||||
ID: "sha256:1010101010101010101010101010101010101010101010101010101010101010",
|
||||
DiskUsage: "30.5 MB",
|
||||
InUse: true,
|
||||
ContentSize: "25.2 MB",
|
||||
},
|
||||
Children: []subImage{
|
||||
{
|
||||
Platform: "linux/amd64",
|
||||
Available: true,
|
||||
Details: imageDetails{
|
||||
ID: "sha256:2020202020202020202020202020202020202020202020202020202020202020",
|
||||
DiskUsage: "15.2 MB",
|
||||
InUse: true,
|
||||
ContentSize: "12.6 MB",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Names: []string{},
|
||||
Details: imageDetails{
|
||||
ID: "sha256:3030303030303030303030303030303030303030303030303030303030303030",
|
||||
DiskUsage: "12.3 MB",
|
||||
InUse: false,
|
||||
ContentSize: "10.1 MB",
|
||||
},
|
||||
Children: []subImage{
|
||||
{
|
||||
Platform: "linux/arm/v7",
|
||||
Available: true,
|
||||
Details: imageDetails{
|
||||
ID: "sha256:4040404040404040404040404040404040404040404040404040404040404040",
|
||||
DiskUsage: "6.1 MB",
|
||||
InUse: false,
|
||||
ContentSize: "5.0 MB",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Names: []string{"base:alpine"},
|
||||
Details: imageDetails{
|
||||
ID: "sha256:5050505050505050505050505050505050505050505050505050505050505050",
|
||||
DiskUsage: "5.5 MB",
|
||||
InUse: false,
|
||||
ContentSize: "5.5 MB",
|
||||
},
|
||||
},
|
||||
},
|
||||
imageSpacing: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cli := test.NewFakeCli(nil)
|
||||
cli.Out().SetIsTerminal(false)
|
||||
|
||||
printImageTree(cli, tc.view)
|
||||
|
||||
golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("tree-command-success.%s.golden", tc.name))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
	composeLoader "github.com/docker/cli/cli/compose/loader"
	composetypes "github.com/docker/cli/cli/compose/types"
	"github.com/spf13/cobra"
	"gopkg.in/yaml.v3"
	"go.yaml.in/yaml/v3"
)

// configOptions holds docker stack config options
@@ -51,7 +51,12 @@ func newDeployCommand(dockerCLI command.Cli) *cobra.Command {

	flags := cmd.Flags()
	flags.StringSliceVarP(&opts.composefiles, "compose-file", "c", []string{}, `Path to a Compose file, or "-" to read from stdin`)
	flags.SetAnnotation("compose-file", "version", []string{"1.25"})
	_ = flags.SetAnnotation("compose-file", "version", []string{"1.25"})
	// Provide tab-completion for filenames. On Bash, this is constrained to the
	// ".yaml" and ".yml" file-extensions, but this doesn't appear to be supported
	// by other shells.
	_ = cmd.MarkFlagFilename("compose-file", "yaml", "yml")

	flags.BoolVar(&opts.sendRegistryAuth, "with-registry-auth", false, "Send registry authentication details to Swarm agents")
	flags.BoolVar(&opts.prune, "prune", false, "Prune services that are no longer referenced")
	flags.SetAnnotation("prune", "version", []string{"1.27"})
@@ -28,7 +28,7 @@ import (
	"github.com/moby/moby/api/types/network"
	"github.com/moby/moby/client/pkg/versions"
	"github.com/sirupsen/logrus"
	"gopkg.in/yaml.v3"
	"go.yaml.in/yaml/v3"
)

// Options supported by Load
@@ -918,8 +918,9 @@ var transformStringToDuration TransformerFunc = func(value any) (any, error) {
}

func toServicePortConfigs(value string) ([]any, error) {
	var portConfigs []any

	// short syntax ([ip:]public:private[/proto])
	//
	// TODO(thaJeztah): we need an equivalent that handles the "ip-address" part without depending on the nat package.
	ports, portBindings, err := nat.ParsePortSpecs([]string{value})
	if err != nil {
		return nil, err
@@ -931,6 +932,7 @@ func toServicePortConfigs(value string) ([]any, error) {
	}
	sort.Strings(keys)

	var portConfigs []any
	for _, key := range keys {
		// Reuse ConvertPortToPortConfig so that it is consistent
		port, err := network.ParsePort(key)
@@ -333,7 +333,7 @@ func TestInvalidTopLevelObjectType(t *testing.T) {

func TestNonStringKeys(t *testing.T) {
	// FIXME(thaJeztah): gopkg.in/yaml.v3 always unmarshals to a map[string]any, so we cannot produce a customized error for invalid types.
	t.Skip("not supported by gopkg.in/yaml.v3, which always unmarshals to a map[string]any")
	t.Skip("not supported by go.yaml.in/yaml/v3, which always unmarshals to a map[string]any")
	_, err := loadYAML(`
version: "3"
123:
@@ -6,7 +6,7 @@ import (
	"os"
	"testing"

	"gopkg.in/yaml.v3"
	"go.yaml.in/yaml/v3"
	"gotest.tools/v3/assert"
	"gotest.tools/v3/golden"
)
@@ -1,79 +0,0 @@
package test

import (
	"github.com/docker/cli/cli/config/credentials"
	"github.com/docker/cli/cli/config/types"
)

// FakeStore implements a credentials.Store that only acts as an in memory map
type FakeStore struct {
	store      map[string]types.AuthConfig
	eraseFunc  func(serverAddress string) error
	getFunc    func(serverAddress string) (types.AuthConfig, error)
	getAllFunc func() (map[string]types.AuthConfig, error)
	storeFunc  func(authConfig types.AuthConfig) error
}

// NewFakeStore creates a new file credentials store.
func NewFakeStore() credentials.Store {
	return &FakeStore{store: map[string]types.AuthConfig{}}
}

// SetStore is used to overrides Set function
func (c *FakeStore) SetStore(store map[string]types.AuthConfig) {
	c.store = store
}

// SetEraseFunc is used to overrides Erase function
func (c *FakeStore) SetEraseFunc(eraseFunc func(string) error) {
	c.eraseFunc = eraseFunc
}

// SetGetFunc is used to overrides Get function
func (c *FakeStore) SetGetFunc(getFunc func(string) (types.AuthConfig, error)) {
	c.getFunc = getFunc
}

// SetGetAllFunc is used to overrides GetAll function
func (c *FakeStore) SetGetAllFunc(getAllFunc func() (map[string]types.AuthConfig, error)) {
	c.getAllFunc = getAllFunc
}

// SetStoreFunc is used to override Store function
func (c *FakeStore) SetStoreFunc(storeFunc func(types.AuthConfig) error) {
	c.storeFunc = storeFunc
}

// Erase removes the given credentials from the map store
func (c *FakeStore) Erase(serverAddress string) error {
	if c.eraseFunc != nil {
		return c.eraseFunc(serverAddress)
	}
	delete(c.store, serverAddress)
	return nil
}

// Get retrieves credentials for a specific server from the map store.
func (c *FakeStore) Get(serverAddress string) (types.AuthConfig, error) {
	if c.getFunc != nil {
		return c.getFunc(serverAddress)
	}
	return c.store[serverAddress], nil
}

// GetAll returns the key value pairs of ServerAddress => Username
func (c *FakeStore) GetAll() (map[string]types.AuthConfig, error) {
	if c.getAllFunc != nil {
		return c.getAllFunc()
	}
	return c.store, nil
}

// Store saves the given credentials in the map store.
func (c *FakeStore) Store(authConfig types.AuthConfig) error {
	if c.storeFunc != nil {
		return c.storeFunc(authConfig)
	}
	c.store[authConfig.ServerAddress] = authConfig
	return nil
}
@@ -100,7 +100,9 @@ func (p *PortOpt) Set(value string) error {

			p.ports = append(p.ports, pConfig)
		} else {
			// short syntax
			// short syntax ([ip:]public:private[/proto])
			//
			// TODO(thaJeztah): we need an equivalent that handles the "ip-address" part without depending on the nat package.
			ports, portBindingMap, err := nat.ParsePortSpecs([]string{value})
			if err != nil {
				return err
@@ -162,18 +164,17 @@ func ConvertPortToPortConfig(
		logrus.Warnf("ignoring IP-address (%s:%s) service will listen on '0.0.0.0'", net.JoinHostPort(binding.HostIP, binding.HostPort), portProto.String())
	}

	startHostPort, endHostPort, err := nat.ParsePortRange(binding.HostPort)

	pr, err := network.ParsePortRange(binding.HostPort)
	if err != nil && binding.HostPort != "" {
		return nil, fmt.Errorf("invalid hostport binding (%s) for port (%d)", binding.HostPort, portProto.Num())
	}

	for i := startHostPort; i <= endHostPort; i++ {
	for p := range pr.All() {
		ports = append(ports, swarm.PortConfig{
			// TODO Name: ?
			Protocol:      portProto.Proto(),
			TargetPort:    uint32(portProto.Num()),
			PublishedPort: uint32(i),
			PublishedPort: uint32(p.Num()),
			PublishMode:   swarm.PortConfigPublishModeIngress,
		})
	}
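The ConvertPortToPortConfig hunk above swaps nat.ParsePortRange's (start, end) pair for iterating the range returned by network.ParsePortRange. Since the exact moby network API is only visible through the diff, here is a standard-library-only sketch that expands a "start-end" host-port spec the same way; expandPortRange is a hypothetical helper written for illustration, not the moby implementation:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// expandPortRange parses "8080" or "8080-8082" and returns each port in the
// range, mirroring what the loop over the parsed port range does above.
func expandPortRange(spec string) ([]uint16, error) {
	startStr, endStr, isRange := strings.Cut(spec, "-")
	start, err := strconv.ParseUint(startStr, 10, 16)
	if err != nil {
		return nil, fmt.Errorf("invalid port %q: %w", startStr, err)
	}
	end := start
	if isRange {
		if end, err = strconv.ParseUint(endStr, 10, 16); err != nil {
			return nil, fmt.Errorf("invalid port %q: %w", endStr, err)
		}
	}
	if end < start {
		return nil, fmt.Errorf("invalid range %q: end before start", spec)
	}
	ports := make([]uint16, 0, end-start+1)
	for p := start; p <= end; p++ {
		ports = append(ports, uint16(p))
	}
	return ports, nil
}

func main() {
	ports, err := expandPortRange("8080-8082")
	fmt.Println(ports, err) // [8080 8081 8082] <nil>
}
```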
vendor.mod (10 changes)
@@ -14,7 +14,7 @@ require (
	github.com/cpuguy83/go-md2man/v2 v2.0.7
	github.com/creack/pty v1.1.24
	github.com/distribution/reference v0.6.0
	github.com/docker/cli-docs-tool v0.10.0
	github.com/docker/cli-docs-tool v0.11.0
	github.com/docker/distribution v2.8.3+incompatible
	github.com/docker/docker-credential-helpers v0.9.4
	github.com/docker/go-connections v0.6.0
@@ -38,12 +38,12 @@ require (
	github.com/moby/sys/signal v0.7.1
	github.com/moby/sys/symlink v0.3.0
	github.com/moby/term v0.5.2
	github.com/morikuni/aec v1.0.0
	github.com/morikuni/aec v1.1.0
	github.com/opencontainers/go-digest v1.0.0
	github.com/opencontainers/image-spec v1.1.1
	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c
	github.com/sirupsen/logrus v1.9.3
	github.com/spf13/cobra v1.10.1
	github.com/spf13/cobra v1.10.2
	github.com/spf13/pflag v1.0.10
	github.com/tonistiigi/go-rosetta v0.0.0-20220804170347-3f4430f2d346
	github.com/xeipuuv/gojsonschema v1.2.0
@@ -55,11 +55,11 @@ require (
	go.opentelemetry.io/otel/sdk v1.38.0
	go.opentelemetry.io/otel/sdk/metric v1.38.0
	go.opentelemetry.io/otel/trace v1.38.0
	go.yaml.in/yaml/v3 v3.0.4
	golang.org/x/sync v0.18.0
	golang.org/x/sys v0.38.0
	golang.org/x/term v0.37.0
	golang.org/x/text v0.31.0
	gopkg.in/yaml.v3 v3.0.1
	gotest.tools/v3 v3.5.2
	tags.cncf.io/container-device-interface v1.0.1
)
@@ -82,7 +82,7 @@ require (
	github.com/gorilla/mux v1.8.1 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/klauspost/compress v1.18.2 // indirect
	github.com/moby/docker-image-spec v1.3.1 // indirect
	github.com/moby/sys/user v0.4.0 // indirect
	github.com/moby/sys/userns v0.1.0 // indirect
18
vendor.sum
18
vendor.sum
@ -36,8 +36,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/cli-docs-tool v0.10.0 h1:bOD6mKynPQgojQi3s2jgcUWGp/Ebqy1SeCr9VfKQLLU=
|
||||
github.com/docker/cli-docs-tool v0.10.0/go.mod h1:5EM5zPnT2E7yCLERZmrDA234Vwn09fzRHP4aX1qwp1U=
|
||||
github.com/docker/cli-docs-tool v0.11.0 h1:7d8QARFb7QEobizqxmEM7fOteZEHwH/zWgHQtHZEcfE=
|
||||
github.com/docker/cli-docs-tool v0.11.0/go.mod h1:ma8BKiisUo8D6W05XEYIh3oa1UbgrZhi1nowyKFJa8Q=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI=
|
||||
@ -96,8 +96,8 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
|
||||
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
@ -141,8 +141,8 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ=
|
||||
github.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
@ -180,8 +180,8 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
|
||||
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
|
||||
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
@ -229,6 +229,8 @@ go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOV
|
||||
go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
|
||||
vendor/github.com/docker/cli-docs-tool/Dockerfile (generated, vendored, 6 changes)
@@ -15,9 +15,9 @@
# limitations under the License.

ARG GO_VERSION="1.24"
ARG XX_VERSION="1.6.1"
ARG GOLANGCI_LINT_VERSION="v2.1.5"
ARG ADDLICENSE_VERSION="v1.1.1"
ARG XX_VERSION="1.9.0"
ARG GOLANGCI_LINT_VERSION="v2.7.1"
ARG ADDLICENSE_VERSION="v1.2.0"

ARG LICENSE_ARGS="-c cli-docs-tool -l apache"
ARG LICENSE_FILES=".*\(Dockerfile\|\.go\|\.hcl\|\.sh\)"

vendor/github.com/docker/cli-docs-tool/clidocstool_yaml.go (generated, vendored, 2 changes)
@@ -26,7 +26,7 @@ import (
	"github.com/docker/cli-docs-tool/annotation"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	"gopkg.in/yaml.v3"
	"go.yaml.in/yaml/v3"
)

type cmdOption struct {
17
vendor/github.com/klauspost/compress/README.md
generated
vendored
17
vendor/github.com/klauspost/compress/README.md
generated
vendored
@ -27,6 +27,16 @@ Use the links above for more information on each.
|
||||
|
||||
# changelog
|
||||
|
||||
* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1)
|
||||
* zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079
|
||||
* zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059
|
||||
* s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080
|
||||
* zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086
|
||||
* gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090
|
||||
* flate: Faster load+store https://github.com/klauspost/compress/pull/1104
|
||||
* flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101
|
||||
* flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103
|
||||
|
||||
* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0)
|
||||
* Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036
|
||||
* fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028
|
||||
@ -36,6 +46,9 @@ Use the links above for more information on each.
|
||||
* flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049
|
||||
* flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.17.x</summary>
|
||||
|
||||
* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)
|
||||
* zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017
|
||||
* s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014
|
||||
@ -102,7 +115,8 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
|
||||
* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
|
||||
* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
|
||||
* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
|
||||
|
||||
|
||||
</details>
|
||||
<details>
|
||||
<summary>See changes to v1.16.x</summary>
|
||||
|
||||
@ -669,3 +683,4 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
|
||||
# license
|
||||
|
||||
This code is licensed under the same conditions as the original Go code. See LICENSE file.
|
||||
|
||||
|
||||
2
vendor/github.com/klauspost/compress/fse/bitwriter.go
generated
vendored
2
vendor/github.com/klauspost/compress/fse/bitwriter.go
generated
vendored
@ -143,7 +143,7 @@ func (b *bitWriter) flush32() {
|
||||
// flushAlign will flush remaining full bytes and align to next byte boundary.
|
||||
func (b *bitWriter) flushAlign() {
|
||||
nbBytes := (b.nBits + 7) >> 3
|
||||
for i := uint8(0); i < nbBytes; i++ {
|
||||
for i := range nbBytes {
|
||||
b.out = append(b.out, byte(b.bitContainer>>(i*8)))
|
||||
}
|
||||
b.nBits = 0
|
||||
|
||||
2
vendor/github.com/klauspost/compress/fse/compress.go
generated
vendored
2
vendor/github.com/klauspost/compress/fse/compress.go
generated
vendored
@ -396,7 +396,7 @@ func (s *Scratch) buildCTable() error {
|
||||
if v > largeLimit {
|
||||
s.zeroBits = true
|
||||
}
|
||||
for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
|
||||
for range v {
|
||||
tableSymbol[position] = symbol
|
||||
position = (position + step) & tableMask
|
||||
for position > highThreshold {
|
||||
|
||||
2
vendor/github.com/klauspost/compress/huff0/bitwriter.go
generated
vendored
2
vendor/github.com/klauspost/compress/huff0/bitwriter.go
generated
vendored
@ -85,7 +85,7 @@ func (b *bitWriter) flush32() {
|
||||
// flushAlign will flush remaining full bytes and align to next byte boundary.
|
||||
func (b *bitWriter) flushAlign() {
|
||||
nbBytes := (b.nBits + 7) >> 3
|
||||
for i := uint8(0); i < nbBytes; i++ {
|
||||
for i := range nbBytes {
|
||||
b.out = append(b.out, byte(b.bitContainer>>(i*8)))
|
||||
}
|
||||
b.nBits = 0
|
||||
|
||||
6
vendor/github.com/klauspost/compress/huff0/compress.go
generated
vendored
6
vendor/github.com/klauspost/compress/huff0/compress.go
generated
vendored
@ -276,7 +276,7 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
|
||||
offsetIdx := len(s.Out)
|
||||
s.Out = append(s.Out, sixZeros[:]...)
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
for i := range 4 {
|
||||
toDo := src
|
||||
if len(toDo) > segmentSize {
|
||||
toDo = toDo[:segmentSize]
|
||||
@ -312,7 +312,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
|
||||
segmentSize := (len(src) + 3) / 4
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(4)
|
||||
for i := 0; i < 4; i++ {
|
||||
for i := range 4 {
|
||||
toDo := src
|
||||
if len(toDo) > segmentSize {
|
||||
toDo = toDo[:segmentSize]
|
||||
@ -326,7 +326,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
|
||||
}(i)
|
||||
}
|
||||
wg.Wait()
|
||||
for i := 0; i < 4; i++ {
|
||||
for i := range 4 {
|
||||
o := s.tmpOut[i]
|
||||
if len(o) > math.MaxUint16 {
|
||||
// We cannot store the size in the jump table
|
||||
|
||||
14
vendor/github.com/klauspost/compress/huff0/decompress.go
generated
vendored
14
vendor/github.com/klauspost/compress/huff0/decompress.go
generated
vendored
@ -626,7 +626,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
|
||||
|
||||
var br [4]bitReaderBytes
|
||||
start := 6
|
||||
for i := 0; i < 3; i++ {
|
||||
for i := range 3 {
|
||||
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
|
||||
if start+length >= len(src) {
|
||||
return nil, errors.New("truncated input (or invalid offset)")
|
||||
@ -798,10 +798,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
|
||||
remainBytes := dstEvery - (decoded / 4)
|
||||
for i := range br {
|
||||
offset := dstEvery * i
|
||||
endsAt := offset + remainBytes
|
||||
if endsAt > len(out) {
|
||||
endsAt = len(out)
|
||||
}
|
||||
endsAt := min(offset+remainBytes, len(out))
|
||||
br := &br[i]
|
||||
bitsLeft := br.remaining()
|
||||
for bitsLeft > 0 {
|
||||
@ -864,7 +861,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
|
||||
func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
|
||||
var br [4]bitReaderBytes
|
||||
start := 6
|
||||
for i := 0; i < 3; i++ {
|
||||
for i := range 3 {
|
||||
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
|
||||
if start+length >= len(src) {
|
||||
return nil, errors.New("truncated input (or invalid offset)")
|
||||
@ -1035,10 +1032,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
|
||||
remainBytes := dstEvery - (decoded / 4)
|
||||
for i := range br {
|
||||
offset := dstEvery * i
|
||||
endsAt := offset + remainBytes
|
||||
if endsAt > len(out) {
|
||||
endsAt = len(out)
|
||||
}
|
||||
endsAt := min(offset+remainBytes, len(out))
|
||||
br := &br[i]
|
||||
bitsLeft := br.remaining()
|
||||
for bitsLeft > 0 {
|
||||
|
||||
7
vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
generated
vendored
7
vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
generated
vendored
@ -58,7 +58,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
|
||||
var br [4]bitReaderShifted
|
||||
// Decode "jump table"
|
||||
start := 6
|
||||
for i := 0; i < 3; i++ {
|
||||
for i := range 3 {
|
||||
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
|
||||
if start+length >= len(src) {
|
||||
return nil, errors.New("truncated input (or invalid offset)")
|
||||
@ -109,10 +109,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
|
||||
remainBytes := dstEvery - (decoded / 4)
|
||||
for i := range br {
|
||||
offset := dstEvery * i
|
||||
endsAt := offset + remainBytes
|
||||
if endsAt > len(out) {
|
||||
endsAt = len(out)
|
||||
}
|
||||
endsAt := min(offset+remainBytes, len(out))
|
||||
br := &br[i]
|
||||
bitsLeft := br.remaining()
|
||||
for bitsLeft > 0 {
|
||||
|
||||
4
vendor/github.com/klauspost/compress/huff0/huff0.go
generated
vendored
4
vendor/github.com/klauspost/compress/huff0/huff0.go
generated
vendored
@ -201,7 +201,7 @@ func (c cTable) write(s *Scratch) error {
|
||||
for i := range hist[:16] {
|
||||
hist[i] = 0
|
||||
}
|
||||
for n := uint8(0); n < maxSymbolValue; n++ {
|
||||
for n := range maxSymbolValue {
|
||||
v := bitsToWeight[c[n].nBits] & 15
|
||||
huffWeight[n] = v
|
||||
hist[v]++
|
||||
@ -271,7 +271,7 @@ func (c cTable) estTableSize(s *Scratch) (sz int, err error) {
|
||||
for i := range hist[:16] {
|
||||
hist[i] = 0
|
||||
}
|
||||
for n := uint8(0); n < maxSymbolValue; n++ {
|
||||
for n := range maxSymbolValue {
|
||||
v := bitsToWeight[c[n].nBits] & 15
|
||||
huffWeight[n] = v
|
||||
hist[v]++
|
||||
|
||||
4
vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
generated
vendored
4
vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
generated
vendored
@ -37,6 +37,6 @@ func Store32(b []byte, v uint32) {
|
||||
}
|
||||
|
||||
// Store64 will store v at b.
|
||||
func Store64(b []byte, v uint64) {
|
||||
binary.LittleEndian.PutUint64(b, v)
|
||||
func Store64[I Indexer](b []byte, i I, v uint64) {
|
||||
binary.LittleEndian.PutUint64(b[i:], v)
|
||||
}
|
||||
|
||||
9
vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
generated
vendored
9
vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
generated
vendored
@ -38,18 +38,15 @@ func Load64[I Indexer](b []byte, i I) uint64 {
|
||||
|
||||
// Store16 will store v at b.
|
||||
func Store16(b []byte, v uint16) {
|
||||
//binary.LittleEndian.PutUint16(b, v)
|
||||
*(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v
|
||||
}
|
||||
|
||||
// Store32 will store v at b.
|
||||
func Store32(b []byte, v uint32) {
|
||||
//binary.LittleEndian.PutUint32(b, v)
|
||||
*(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v
|
||||
}
|
||||
|
||||
// Store64 will store v at b.
|
||||
func Store64(b []byte, v uint64) {
|
||||
//binary.LittleEndian.PutUint64(b, v)
|
||||
*(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v
|
||||
// Store64 will store v at b[i:].
|
||||
func Store64[I Indexer](b []byte, i I, v uint64) {
|
||||
*(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) = v
|
||||
}
|
||||
|
||||
2
vendor/github.com/klauspost/compress/internal/snapref/decode.go
generated
vendored
2
vendor/github.com/klauspost/compress/internal/snapref/decode.go
generated
vendored
@ -209,7 +209,7 @@ func (r *Reader) fill() error {
|
||||
if !r.readFull(r.buf[:len(magicBody)], false) {
|
||||
return r.err
|
||||
}
|
||||
for i := 0; i < len(magicBody); i++ {
|
||||
for i := range len(magicBody) {
|
||||
if r.buf[i] != magicBody[i] {
|
||||
r.err = ErrCorrupt
|
||||
return r.err
|
||||
|
||||
4
vendor/github.com/klauspost/compress/internal/snapref/encode.go
generated
vendored
4
vendor/github.com/klauspost/compress/internal/snapref/encode.go
generated
vendored
@ -20,8 +20,10 @@ import (
|
||||
func Encode(dst, src []byte) []byte {
|
||||
if n := MaxEncodedLen(len(src)); n < 0 {
|
||||
panic(ErrTooLarge)
|
||||
} else if len(dst) < n {
|
||||
} else if cap(dst) < n {
|
||||
dst = make([]byte, n)
|
||||
} else {
|
||||
dst = dst[:n]
|
||||
}
|
||||
|
||||
// The block starts with the varint-encoded length of the decompressed bytes.
|
||||
|
||||
2
vendor/github.com/klauspost/compress/zstd/bitwriter.go
generated
vendored
2
vendor/github.com/klauspost/compress/zstd/bitwriter.go
generated
vendored
@ -88,7 +88,7 @@ func (b *bitWriter) flush32() {
|
||||
// flushAlign will flush remaining full bytes and align to next byte boundary.
|
||||
func (b *bitWriter) flushAlign() {
|
||||
nbBytes := (b.nBits + 7) >> 3
|
||||
for i := uint8(0); i < nbBytes; i++ {
|
||||
for i := range nbBytes {
|
||||
b.out = append(b.out, byte(b.bitContainer>>(i*8)))
|
||||
}
|
||||
b.nBits = 0
|
||||
|
||||
6
vendor/github.com/klauspost/compress/zstd/blockdec.go
generated
vendored
6
vendor/github.com/klauspost/compress/zstd/blockdec.go
generated
vendored
@ -54,11 +54,11 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
huffDecoderPool = sync.Pool{New: func() interface{} {
|
||||
huffDecoderPool = sync.Pool{New: func() any {
|
||||
return &huff0.Scratch{}
|
||||
}}
|
||||
|
||||
fseDecoderPool = sync.Pool{New: func() interface{} {
|
||||
fseDecoderPool = sync.Pool{New: func() any {
|
||||
return &fseDecoder{}
|
||||
}}
|
||||
)
|
||||
@ -553,7 +553,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
|
||||
if compMode&3 != 0 {
|
||||
return errors.New("corrupt block: reserved bits not zero")
|
||||
}
|
||||
for i := uint(0); i < 3; i++ {
|
||||
for i := range uint(3) {
|
||||
mode := seqCompMode((compMode >> (6 - i*2)) & 3)
|
||||
if debugDecoder {
|
||||
println("Table", tableIndex(i), "is", mode)
|
||||
|
||||
8
vendor/github.com/klauspost/compress/zstd/decoder.go
generated
vendored
8
vendor/github.com/klauspost/compress/zstd/decoder.go
generated
vendored
@ -373,11 +373,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
|
||||
if cap(dst) == 0 && !d.o.limitToCap {
|
||||
// Allocate len(input) * 2 by default if nothing is provided
|
||||
// and we didn't get frame content size.
|
||||
size := len(input) * 2
|
||||
// Cap to 1 MB.
|
||||
if size > 1<<20 {
|
||||
size = 1 << 20
|
||||
}
|
||||
size := min(
|
||||
// Cap to 1 MB.
|
||||
len(input)*2, 1<<20)
|
||||
if uint64(size) > d.o.maxDecodedSize {
|
||||
size = int(d.o.maxDecodedSize)
|
||||
}
|
||||
|
||||
20
vendor/github.com/klauspost/compress/zstd/dict.go
generated
vendored
20
vendor/github.com/klauspost/compress/zstd/dict.go
generated
vendored
@ -194,17 +194,17 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
|
||||
hist := o.History
|
||||
contents := o.Contents
|
||||
debug := o.DebugOut != nil
|
||||
println := func(args ...interface{}) {
|
||||
println := func(args ...any) {
|
||||
if o.DebugOut != nil {
|
||||
fmt.Fprintln(o.DebugOut, args...)
|
||||
}
|
||||
}
|
||||
printf := func(s string, args ...interface{}) {
|
||||
printf := func(s string, args ...any) {
|
||||
if o.DebugOut != nil {
|
||||
fmt.Fprintf(o.DebugOut, s, args...)
|
||||
}
|
||||
}
|
||||
print := func(args ...interface{}) {
|
||||
print := func(args ...any) {
|
||||
if o.DebugOut != nil {
|
||||
fmt.Fprint(o.DebugOut, args...)
|
||||
}
|
||||
@ -424,16 +424,10 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
|
||||
}
|
||||
|
||||
// Literal table
|
||||
avgSize := litTotal
|
||||
if avgSize > huff0.BlockSizeMax/2 {
|
||||
avgSize = huff0.BlockSizeMax / 2
|
||||
}
|
||||
avgSize := min(litTotal, huff0.BlockSizeMax/2)
|
||||
huffBuff := make([]byte, 0, avgSize)
|
||||
// Target size
|
||||
div := litTotal / avgSize
|
||||
if div < 1 {
|
||||
div = 1
|
||||
}
|
||||
div := max(litTotal/avgSize, 1)
|
||||
if debug {
|
||||
println("Huffman weights:")
|
||||
}
|
||||
@ -454,7 +448,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
|
||||
huffBuff = append(huffBuff, 255)
|
||||
}
|
||||
scratch := &huff0.Scratch{TableLog: 11}
|
||||
for tries := 0; tries < 255; tries++ {
|
||||
for tries := range 255 {
|
||||
scratch = &huff0.Scratch{TableLog: 11}
|
||||
_, _, err = huff0.Compress1X(huffBuff, scratch)
|
||||
if err == nil {
|
||||
@ -471,7 +465,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
|
||||
|
||||
// Bail out.... Just generate something
|
||||
huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
|
||||
for i := 0; i < 128; i++ {
|
||||
for i := range 128 {
|
||||
huffBuff = append(huffBuff, byte(i))
|
||||
}
|
||||
continue
|
||||
|
||||
10
vendor/github.com/klauspost/compress/zstd/enc_base.go
generated
vendored
10
vendor/github.com/klauspost/compress/zstd/enc_base.go
generated
vendored
@ -8,7 +8,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
dictShardBits = 6
|
||||
dictShardBits = 7
|
||||
)
|
||||
|
||||
type fastBase struct {
|
||||
@ -41,11 +41,9 @@ func (e *fastBase) AppendCRC(dst []byte) []byte {
|
||||
// or a window size small enough to contain the input size, if > 0.
|
||||
func (e *fastBase) WindowSize(size int64) int32 {
|
||||
if size > 0 && size < int64(e.maxMatchOff) {
|
||||
b := int32(1) << uint(bits.Len(uint(size)))
|
||||
// Keep minimum window.
|
||||
if b < 1024 {
|
||||
b = 1024
|
||||
}
|
||||
b := max(
|
||||
// Keep minimum window.
|
||||
int32(1)<<uint(bits.Len(uint(size))), 1024)
|
||||
return b
|
||||
}
|
||||
return e.maxMatchOff
|
||||
|
||||
23
vendor/github.com/klauspost/compress/zstd/enc_best.go
generated
vendored
23
vendor/github.com/klauspost/compress/zstd/enc_best.go
generated
vendored
@ -158,11 +158,9 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
|
||||
|
||||
// Use this to estimate literal cost.
|
||||
// Scaled by 10 bits.
|
||||
bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
|
||||
// Huffman can never go < 1 bit/byte
|
||||
if bitsPerByte < 1024 {
|
||||
bitsPerByte = 1024
|
||||
}
|
||||
bitsPerByte := max(
|
||||
// Huffman can never go < 1 bit/byte
|
||||
int32((compress.ShannonEntropyBits(src)*1024)/len(src)), 1024)
|
||||
|
||||
// Override src
|
||||
src = e.hist
|
||||
@ -235,10 +233,7 @@ encodeLoop:
|
||||
// Extend candidate match backwards as far as possible.
|
||||
// Do not extend repeats as we can assume they are optimal
|
||||
// and offsets change if s == nextEmit.
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
|
||||
s--
|
||||
offset--
|
||||
@ -382,10 +377,7 @@ encodeLoop:
|
||||
nextEmit = s
|
||||
|
||||
// Index skipped...
|
||||
end := s
|
||||
if s > sLimit+4 {
|
||||
end = sLimit + 4
|
||||
}
|
||||
end := min(s, sLimit+4)
|
||||
off := index0 + e.cur
|
||||
for index0 < end {
|
||||
cv0 := load6432(src, index0)
|
||||
@ -444,10 +436,7 @@ encodeLoop:
|
||||
nextEmit = s
|
||||
|
||||
// Index old s + 1 -> s - 1 or sLimit
|
||||
end := s
|
||||
if s > sLimit-4 {
|
||||
end = sLimit - 4
|
||||
}
|
||||
end := min(s, sLimit-4)
|
||||
|
||||
off := index0 + e.cur
|
||||
for index0 < end {
|
||||
|
||||
30
vendor/github.com/klauspost/compress/zstd/enc_better.go
generated
vendored
30
vendor/github.com/klauspost/compress/zstd/enc_better.go
generated
vendored
@ -190,10 +190,7 @@ encodeLoop:
|
||||
// and have to do special offset treatment.
|
||||
startLimit := nextEmit + 1
|
||||
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
|
||||
repIndex--
|
||||
start--
|
||||
@ -252,10 +249,7 @@ encodeLoop:
|
||||
// and have to do special offset treatment.
|
||||
startLimit := nextEmit + 1
|
||||
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
|
||||
repIndex--
|
||||
start--
|
||||
@ -480,10 +474,7 @@ encodeLoop:
|
||||
l := matched
|
||||
|
||||
// Extend backwards
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
|
||||
s--
|
||||
t--
|
||||
@ -719,10 +710,7 @@ encodeLoop:
|
||||
// and have to do special offset treatment.
|
||||
startLimit := nextEmit + 1
|
||||
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
|
||||
repIndex--
|
||||
start--
|
||||
@ -783,10 +771,7 @@ encodeLoop:
|
||||
// and have to do special offset treatment.
|
||||
startLimit := nextEmit + 1
|
||||
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
|
||||
repIndex--
|
||||
start--
|
||||
@ -1005,10 +990,7 @@ encodeLoop:
|
||||
l := matched
|
||||
|
||||
// Extend backwards
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
|
||||
s--
|
||||
t--
|
||||
|
||||
32
vendor/github.com/klauspost/compress/zstd/enc_dfast.go
generated
vendored
32
vendor/github.com/klauspost/compress/zstd/enc_dfast.go
generated
vendored
@ -13,7 +13,7 @@ const (
|
||||
dFastLongLen = 8 // Bytes used for table hash
|
||||
|
||||
dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
|
||||
dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard
|
||||
dLongTableShardSize = dFastLongTableSize / dLongTableShardCnt // Size of an individual shard
|
||||
|
||||
dFastShortTableBits = tableBits // Bits used in the short match table
|
||||
dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
|
||||
@ -149,10 +149,7 @@ encodeLoop:
|
||||
// and have to do special offset treatment.
|
||||
startLimit := nextEmit + 1
|
||||
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
|
||||
repIndex--
|
||||
start--
|
||||
@ -266,10 +263,7 @@ encodeLoop:
|
||||
l := e.matchlen(s+4, t+4, src) + 4
|
||||
|
||||
// Extend backwards
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
|
||||
s--
|
||||
t--
|
||||
@ -462,10 +456,7 @@ encodeLoop:
|
||||
// and have to do special offset treatment.
|
||||
startLimit := nextEmit + 1
|
||||
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] {
|
||||
repIndex--
|
||||
start--
|
||||
@ -576,10 +567,7 @@ encodeLoop:
|
||||
l := int32(matchLen(src[s+4:], src[t+4:])) + 4
|
||||
|
||||
// Extend backwards
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
|
||||
s--
|
||||
t--
|
||||
@ -809,10 +797,7 @@ encodeLoop:
|
||||
// and have to do special offset treatment.
|
||||
startLimit := nextEmit + 1
|
||||
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
|
||||
repIndex--
|
||||
start--
|
||||
@ -927,10 +912,7 @@ encodeLoop:
|
||||
l := e.matchlen(s+4, t+4, src) + 4
|
||||
|
||||
// Extend backwards
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
|
||||
s--
|
||||
t--
|
||||
|
||||
30
vendor/github.com/klauspost/compress/zstd/enc_fast.go
generated
vendored
@ -143,10 +143,7 @@ encodeLoop:
|
||||
// and have to do special offset treatment.
|
||||
startLimit := nextEmit + 1
|
||||
|
||||
sMin := s - e.maxMatchOff
|
||||
if sMin < 0 {
|
||||
sMin = 0
|
||||
}
|
||||
sMin := max(s-e.maxMatchOff, 0)
|
||||
for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
|
||||
repIndex--
|
||||
start--
|
||||
@ -223,10 +220,7 @@ encodeLoop:
|
||||
l := e.matchlen(s+4, t+4, src) + 4
|
||||
|
||||
// Extend backwards
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
|
||||
s--
|
||||
t--
|
||||
@ -387,10 +381,7 @@ encodeLoop:
|
||||
// and have to do special offset treatment.
|
||||
startLimit := nextEmit + 1
|
||||
|
||||
sMin := s - e.maxMatchOff
|
||||
if sMin < 0 {
|
||||
sMin = 0
|
||||
}
|
||||
sMin := max(s-e.maxMatchOff, 0)
|
||||
for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] {
|
||||
repIndex--
|
||||
start--
|
||||
@ -469,10 +460,7 @@ encodeLoop:
|
||||
l := e.matchlen(s+4, t+4, src) + 4
|
||||
|
||||
// Extend backwards
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
|
||||
s--
|
||||
t--
|
||||
@ -655,10 +643,7 @@ encodeLoop:
|
||||
// and have to do special offset treatment.
|
||||
startLimit := nextEmit + 1
|
||||
|
||||
sMin := s - e.maxMatchOff
|
||||
if sMin < 0 {
|
||||
sMin = 0
|
||||
}
|
||||
sMin := max(s-e.maxMatchOff, 0)
|
||||
for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
|
||||
repIndex--
|
||||
start--
|
||||
@ -735,10 +720,7 @@ encodeLoop:
|
||||
l := e.matchlen(s+4, t+4, src) + 4
|
||||
|
||||
// Extend backwards
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
tMin := max(s-e.maxMatchOff, 0)
|
||||
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
|
||||
s--
|
||||
t--
|
||||
|
||||
5
vendor/github.com/klauspost/compress/zstd/framedec.go
generated
vendored
@ -238,10 +238,7 @@ func (d *frameDec) reset(br byteBuffer) error {

if d.WindowSize == 0 && d.SingleSegment {
// We may not need window in this case.
d.WindowSize = d.FrameContentSize
if d.WindowSize < MinWindowSize {
d.WindowSize = MinWindowSize
}
d.WindowSize = max(d.FrameContentSize, MinWindowSize)
if d.WindowSize > d.o.maxDecodedSize {
if debugDecoder {
printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
2
vendor/github.com/klauspost/compress/zstd/fse_encoder.go
generated
vendored
@ -149,7 +149,7 @@ func (s *fseEncoder) buildCTable() error {
if v > largeLimit {
s.zeroBits = true
}
for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
for range v {
tableSymbol[position] = symbol
position = (position + step) & tableMask
for position > highThreshold {
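The `buildCTable` hunk above swaps a counted loop whose index is never read for Go 1.22's range-over-integer form. A small standalone sketch of the two spellings side by side (not the encoder's actual state):

```go
package main

import "fmt"

func main() {
	v := int16(3)

	// Pre-Go 1.22 form, as removed above: an explicit counter the body never reads.
	for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
		fmt.Println("counted iteration")
	}

	// Go 1.22+ form, as added: ranging over an integer runs the body v times.
	for range v {
		fmt.Println("range-over-int iteration")
	}
}
```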
5
vendor/github.com/klauspost/compress/zstd/seqdec.go
generated
vendored
@ -231,10 +231,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
out := s.out
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
maxBlockSize := min(s.windowSize, maxCompressedBlockSize)

if debugDecoder {
println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
10
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
generated
vendored
@ -79,10 +79,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {

br := s.br

maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
maxBlockSize := min(s.windowSize, maxCompressedBlockSize)

ctx := decodeSyncAsmContext{
llTable: s.litLengths.fse.dt[:maxTablesize],
@ -237,10 +234,7 @@ func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmC
func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br

maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
maxBlockSize := min(s.windowSize, maxCompressedBlockSize)

ctx := decodeAsmContext{
llTable: s.litLengths.fse.dt[:maxTablesize],
56
vendor/github.com/klauspost/compress/zstd/simple_go124.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
// Copyright 2025+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.

//go:build go1.24

package zstd

import (
"errors"
"runtime"
"sync"
"weak"
)

var weakMu sync.Mutex
var simpleEnc weak.Pointer[Encoder]
var simpleDec weak.Pointer[Decoder]

// EncodeTo appends the encoded data from src to dst.
func EncodeTo(dst []byte, src []byte) []byte {
weakMu.Lock()
enc := simpleEnc.Value()
if enc == nil {
var err error
enc, err = NewWriter(nil, WithEncoderConcurrency(runtime.NumCPU()), WithWindowSize(1<<20), WithLowerEncoderMem(true), WithZeroFrames(true))
if err != nil {
panic("failed to create simple encoder: " + err.Error())
}
simpleEnc = weak.Make(enc)
}
weakMu.Unlock()

return enc.EncodeAll(src, dst)
}

// DecodeTo appends the decoded data from src to dst.
// The maximum decoded size is 1GiB,
// not including what may already be in dst.
func DecodeTo(dst []byte, src []byte) ([]byte, error) {
weakMu.Lock()
dec := simpleDec.Value()
if dec == nil {
var err error
dec, err = NewReader(nil, WithDecoderConcurrency(runtime.NumCPU()), WithDecoderLowmem(true), WithDecoderMaxMemory(1<<30))
if err != nil {
weakMu.Unlock()
return nil, errors.New("failed to create simple decoder: " + err.Error())
}
runtime.SetFinalizer(dec, func(d *Decoder) {
d.Close()
})
simpleDec = weak.Make(dec)
}
weakMu.Unlock()
return dec.DecodeAll(src, dst)
}
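The new `simple_go124.go` helpers lazily build one shared `Encoder`/`Decoder` and hold them only through `weak.Pointer` (Go 1.24+), so an idle program does not keep them alive. A minimal usage sketch of the two exported functions, assuming the package is imported from its upstream path `github.com/klauspost/compress/zstd` and built with Go 1.24 or newer:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	payload := bytes.Repeat([]byte("hello zstd "), 1000)

	// EncodeTo appends a compressed frame to dst; passing nil lets it allocate.
	// The shared encoder behind it is created on first use and cached weakly.
	compressed := zstd.EncodeTo(nil, payload)

	// DecodeTo appends the decompressed bytes (capped at 1GiB) to dst.
	restored, err := zstd.DecodeTo(nil, compressed)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(bytes.Equal(payload, restored)) // true
	fmt.Printf("%d bytes compressed to %d bytes\n", len(payload), len(compressed))
}
```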
2
vendor/github.com/klauspost/compress/zstd/snappy.go
generated
vendored
@ -257,7 +257,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
if !r.readFull(r.buf[:len(snappyMagicBody)], false) {
return written, r.err
}
for i := 0; i < len(snappyMagicBody); i++ {
for i := range len(snappyMagicBody) {
if r.buf[i] != snappyMagicBody[i] {
println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i)
r.err = ErrSnappyCorrupt
2
vendor/github.com/klauspost/compress/zstd/zip.go
generated
vendored
@ -19,7 +19,7 @@ const ZipMethodWinZip = 93
const ZipMethodPKWare = 20

// zipReaderPool is the default reader pool.
var zipReaderPool = sync.Pool{New: func() interface{} {
var zipReaderPool = sync.Pool{New: func() any {
z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
if err != nil {
panic(err)
4
vendor/github.com/klauspost/compress/zstd/zstd.go
generated
vendored
@ -98,13 +98,13 @@ var (
ErrDecoderNilInput = errors.New("nil input provided as reader")
)

func println(a ...interface{}) {
func println(a ...any) {
if debug || debugDecoder || debugEncoder {
log.Println(a...)
}
}

func printf(format string, a ...interface{}) {
func printf(format string, a ...any) {
if debug || debugDecoder || debugEncoder {
log.Printf(format, a...)
}
6
vendor/github.com/morikuni/aec/aec.go
generated
vendored
@ -129,8 +129,10 @@ func init() {
All: 2,
}

Save = newAnsi(esc + "s")
Restore = newAnsi(esc + "u")
// Save use both SCO (ESC[s) and DEC (ESC7) sequences as those were never standardised as part of the ANSI
Save = newAnsi(esc + "s" + "\x1b7")
// Restore use both SCO (ESC[u) and DEC (ESC8) and DEC sequences as those were never standardised as part of the ANSI
Restore = newAnsi(esc + "u" + "\x1b8")
Hide = newAnsi(esc + "?25l")
Show = newAnsi(esc + "?25h")
Report = newAnsi(esc + "6n")
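The change above appends the DEC cursor save/restore codes (ESC 7 / ESC 8) to the SCO ones (ESC[s / ESC[u), since neither pair was ever standardised and terminals disagree about which they honour. A rough usage sketch, assuming the exported `Save` and `Restore` values assigned in the `init` above print their escape sequences when written to a terminal:

```go
package main

import (
	"fmt"
	"time"

	"github.com/morikuni/aec"
)

func main() {
	// Save emits both cursor-save encodings; Restore emits both restores, so
	// terminals that only honour one of the two families still jump back.
	fmt.Print("progress: ", aec.Save)
	for i := 0; i <= 100; i += 25 {
		fmt.Print(aec.Restore) // return to the position recorded by Save
		fmt.Printf("%3d%%", i) // overwrite the previous percentage
		time.Sleep(100 * time.Millisecond)
	}
	fmt.Println()
}
```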
7
vendor/github.com/spf13/cobra/.golangci.yml
generated
vendored
@ -57,3 +57,10 @@ linters:
- common-false-positives
- legacy
- std-error-handling
settings:
govet:
# Disable buildtag check to allow dual build tag syntax (both //go:build and // +build).
# This is necessary for Go 1.15 compatibility since //go:build was introduced in Go 1.17.
# This can be removed once Cobra requires Go 1.17 or higher.
disable:
- buildtag
12
vendor/github.com/spf13/cobra/command.go
generated
vendored
@ -557,7 +557,7 @@ func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
}
}

var minUsagePadding = 25
const minUsagePadding = 25

// UsagePadding return padding for the usage.
func (c *Command) UsagePadding() int {
@ -567,7 +567,7 @@ func (c *Command) UsagePadding() int {
return c.parent.commandsMaxUseLen
}

var minCommandPathPadding = 11
const minCommandPathPadding = 11

// CommandPathPadding return padding for the command path.
func (c *Command) CommandPathPadding() int {
@ -577,7 +577,7 @@ func (c *Command) CommandPathPadding() int {
return c.parent.commandsMaxCommandPathLen
}

var minNamePadding = 11
const minNamePadding = 11

// NamePadding returns padding for the name.
func (c *Command) NamePadding() int {
@ -1939,7 +1939,7 @@ type tmplFunc struct {
fn func(io.Writer, interface{}) error
}

var defaultUsageTemplate = `Usage:{{if .Runnable}}
const defaultUsageTemplate = `Usage:{{if .Runnable}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}

@ -2039,7 +2039,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error {
return nil
}

var defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
const defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}

{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`

@ -2061,7 +2061,7 @@ func defaultHelpFunc(w io.Writer, in interface{}) error {
return nil
}

var defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
const defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
`

// defaultVersionFunc is equivalent to executing defaultVersionTemplate. The two should be changed in sync.
2
vendor/github.com/spf13/cobra/doc/yaml_docs.go
generated
vendored
@ -24,7 +24,7 @@ import (

"github.com/spf13/cobra"
"github.com/spf13/pflag"
"gopkg.in/yaml.v3"
"go.yaml.in/yaml/v3"
)

type cmdOption struct {
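The only change to `yaml_docs.go` is the import path: `gopkg.in/yaml.v3` becomes `go.yaml.in/yaml/v3`. The package is still imported under the name `yaml`, so call sites stay untouched; a minimal sketch of the same swap in user code, assuming the fork keeps the v3 API (its README below states it does):

```go
package main

import (
	"fmt"
	"log"

	yaml "go.yaml.in/yaml/v3" // previously: "gopkg.in/yaml.v3"
)

func main() {
	out, err := yaml.Marshal(map[string]int{"answer": 42})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out)) // answer: 42
}
```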
0
vendor/gopkg.in/yaml.v3/LICENSE → vendor/go.yaml.in/yaml/v3/LICENSE
generated
vendored
0
vendor/gopkg.in/yaml.v3/NOTICE → vendor/go.yaml.in/yaml/v3/NOTICE
generated
vendored
171
vendor/go.yaml.in/yaml/v3/README.md
generated
vendored
Normal file
@ -0,0 +1,171 @@
|
||||
go.yaml.in/yaml
|
||||
===============
|
||||
|
||||
YAML Support for the Go Language
|
||||
|
||||
|
||||
## Introduction
|
||||
|
||||
The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode
|
||||
and decode [YAML](https://yaml.org/) values.
|
||||
|
||||
It was originally developed within [Canonical](https://www.canonical.com) as
|
||||
part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go
|
||||
port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to
|
||||
parse and generate YAML data quickly and reliably.
|
||||
|
||||
|
||||
## Project Status
|
||||
|
||||
This project started as a fork of the extremely popular [go-yaml](
|
||||
https://github.com/go-yaml/yaml/)
|
||||
project, and is being maintained by the official [YAML organization](
|
||||
https://github.com/yaml/).
|
||||
|
||||
The YAML team took over ongoing maintenance and development of the project after
|
||||
discussion with go-yaml's author, @niemeyer, following his decision to
|
||||
[label the project repository as "unmaintained"](
|
||||
https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025.
|
||||
|
||||
We have put together a team of dedicated maintainers including representatives
|
||||
of go-yaml's most important downstream projects.
|
||||
|
||||
We will strive to earn the trust of the various go-yaml forks to switch back to
|
||||
this repository as their upstream.
|
||||
|
||||
Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you
|
||||
would like to contribute or be involved.
|
||||
|
||||
|
||||
## Compatibility
|
||||
|
||||
The `yaml` package supports most of YAML 1.2, but preserves some behavior from
|
||||
1.1 for backwards compatibility.
|
||||
|
||||
Specifically, v3 of the `yaml` package:
|
||||
|
||||
* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being
|
||||
decoded into a typed bool value.
|
||||
Otherwise they behave as a string.
|
||||
Booleans in YAML 1.2 are `true`/`false` only.
|
||||
* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than
|
||||
`0o777` as specified in YAML 1.2, because most parsers still use the old
|
||||
format.
|
||||
Octals in the `0o777` format are supported though, so new files work.
|
||||
* Does not support base-60 floats.
|
||||
These are gone from YAML 1.2, and were actually never supported by this
|
||||
package as it's clearly a poor choice.
|
||||
|
||||
|
||||
## Installation and Usage
|
||||
|
||||
The import path for the package is *go.yaml.in/yaml/v3*.
|
||||
|
||||
To install it, run:
|
||||
|
||||
```bash
|
||||
go get go.yaml.in/yaml/v3
|
||||
```
|
||||
|
||||
|
||||
## API Documentation
|
||||
|
||||
See: <https://pkg.go.dev/go.yaml.in/yaml/v3>
|
||||
|
||||
|
||||
## API Stability
|
||||
|
||||
The package API for yaml v3 will remain stable as described in [gopkg.in](
|
||||
https://gopkg.in).
|
||||
|
||||
|
||||
## Example
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"go.yaml.in/yaml/v3"
|
||||
)
|
||||
|
||||
var data = `
|
||||
a: Easy!
|
||||
b:
|
||||
c: 2
|
||||
d: [3, 4]
|
||||
`
|
||||
|
||||
// Note: struct fields must be public in order for unmarshal to
|
||||
// correctly populate the data.
|
||||
type T struct {
|
||||
A string
|
||||
B struct {
|
||||
RenamedC int `yaml:"c"`
|
||||
D []int `yaml:",flow"`
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
t := T{}
|
||||
|
||||
err := yaml.Unmarshal([]byte(data), &t)
|
||||
if err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
fmt.Printf("--- t:\n%v\n\n", t)
|
||||
|
||||
d, err := yaml.Marshal(&t)
|
||||
if err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
fmt.Printf("--- t dump:\n%s\n\n", string(d))
|
||||
|
||||
m := make(map[interface{}]interface{})
|
||||
|
||||
err = yaml.Unmarshal([]byte(data), &m)
|
||||
if err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
fmt.Printf("--- m:\n%v\n\n", m)
|
||||
|
||||
d, err = yaml.Marshal(&m)
|
||||
if err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
fmt.Printf("--- m dump:\n%s\n\n", string(d))
|
||||
}
|
||||
```
|
||||
|
||||
This example will generate the following output:
|
||||
|
||||
```
|
||||
--- t:
|
||||
{Easy! {2 [3 4]}}
|
||||
|
||||
--- t dump:
|
||||
a: Easy!
|
||||
b:
|
||||
c: 2
|
||||
d: [3, 4]
|
||||
|
||||
|
||||
--- m:
|
||||
map[a:Easy! b:map[c:2 d:[3 4]]]
|
||||
|
||||
--- m dump:
|
||||
a: Easy!
|
||||
b:
|
||||
c: 2
|
||||
d:
|
||||
- 3
|
||||
- 4
|
||||
```
|
||||
|
||||
|
||||
## License
|
||||
|
||||
The yaml package is licensed under the MIT and Apache License 2.0 licenses.
|
||||
Please see the LICENSE file for details.
|
||||
8
vendor/gopkg.in/yaml.v3/apic.go → vendor/go.yaml.in/yaml/v3/apic.go
generated
vendored
@ -1,17 +1,17 @@
|
||||
//
|
||||
//
|
||||
// Copyright (c) 2011-2019 Canonical Ltd
|
||||
// Copyright (c) 2006-2010 Kirill Simonov
|
||||
//
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
// of the Software, and to permit persons to whom the Software is furnished to do
|
||||
// so, subject to the following conditions:
|
||||
//
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
24
vendor/gopkg.in/yaml.v3/decode.go → vendor/go.yaml.in/yaml/v3/decode.go
generated
vendored
@ -832,10 +832,10 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
if d.unmarshal(n.Content[i], k) {
if mergedFields != nil {
ki := k.Interface()
if mergedFields[ki] {
if d.getPossiblyUnhashableKey(mergedFields, ki) {
continue
}
mergedFields[ki] = true
d.setPossiblyUnhashableKey(mergedFields, ki, true)
}
kkind := k.Kind()
if kkind == reflect.Interface {
@ -956,6 +956,24 @@ func failWantMap() {
failf("map merge requires map or sequence of maps as the value")
}

func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) {
defer func() {
if err := recover(); err != nil {
failf("%v", err)
}
}()
m[key] = value
}

func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool {
defer func() {
if err := recover(); err != nil {
failf("%v", err)
}
}()
return m[key]
}

func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) {
mergedFields := d.mergedFields
if mergedFields == nil {
@ -963,7 +981,7 @@ func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) {
for i := 0; i < len(parent.Content); i += 2 {
k := reflect.New(ifaceType).Elem()
if d.unmarshal(parent.Content[i], k) {
d.mergedFields[k.Interface()] = true
d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true)
}
}
}
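The two wrappers added above exist because indexing a `map[interface{}]bool` panics when the key is unhashable, for example when a YAML mapping key is itself a sequence and decodes to a `[]interface{}`; the deferred `recover` converts that panic into the decoder's normal `failf` error path. A standalone sketch of the underlying runtime behaviour, independent of the yaml package:

```go
package main

import "fmt"

// set mirrors the wrapper above: turn the panic an unhashable key triggers
// into an ordinary error instead of crashing the caller.
func set(m map[interface{}]bool, key interface{}, value bool) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("%v", r)
		}
	}()
	m[key] = value
	return nil
}

func main() {
	m := map[interface{}]bool{}

	fmt.Println(set(m, "name", true)) // <nil>: strings are hashable keys

	// A slice is not hashable, so the assignment panics with
	// "hash of unhashable type []interface {}"; set reports it as an error.
	fmt.Println(set(m, []interface{}{"a", "b"}, true))
}
```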
54
vendor/gopkg.in/yaml.v3/emitterc.go → vendor/go.yaml.in/yaml/v3/emitterc.go
generated
vendored
@ -162,10 +162,9 @@ func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
|
||||
// Check if we need to accumulate more events before emitting.
|
||||
//
|
||||
// We accumulate extra
|
||||
// - 1 event for DOCUMENT-START
|
||||
// - 2 events for SEQUENCE-START
|
||||
// - 3 events for MAPPING-START
|
||||
//
|
||||
// - 1 event for DOCUMENT-START
|
||||
// - 2 events for SEQUENCE-START
|
||||
// - 3 events for MAPPING-START
|
||||
func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
|
||||
if emitter.events_head == len(emitter.events) {
|
||||
return true
|
||||
@ -226,7 +225,7 @@ func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_
|
||||
}
|
||||
|
||||
// Increase the indentation level.
|
||||
func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
|
||||
func yaml_emitter_increase_indent_compact(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool {
|
||||
emitter.indents = append(emitter.indents, emitter.indent)
|
||||
if emitter.indent < 0 {
|
||||
if flow {
|
||||
@ -241,7 +240,14 @@ func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool
|
||||
emitter.indent += 2
|
||||
} else {
|
||||
// Everything else aligns to the chosen indentation.
|
||||
emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent)
|
||||
emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent)
|
||||
if compact_seq {
|
||||
// The value compact_seq passed in is almost always set to `false` when this function is called,
|
||||
// except when we are dealing with sequence nodes. So this gets triggered to subtract 2 only when we
|
||||
// are increasing the indent to account for sequence nodes, which will be correct because we need to
|
||||
// subtract 2 to account for the - at the beginning of the sequence node.
|
||||
emitter.indent = emitter.indent - 2
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
@ -478,6 +484,18 @@ func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event
|
||||
return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
|
||||
}
|
||||
|
||||
// yaml_emitter_increase_indent preserves the original signature and delegates to
|
||||
// yaml_emitter_increase_indent_compact without compact-sequence indentation
|
||||
func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
|
||||
return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false)
|
||||
}
|
||||
|
||||
// yaml_emitter_process_line_comment preserves the original signature and delegates to
|
||||
// yaml_emitter_process_line_comment_linebreak passing false for linebreak
|
||||
func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
|
||||
return yaml_emitter_process_line_comment_linebreak(emitter, false)
|
||||
}
|
||||
|
||||
// Expect the root node.
|
||||
func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
|
||||
emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
|
||||
@ -728,7 +746,16 @@ func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_e
|
||||
// Expect a block item node.
|
||||
func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
|
||||
if first {
|
||||
if !yaml_emitter_increase_indent(emitter, false, false) {
|
||||
// emitter.mapping context tells us if we are currently in a mapping context.
|
||||
// emiiter.column tells us which column we are in in the yaml output. 0 is the first char of the column.
|
||||
// emitter.indentation tells us if the last character was an indentation character.
|
||||
// emitter.compact_sequence_indent tells us if '- ' is considered part of the indentation for sequence elements.
|
||||
// So, `seq` means that we are in a mapping context, and we are either at the first char of the column or
|
||||
// the last character was not an indentation character, and we consider '- ' part of the indentation
|
||||
// for sequence elements.
|
||||
seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) &&
|
||||
emitter.compact_sequence_indent
|
||||
if !yaml_emitter_increase_indent_compact(emitter, false, false, seq) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@ -1144,8 +1171,15 @@ func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
|
||||
}
|
||||
|
||||
// Write an line comment.
|
||||
func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
|
||||
func yaml_emitter_process_line_comment_linebreak(emitter *yaml_emitter_t, linebreak bool) bool {
|
||||
if len(emitter.line_comment) == 0 {
|
||||
// The next 3 lines are needed to resolve an issue with leading newlines
|
||||
// See https://github.com/go-yaml/yaml/issues/755
|
||||
// When linebreak is set to true, put_break will be called and will add
|
||||
// the needed newline.
|
||||
if linebreak && !put_break(emitter) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
if !emitter.whitespace {
|
||||
@ -1894,7 +1928,7 @@ func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bo
|
||||
if !yaml_emitter_write_block_scalar_hints(emitter, value) {
|
||||
return false
|
||||
}
|
||||
if !yaml_emitter_process_line_comment(emitter) {
|
||||
if !yaml_emitter_process_line_comment_linebreak(emitter, true) {
|
||||
return false
|
||||
}
|
||||
//emitter.indention = true
|
||||
@ -1931,7 +1965,7 @@ func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) boo
|
||||
if !yaml_emitter_write_block_scalar_hints(emitter, value) {
|
||||
return false
|
||||
}
|
||||
if !yaml_emitter_process_line_comment(emitter) {
|
||||
if !yaml_emitter_process_line_comment_linebreak(emitter, true) {
|
||||
return false
|
||||
}
|
||||
|
||||
0
vendor/gopkg.in/yaml.v3/encode.go → vendor/go.yaml.in/yaml/v3/encode.go
generated
vendored
142
vendor/gopkg.in/yaml.v3/parserc.go → vendor/go.yaml.in/yaml/v3/parserc.go
generated
vendored
@ -227,7 +227,8 @@ func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool
|
||||
|
||||
// Parse the production:
|
||||
// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
|
||||
// ************
|
||||
//
|
||||
// ************
|
||||
func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
|
||||
token := peek_token(parser)
|
||||
if token == nil {
|
||||
@ -249,9 +250,12 @@ func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t)
|
||||
|
||||
// Parse the productions:
|
||||
// implicit_document ::= block_node DOCUMENT-END*
|
||||
// *
|
||||
//
|
||||
// *
|
||||
//
|
||||
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
|
||||
// *************************
|
||||
//
|
||||
// *************************
|
||||
func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
|
||||
|
||||
token := peek_token(parser)
|
||||
@ -356,8 +360,8 @@ func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t
|
||||
|
||||
// Parse the productions:
|
||||
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
|
||||
// ***********
|
||||
//
|
||||
// ***********
|
||||
func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
|
||||
token := peek_token(parser)
|
||||
if token == nil {
|
||||
@ -379,9 +383,10 @@ func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event
|
||||
|
||||
// Parse the productions:
|
||||
// implicit_document ::= block_node DOCUMENT-END*
|
||||
// *************
|
||||
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
|
||||
//
|
||||
// *************
|
||||
//
|
||||
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
|
||||
func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
|
||||
token := peek_token(parser)
|
||||
if token == nil {
|
||||
@ -428,30 +433,41 @@ func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t)
|
||||
|
||||
// Parse the productions:
|
||||
// block_node_or_indentless_sequence ::=
|
||||
// ALIAS
|
||||
// *****
|
||||
// | properties (block_content | indentless_block_sequence)?
|
||||
// ********** *
|
||||
// | block_content | indentless_block_sequence
|
||||
// *
|
||||
//
|
||||
// ALIAS
|
||||
// *****
|
||||
// | properties (block_content | indentless_block_sequence)?
|
||||
// ********** *
|
||||
// | block_content | indentless_block_sequence
|
||||
// *
|
||||
//
|
||||
// block_node ::= ALIAS
|
||||
// *****
|
||||
// | properties block_content?
|
||||
// ********** *
|
||||
// | block_content
|
||||
// *
|
||||
//
|
||||
// *****
|
||||
// | properties block_content?
|
||||
// ********** *
|
||||
// | block_content
|
||||
// *
|
||||
//
|
||||
// flow_node ::= ALIAS
|
||||
// *****
|
||||
// | properties flow_content?
|
||||
// ********** *
|
||||
// | flow_content
|
||||
// *
|
||||
//
|
||||
// *****
|
||||
// | properties flow_content?
|
||||
// ********** *
|
||||
// | flow_content
|
||||
// *
|
||||
//
|
||||
// properties ::= TAG ANCHOR? | ANCHOR TAG?
|
||||
// *************************
|
||||
//
|
||||
// *************************
|
||||
//
|
||||
// block_content ::= block_collection | flow_collection | SCALAR
|
||||
// ******
|
||||
//
|
||||
// ******
|
||||
//
|
||||
// flow_content ::= flow_collection | SCALAR
|
||||
// ******
|
||||
//
|
||||
// ******
|
||||
func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
|
||||
//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
|
||||
|
||||
@ -682,8 +698,8 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i
|
||||
|
||||
// Parse the productions:
|
||||
// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
|
||||
// ******************** *********** * *********
|
||||
//
|
||||
// ******************** *********** * *********
|
||||
func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
|
||||
if first {
|
||||
token := peek_token(parser)
|
||||
@ -740,7 +756,8 @@ func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_e
|
||||
|
||||
// Parse the productions:
|
||||
// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
|
||||
// *********** *
|
||||
//
|
||||
// *********** *
|
||||
func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
|
||||
token := peek_token(parser)
|
||||
if token == nil {
|
||||
@ -805,14 +822,14 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
|
||||
|
||||
// Parse the productions:
|
||||
// block_mapping ::= BLOCK-MAPPING_START
|
||||
// *******************
|
||||
// ((KEY block_node_or_indentless_sequence?)?
|
||||
// *** *
|
||||
// (VALUE block_node_or_indentless_sequence?)?)*
|
||||
//
|
||||
// BLOCK-END
|
||||
// *********
|
||||
// *******************
|
||||
// ((KEY block_node_or_indentless_sequence?)?
|
||||
// *** *
|
||||
// (VALUE block_node_or_indentless_sequence?)?)*
|
||||
//
|
||||
// BLOCK-END
|
||||
// *********
|
||||
func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
|
||||
if first {
|
||||
token := peek_token(parser)
|
||||
@ -881,13 +898,11 @@ func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_even
|
||||
// Parse the productions:
|
||||
// block_mapping ::= BLOCK-MAPPING_START
|
||||
//
|
||||
// ((KEY block_node_or_indentless_sequence?)?
|
||||
//
|
||||
// (VALUE block_node_or_indentless_sequence?)?)*
|
||||
// ***** *
|
||||
// BLOCK-END
|
||||
//
|
||||
// ((KEY block_node_or_indentless_sequence?)?
|
||||
//
|
||||
// (VALUE block_node_or_indentless_sequence?)?)*
|
||||
// ***** *
|
||||
// BLOCK-END
|
||||
func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
|
||||
token := peek_token(parser)
|
||||
if token == nil {
|
||||
@ -915,16 +930,18 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev
|
||||
|
||||
// Parse the productions:
|
||||
// flow_sequence ::= FLOW-SEQUENCE-START
|
||||
// *******************
|
||||
// (flow_sequence_entry FLOW-ENTRY)*
|
||||
// * **********
|
||||
// flow_sequence_entry?
|
||||
// *
|
||||
// FLOW-SEQUENCE-END
|
||||
// *****************
|
||||
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||||
// *
|
||||
//
|
||||
// *******************
|
||||
// (flow_sequence_entry FLOW-ENTRY)*
|
||||
// * **********
|
||||
// flow_sequence_entry?
|
||||
// *
|
||||
// FLOW-SEQUENCE-END
|
||||
// *****************
|
||||
//
|
||||
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||||
//
|
||||
// *
|
||||
func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
|
||||
if first {
|
||||
token := peek_token(parser)
|
||||
@ -987,11 +1004,10 @@ func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_ev
|
||||
return true
|
||||
}
|
||||
|
||||
//
|
||||
// Parse the productions:
|
||||
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||||
// *** *
|
||||
//
|
||||
// *** *
|
||||
func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
|
||||
token := peek_token(parser)
|
||||
if token == nil {
|
||||
@ -1011,8 +1027,8 @@ func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, ev
|
||||
|
||||
// Parse the productions:
|
||||
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||||
// ***** *
|
||||
//
|
||||
// ***** *
|
||||
func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
|
||||
token := peek_token(parser)
|
||||
if token == nil {
|
||||
@ -1035,8 +1051,8 @@ func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t,
|
||||
|
||||
// Parse the productions:
|
||||
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||||
// *
|
||||
//
|
||||
// *
|
||||
func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
|
||||
token := peek_token(parser)
|
||||
if token == nil {
|
||||
@ -1053,16 +1069,17 @@ func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, ev
|
||||
|
||||
// Parse the productions:
|
||||
// flow_mapping ::= FLOW-MAPPING-START
|
||||
// ******************
|
||||
// (flow_mapping_entry FLOW-ENTRY)*
|
||||
// * **********
|
||||
// flow_mapping_entry?
|
||||
// ******************
|
||||
// FLOW-MAPPING-END
|
||||
// ****************
|
||||
// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||||
// * *** *
|
||||
//
|
||||
// ******************
|
||||
// (flow_mapping_entry FLOW-ENTRY)*
|
||||
// * **********
|
||||
// flow_mapping_entry?
|
||||
// ******************
|
||||
// FLOW-MAPPING-END
|
||||
// ****************
|
||||
//
|
||||
// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||||
// - *** *
|
||||
func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
|
||||
if first {
|
||||
token := peek_token(parser)
|
||||
@ -1128,8 +1145,7 @@ func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event
|
||||
|
||||
// Parse the productions:
|
||||
// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||||
// * ***** *
|
||||
//
|
||||
// - ***** *
|
||||
func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
|
||||
token := peek_token(parser)
|
||||
if token == nil {
|
||||
8
vendor/gopkg.in/yaml.v3/readerc.go → vendor/go.yaml.in/yaml/v3/readerc.go
generated
vendored
@ -1,17 +1,17 @@
|
||||
//
|
||||
//
|
||||
// Copyright (c) 2011-2019 Canonical Ltd
|
||||
// Copyright (c) 2006-2010 Kirill Simonov
|
||||
//
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
// of the Software, and to permit persons to whom the Software is furnished to do
|
||||
// so, subject to the following conditions:
|
||||
//
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
0
vendor/gopkg.in/yaml.v3/resolve.go → vendor/go.yaml.in/yaml/v3/resolve.go
generated
vendored
42
vendor/gopkg.in/yaml.v3/scannerc.go → vendor/go.yaml.in/yaml/v3/scannerc.go
generated
vendored
@ -1614,11 +1614,11 @@ func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
|
||||
// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
|
||||
//
|
||||
// Scope:
|
||||
// %YAML 1.1 # a comment \n
|
||||
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
// %TAG !yaml! tag:yaml.org,2002: \n
|
||||
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
//
|
||||
// %YAML 1.1 # a comment \n
|
||||
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
// %TAG !yaml! tag:yaml.org,2002: \n
|
||||
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
|
||||
// Eat '%'.
|
||||
start_mark := parser.mark
|
||||
@ -1719,11 +1719,11 @@ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool
|
||||
// Scan the directive name.
|
||||
//
|
||||
// Scope:
|
||||
// %YAML 1.1 # a comment \n
|
||||
// ^^^^
|
||||
// %TAG !yaml! tag:yaml.org,2002: \n
|
||||
// ^^^
|
||||
//
|
||||
// %YAML 1.1 # a comment \n
|
||||
// ^^^^
|
||||
// %TAG !yaml! tag:yaml.org,2002: \n
|
||||
// ^^^
|
||||
func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
|
||||
// Consume the directive name.
|
||||
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||||
@ -1758,8 +1758,9 @@ func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark
|
||||
// Scan the value of VERSION-DIRECTIVE.
|
||||
//
|
||||
// Scope:
|
||||
// %YAML 1.1 # a comment \n
|
||||
// ^^^^^^
|
||||
//
|
||||
// %YAML 1.1 # a comment \n
|
||||
// ^^^^^^
|
||||
func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
|
||||
// Eat whitespaces.
|
||||
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||||
@ -1797,10 +1798,11 @@ const max_number_length = 2
|
||||
// Scan the version number of VERSION-DIRECTIVE.
|
||||
//
|
||||
// Scope:
|
||||
// %YAML 1.1 # a comment \n
|
||||
// ^
|
||||
// %YAML 1.1 # a comment \n
|
||||
// ^
|
||||
//
|
||||
// %YAML 1.1 # a comment \n
|
||||
// ^
|
||||
// %YAML 1.1 # a comment \n
|
||||
// ^
|
||||
func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
|
||||
|
||||
// Repeat while the next character is digit.
|
||||
@ -1834,9 +1836,9 @@ func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark
|
||||
// Scan the value of a TAG-DIRECTIVE token.
|
||||
//
|
||||
// Scope:
|
||||
// %TAG !yaml! tag:yaml.org,2002: \n
|
||||
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
//
|
||||
// %TAG !yaml! tag:yaml.org,2002: \n
|
||||
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
|
||||
var handle_value, prefix_value []byte
|
||||
|
||||
@ -2847,7 +2849,7 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t
|
||||
continue
|
||||
}
|
||||
if parser.buffer[parser.buffer_pos+peek] == '#' {
|
||||
seen := parser.mark.index+peek
|
||||
seen := parser.mark.index + peek
|
||||
for {
|
||||
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||||
return false
|
||||
@ -2876,7 +2878,7 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t
|
||||
parser.comments = append(parser.comments, yaml_comment_t{
|
||||
token_mark: token_mark,
|
||||
start_mark: start_mark,
|
||||
line: text,
|
||||
line: text,
|
||||
})
|
||||
}
|
||||
return true
|
||||
@ -2910,7 +2912,7 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo
|
||||
// the foot is the line below it.
|
||||
var foot_line = -1
|
||||
if scan_mark.line > 0 {
|
||||
foot_line = parser.mark.line-parser.newlines+1
|
||||
foot_line = parser.mark.line - parser.newlines + 1
|
||||
if parser.newlines == 0 && parser.mark.column > 1 {
|
||||
foot_line++
|
||||
}
|
||||
@ -2996,7 +2998,7 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo
|
||||
recent_empty = false
|
||||
|
||||
// Consume until after the consumed comment line.
|
||||
seen := parser.mark.index+peek
|
||||
seen := parser.mark.index + peek
|
||||
for {
|
||||
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||||
return false
|
||||
0
vendor/gopkg.in/yaml.v3/sorter.go → vendor/go.yaml.in/yaml/v3/sorter.go
generated
vendored
8
vendor/gopkg.in/yaml.v3/writerc.go → vendor/go.yaml.in/yaml/v3/writerc.go
generated
vendored
@ -1,17 +1,17 @@
|
||||
//
|
||||
//
|
||||
// Copyright (c) 2011-2019 Canonical Ltd
|
||||
// Copyright (c) 2006-2010 Kirill Simonov
|
||||
//
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
// of the Software, and to permit persons to whom the Software is furnished to do
|
||||
// so, subject to the following conditions:
|
||||
//
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
85
vendor/gopkg.in/yaml.v3/yaml.go → vendor/go.yaml.in/yaml/v3/yaml.go
generated
vendored
@ -17,8 +17,7 @@
|
||||
//
|
||||
// Source code and other details for the project are available at GitHub:
|
||||
//
|
||||
// https://github.com/go-yaml/yaml
|
||||
//
|
||||
// https://github.com/yaml/go-yaml
|
||||
package yaml
|
||||
|
||||
import (
|
||||
@ -75,16 +74,15 @@ type Marshaler interface {
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// type T struct {
|
||||
// F int `yaml:"a,omitempty"`
|
||||
// B int
|
||||
// }
|
||||
// var t T
|
||||
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
|
||||
// type T struct {
|
||||
// F int `yaml:"a,omitempty"`
|
||||
// B int
|
||||
// }
|
||||
// var t T
|
||||
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
|
||||
//
|
||||
// See the documentation of Marshal for the format of tags and a list of
|
||||
// supported tag options.
|
||||
//
|
||||
func Unmarshal(in []byte, out interface{}) (err error) {
|
||||
return unmarshal(in, out, false)
|
||||
}
|
||||
@ -185,36 +183,35 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) {
|
||||
//
|
||||
// The field tag format accepted is:
|
||||
//
|
||||
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
|
||||
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
|
||||
//
|
||||
// The following flags are currently supported:
|
||||
//
|
||||
// omitempty Only include the field if it's not set to the zero
|
||||
// value for the type or to empty slices or maps.
|
||||
// Zero valued structs will be omitted if all their public
|
||||
// fields are zero, unless they implement an IsZero
|
||||
// method (see the IsZeroer interface type), in which
|
||||
// case the field will be excluded if IsZero returns true.
|
||||
// omitempty Only include the field if it's not set to the zero
|
||||
// value for the type or to empty slices or maps.
|
||||
// Zero valued structs will be omitted if all their public
|
||||
// fields are zero, unless they implement an IsZero
|
||||
// method (see the IsZeroer interface type), in which
|
||||
// case the field will be excluded if IsZero returns true.
|
||||
//
|
||||
// flow Marshal using a flow style (useful for structs,
|
||||
// sequences and maps).
|
||||
// flow Marshal using a flow style (useful for structs,
|
||||
// sequences and maps).
|
||||
//
|
||||
// inline Inline the field, which must be a struct or a map,
|
||||
// causing all of its fields or keys to be processed as if
|
||||
// they were part of the outer struct. For maps, keys must
|
||||
// not conflict with the yaml keys of other struct fields.
|
||||
// inline Inline the field, which must be a struct or a map,
|
||||
// causing all of its fields or keys to be processed as if
|
||||
// they were part of the outer struct. For maps, keys must
|
||||
// not conflict with the yaml keys of other struct fields.
|
||||
//
|
||||
// In addition, if the key is "-", the field is ignored.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// type T struct {
|
||||
// F int `yaml:"a,omitempty"`
|
||||
// B int
|
||||
// }
|
||||
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
|
||||
// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
|
||||
//
|
||||
// type T struct {
|
||||
// F int `yaml:"a,omitempty"`
|
||||
// B int
|
||||
// }
|
||||
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
|
||||
// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
|
||||
func Marshal(in interface{}) (out []byte, err error) {
|
||||
defer handleErr(&err)
|
||||
e := newEncoder()
|
||||
@ -278,6 +275,16 @@ func (e *Encoder) SetIndent(spaces int) {
|
||||
e.encoder.indent = spaces
|
||||
}
|
||||
|
||||
// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
|
||||
func (e *Encoder) CompactSeqIndent() {
|
||||
e.encoder.emitter.compact_sequence_indent = true
|
||||
}
|
||||
|
||||
// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
|
||||
func (e *Encoder) DefaultSeqIndent() {
|
||||
e.encoder.emitter.compact_sequence_indent = false
|
||||
}
|
||||
|
||||
// Close closes the encoder by writing any remaining data.
|
||||
// It does not write a stream terminating string "...".
|
||||
func (e *Encoder) Close() (err error) {
|
||||
@ -358,22 +365,21 @@ const (
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// var person struct {
|
||||
// Name string
|
||||
// Address yaml.Node
|
||||
// }
|
||||
// err := yaml.Unmarshal(data, &person)
|
||||
//
|
||||
// var person struct {
|
||||
// Name string
|
||||
// Address yaml.Node
|
||||
// }
|
||||
// err := yaml.Unmarshal(data, &person)
|
||||
//
|
||||
// Or by itself:
|
||||
//
|
||||
// var person Node
|
||||
// err := yaml.Unmarshal(data, &person)
|
||||
//
|
||||
// var person Node
|
||||
// err := yaml.Unmarshal(data, &person)
|
||||
type Node struct {
|
||||
// Kind defines whether the node is a document, a mapping, a sequence,
|
||||
// a scalar value, or an alias to another node. The specific data type of
|
||||
// scalar nodes may be obtained via the ShortTag and LongTag methods.
|
||||
Kind Kind
|
||||
Kind Kind
|
||||
|
||||
// Style allows customizing the apperance of the node in the tree.
|
||||
Style Style
|
||||
@ -421,7 +427,6 @@ func (n *Node) IsZero() bool {
|
||||
n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
|
||||
}
|
||||
|
||||
|
||||
// LongTag returns the long form of the tag that indicates the data type for
|
||||
// the node. If the Tag field isn't explicitly defined, one will be computed
|
||||
// based on the node properties.
|
||||
12
vendor/gopkg.in/yaml.v3/yamlh.go → vendor/go.yaml.in/yaml/v3/yamlh.go
generated
vendored
@ -438,7 +438,9 @@ type yaml_document_t struct {
|
||||
// The number of written bytes should be set to the size_read variable.
|
||||
//
|
||||
// [in,out] data A pointer to an application data specified by
|
||||
// yaml_parser_set_input().
|
||||
//
|
||||
// yaml_parser_set_input().
|
||||
//
|
||||
// [out] buffer The buffer to write the data from the source.
|
||||
// [in] size The size of the buffer.
|
||||
// [out] size_read The actual number of bytes read from the source.
|
||||
@ -639,7 +641,6 @@ type yaml_parser_t struct {
|
||||
}
|
||||
|
||||
type yaml_comment_t struct {
|
||||
|
||||
scan_mark yaml_mark_t // Position where scanning for comments started
|
||||
token_mark yaml_mark_t // Position after which tokens will be associated with this comment
|
||||
start_mark yaml_mark_t // Position of '#' comment mark
|
||||
@ -659,13 +660,14 @@ type yaml_comment_t struct {
|
||||
// @a buffer to the output.
|
||||
//
|
||||
// @param[in,out] data A pointer to an application data specified by
|
||||
// yaml_emitter_set_output().
|
||||
//
|
||||
// yaml_emitter_set_output().
|
||||
//
|
||||
// @param[in] buffer The buffer with bytes to be written.
|
||||
// @param[in] size The size of the buffer.
|
||||
//
|
||||
// @returns On success, the handler should return @c 1. If the handler failed,
|
||||
// the returned value should be @c 0.
|
||||
//
|
||||
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
|
||||
|
||||
type yaml_emitter_state_t int
|
||||
@ -742,6 +744,8 @@ type yaml_emitter_t struct {
|
||||
|
||||
indent int // The current indentation level.
|
||||
|
||||
compact_sequence_indent bool // Is '- ' is considered part of the indentation for sequence elements?
|
||||
|
||||
flow_level int // The current flow level.
|
||||
|
||||
root_context bool // Is it the document root context?
|
||||
20
vendor/gopkg.in/yaml.v3/yamlprivateh.go → vendor/go.yaml.in/yaml/v3/yamlprivateh.go
generated
vendored
@ -1,17 +1,17 @@
|
||||
//
|
||||
//
|
||||
// Copyright (c) 2011-2019 Canonical Ltd
|
||||
// Copyright (c) 2006-2010 Kirill Simonov
|
||||
//
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
// of the Software, and to permit persons to whom the Software is furnished to do
|
||||
// so, subject to the following conditions:
|
||||
//
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
@ -137,8 +137,8 @@ func is_crlf(b []byte, i int) bool {
|
||||
func is_breakz(b []byte, i int) bool {
|
||||
//return is_break(b, i) || is_z(b, i)
|
||||
return (
|
||||
// is_break:
|
||||
b[i] == '\r' || // CR (#xD)
|
||||
// is_break:
|
||||
b[i] == '\r' || // CR (#xD)
|
||||
b[i] == '\n' || // LF (#xA)
|
||||
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
|
||||
@ -151,8 +151,8 @@ func is_breakz(b []byte, i int) bool {
|
||||
func is_spacez(b []byte, i int) bool {
|
||||
//return is_space(b, i) || is_breakz(b, i)
|
||||
return (
|
||||
// is_space:
|
||||
b[i] == ' ' ||
|
||||
// is_space:
|
||||
b[i] == ' ' ||
|
||||
// is_breakz:
|
||||
b[i] == '\r' || // CR (#xD)
|
||||
b[i] == '\n' || // LF (#xA)
|
||||
@ -166,8 +166,8 @@ func is_spacez(b []byte, i int) bool {
|
||||
func is_blankz(b []byte, i int) bool {
|
||||
//return is_blank(b, i) || is_breakz(b, i)
|
||||
return (
|
||||
// is_blank:
|
||||
b[i] == ' ' || b[i] == '\t' ||
|
||||
// is_blank:
|
||||
b[i] == ' ' || b[i] == '\t' ||
|
||||
// is_breakz:
|
||||
b[i] == '\r' || // CR (#xD)
|
||||
b[i] == '\n' || // LF (#xA)
|
||||
150
vendor/gopkg.in/yaml.v3/README.md
generated
vendored
@ -1,150 +0,0 @@
# YAML support for the Go language

Introduction
------------

The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.

Compatibility
-------------

The yaml package supports most of YAML 1.2, but preserves some behavior
from 1.1 for backwards compatibility.

Specifically, as of v3 of the yaml package:

- YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
  decoded into a typed bool value. Otherwise they behave as a string. Booleans
  in YAML 1.2 are _true/false_ only.
- Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
  as specified in YAML 1.2, because most parsers still use the old format.
  Octals in the _0o777_ format are supported though, so new files work.
- Does not support base-60 floats. These are gone from YAML 1.2, and were
  actually never supported by this package as it's clearly a poor choice.

The package also supports most features of YAML 1.2, including anchors, tags,
and map merging, and offers backwards compatibility with YAML 1.1 in some
cases. Multi-document unmarshalling is not yet implemented, and base-60 floats
from YAML 1.1 are purposefully not supported since they're a poor design and
are gone in YAML 1.2.
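A minimal sketch of the compatibility rule above, assuming the behavior described for typed bool targets; it applies equally to the go.yaml.in/yaml/v3 fork vendored by this change, which is a drop-in replacement for the import path shown here.

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

func main() {
	doc := []byte("enabled: on\nmode: on\n")

	var cfg struct {
		Enabled bool   `yaml:"enabled"` // YAML 1.1 literal "on" becomes true here...
		Mode    string `yaml:"mode"`    // ...but stays the string "on" here.
	}
	if err := yaml.Unmarshal(doc, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("enabled=%v mode=%q\n", cfg.Enabled, cfg.Mode)
	// Expected: enabled=true mode="on"
}
```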
Installation and usage
----------------------

The import path for the package is *gopkg.in/yaml.v3*.

To install it, run:

    go get gopkg.in/yaml.v3

API documentation
-----------------

If opened in a browser, the import path itself leads to the API documentation:

- [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)

API stability
-------------

The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).

License
-------

The yaml package is licensed under the MIT and Apache License 2.0 licenses.
Please see the LICENSE file for details.

Example
-------

```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

var data = `
a: Easy!
b:
  c: 2
  d: [3, 4]
`

// Note: struct fields must be public in order for unmarshal to
// correctly populate the data.
type T struct {
	A string
	B struct {
		RenamedC int   `yaml:"c"`
		D        []int `yaml:",flow"`
	}
}

func main() {
	t := T{}

	err := yaml.Unmarshal([]byte(data), &t)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- t:\n%v\n\n", t)

	d, err := yaml.Marshal(&t)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- t dump:\n%s\n\n", string(d))

	m := make(map[interface{}]interface{})

	err = yaml.Unmarshal([]byte(data), &m)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- m:\n%v\n\n", m)

	d, err = yaml.Marshal(&m)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```

This example will generate the following output:

```
--- t:
{Easy! {2 [3 4]}}

--- t dump:
a: Easy!
b:
    c: 2
    d: [3, 4]


--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]

--- m dump:
a: Easy!
b:
    c: 2
    d:
        - 3
        - 4
```
18
vendor/modules.txt
vendored
@ -48,7 +48,7 @@ github.com/creack/pty
# github.com/distribution/reference v0.6.0
## explicit; go 1.20
github.com/distribution/reference
# github.com/docker/cli-docs-tool v0.10.0
# github.com/docker/cli-docs-tool v0.11.0
## explicit; go 1.23.0
github.com/docker/cli-docs-tool
github.com/docker/cli-docs-tool/annotation

@ -146,8 +146,8 @@ github.com/grpc-ecosystem/grpc-gateway/v2/utilities
# github.com/inconshreveable/mousetrap v1.1.0
## explicit; go 1.18
github.com/inconshreveable/mousetrap
# github.com/klauspost/compress v1.18.0
## explicit; go 1.22
# github.com/klauspost/compress v1.18.2
## explicit; go 1.23
github.com/klauspost/compress
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0

@ -236,8 +236,8 @@ github.com/moby/sys/userns
## explicit; go 1.18
github.com/moby/term
github.com/moby/term/windows
# github.com/morikuni/aec v1.0.0
## explicit
# github.com/morikuni/aec v1.1.0
## explicit; go 1.21
github.com/morikuni/aec
# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
## explicit

@ -278,7 +278,7 @@ github.com/russross/blackfriday/v2
# github.com/sirupsen/logrus v1.9.3
## explicit; go 1.13
github.com/sirupsen/logrus
# github.com/spf13/cobra v1.10.1
# github.com/spf13/cobra v1.10.2
## explicit; go 1.15
github.com/spf13/cobra
github.com/spf13/cobra/doc

@ -377,6 +377,9 @@ go.opentelemetry.io/proto/otlp/common/v1
go.opentelemetry.io/proto/otlp/metrics/v1
go.opentelemetry.io/proto/otlp/resource/v1
go.opentelemetry.io/proto/otlp/trace/v1
# go.yaml.in/yaml/v3 v3.0.4
## explicit; go 1.16
go.yaml.in/yaml/v3
# golang.org/x/net v0.47.0
## explicit; go 1.24.0
golang.org/x/net/http/httpguts

@ -522,9 +525,6 @@ google.golang.org/protobuf/types/known/fieldmaskpb
google.golang.org/protobuf/types/known/structpb
google.golang.org/protobuf/types/known/timestamppb
google.golang.org/protobuf/types/known/wrapperspb
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
# gotest.tools/v3 v3.5.2
## explicit; go 1.17
gotest.tools/v3/assert