diff --git a/go.mod b/go.mod
index 14781faf..4c6bfc38 100644
--- a/go.mod
+++ b/go.mod
@@ -5,51 +5,55 @@ go 1.23.5
toolchain go1.24.1
require (
- coopcloud.tech/tagcmp v0.0.0-20230809071031-eb3e7758d4eb
+ coopcloud.tech/tagcmp v0.0.0-20250427094623-9ea3bbbde8e5
git.coopcloud.tech/toolshed/godotenv v1.5.2-0.20250103171850-4d0ca41daa5c
github.com/AlecAivazis/survey/v2 v2.3.7
- github.com/charmbracelet/bubbletea v1.3.4
+ github.com/charmbracelet/bubbletea v1.3.6
github.com/charmbracelet/lipgloss v1.1.0
- github.com/charmbracelet/log v0.4.1
+ github.com/charmbracelet/log v0.4.2
github.com/distribution/reference v0.6.0
- github.com/docker/cli v28.0.1+incompatible
- github.com/docker/docker v28.0.1+incompatible
+ github.com/docker/cli v28.3.3+incompatible
+ github.com/docker/docker v28.3.3+incompatible
github.com/docker/go-units v0.5.0
- github.com/go-git/go-git/v5 v5.14.0
+ github.com/go-git/go-git/v5 v5.16.2
github.com/google/go-cmp v0.7.0
github.com/leonelquinteros/gotext v1.7.2
github.com/moby/sys/signal v0.7.1
github.com/moby/term v0.5.2
github.com/pkg/errors v0.9.1
github.com/schollz/progressbar/v3 v3.18.0
- golang.org/x/term v0.30.0
+ golang.org/x/term v0.34.0
gopkg.in/yaml.v3 v3.0.1
gotest.tools/v3 v3.5.2
)
require (
- dario.cat/mergo v1.0.1 // indirect
- github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
+ dario.cat/mergo v1.0.2 // indirect
+ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
- github.com/BurntSushi/toml v1.4.0 // indirect
+ github.com/BurntSushi/toml v1.5.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
- github.com/ProtonMail/go-crypto v1.1.6 // indirect
+ github.com/ProtonMail/go-crypto v1.3.0 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+ github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
- github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
- github.com/charmbracelet/x/ansi v0.8.0 // indirect
+ github.com/charmbracelet/colorprofile v0.3.1 // indirect
+ github.com/charmbracelet/x/ansi v0.10.1 // indirect
github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
github.com/charmbracelet/x/term v0.2.1 // indirect
- github.com/cloudflare/circl v1.6.0 // indirect
+ github.com/cloudflare/circl v1.6.1 // indirect
+ github.com/containerd/errdefs v1.0.0 // indirect
+ github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
- github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
+ github.com/containerd/platforms v0.2.1 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
- github.com/docker/go-connections v0.5.0 // indirect
+ github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
@@ -60,13 +64,13 @@ require (
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.6.2 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
- github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
+ github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
@@ -83,8 +87,10 @@ require (
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
- github.com/moby/sys/mountinfo v0.6.2 // indirect
- github.com/moby/sys/user v0.3.0 // indirect
+ github.com/moby/go-archive v0.1.0 // indirect
+ github.com/moby/sys/atomicwriter v0.1.0 // indirect
+ github.com/moby/sys/mountinfo v0.7.2 // indirect
+ github.com/moby/sys/user v0.4.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
@@ -95,42 +101,42 @@ require (
github.com/opencontainers/runc v1.1.13 // indirect
github.com/opencontainers/runtime-spec v1.1.0 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
- github.com/pjbgf/sha1cd v0.3.2 // indirect
+ github.com/pjbgf/sha1cd v0.4.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.63.0 // indirect
- github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/common v0.65.0 // indirect
+ github.com/prometheus/procfs v0.17.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/skeema/knownhosts v1.3.1 // indirect
- github.com/spf13/pflag v1.0.6 // indirect
+ github.com/spf13/pflag v1.0.7 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
- go.opentelemetry.io/otel v1.35.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect
+ go.opentelemetry.io/otel v1.37.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect
- go.opentelemetry.io/otel/metric v1.35.0 // indirect
- go.opentelemetry.io/otel/sdk v1.35.0 // indirect
- go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
- go.opentelemetry.io/otel/trace v1.35.0 // indirect
- go.opentelemetry.io/proto/otlp v1.5.0 // indirect
- golang.org/x/crypto v0.36.0 // indirect
- golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
- golang.org/x/net v0.37.0 // indirect
- golang.org/x/sync v0.12.0 // indirect
- golang.org/x/text v0.23.0 // indirect
- golang.org/x/time v0.11.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20250313205543-e70fdf4c4cb4 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 // indirect
- google.golang.org/grpc v1.71.0 // indirect
- google.golang.org/protobuf v1.36.5 // indirect
+ go.opentelemetry.io/otel/metric v1.37.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.37.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect
+ go.opentelemetry.io/otel/trace v1.37.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.7.1 // indirect
+ golang.org/x/crypto v0.41.0 // indirect
+ golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 // indirect
+ golang.org/x/net v0.43.0 // indirect
+ golang.org/x/sync v0.16.0 // indirect
+ golang.org/x/text v0.28.0 // indirect
+ golang.org/x/time v0.12.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20250811230008-5f3141c8851a // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a // indirect
+ google.golang.org/grpc v1.74.2 // indirect
+ google.golang.org/protobuf v1.36.7 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)
@@ -143,15 +149,15 @@ require (
github.com/fvbommel/sortorder v1.1.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/gorilla/mux v1.8.1 // indirect
- github.com/hashicorp/go-retryablehttp v0.7.7
+ github.com/hashicorp/go-retryablehttp v0.7.8
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
- github.com/prometheus/client_golang v1.21.1 // indirect
- github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
+ github.com/prometheus/client_golang v1.23.0 // indirect
+ github.com/sergi/go-diff v1.4.0 // indirect
github.com/spf13/cobra v1.9.1
github.com/stretchr/testify v1.10.0
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
- golang.org/x/sys v0.31.0
+ golang.org/x/sys v0.35.0
)
diff --git a/go.sum b/go.sum
index 62c6ce16..8db9fdbd 100644
--- a/go.sum
+++ b/go.sum
@@ -24,13 +24,19 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
coopcloud.tech/tagcmp v0.0.0-20230809071031-eb3e7758d4eb h1:Ws6WEwKXeaYEkfdkX6AqX1XLPuaCeyStEtxbmEJPllk=
coopcloud.tech/tagcmp v0.0.0-20230809071031-eb3e7758d4eb/go.mod h1:ESVm0wQKcbcFi06jItF3rI7enf4Jt2PvbkWpDDHk1DQ=
+coopcloud.tech/tagcmp v0.0.0-20250427094623-9ea3bbbde8e5 h1:tphJCjFJw9fdjyKnbU0f7f3z5KtYE8VbUcAfu+oHKg8=
+coopcloud.tech/tagcmp v0.0.0-20250427094623-9ea3bbbde8e5/go.mod h1:ESVm0wQKcbcFi06jItF3rI7enf4Jt2PvbkWpDDHk1DQ=
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
+dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
git.coopcloud.tech/toolshed/godotenv v1.5.2-0.20250103171850-4d0ca41daa5c h1:oeKnUB79PKYD8D0/unYuu7MRcWryQQWOns8+JL+acrs=
git.coopcloud.tech/toolshed/godotenv v1.5.2-0.20250103171850-4d0ca41daa5c/go.mod h1:fQuhwrpg6qb9NlFXKYi/LysWu1wxjraS8sxyW12CUF0=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@@ -51,6 +57,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
@@ -81,6 +89,8 @@ github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDe
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw=
github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
+github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
+github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
@@ -130,21 +140,32 @@ github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3k
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charmbracelet/bubbletea v1.3.4 h1:kCg7B+jSCFPLYRA52SDZjr51kG/fMUEoPoZrkaDHyoI=
github.com/charmbracelet/bubbletea v1.3.4/go.mod h1:dtcUCyCGEX3g9tosuYiut3MXgY/Jsv9nKVdibKKRRXo=
+github.com/charmbracelet/bubbletea v1.3.6 h1:VkHIxPJQeDt0aFJIsVxw8BQdh/F/L2KKZGsK6et5taU=
+github.com/charmbracelet/bubbletea v1.3.6/go.mod h1:oQD9VCRQFF8KplacJLo28/jofOI2ToOfGYeFgBBxHOc=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
+github.com/charmbracelet/colorprofile v0.3.1 h1:k8dTHMd7fgw4bnFd7jXTLZrSU/CQrKnL3m+AxCzDz40=
+github.com/charmbracelet/colorprofile v0.3.1/go.mod h1:/GkGusxNs8VB/RSOh3fu0TJmQ4ICMMPApIIVn0KszZ0=
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
github.com/charmbracelet/log v0.4.1 h1:6AYnoHKADkghm/vt4neaNEXkxcXLSV2g1rdyFDOpTyk=
github.com/charmbracelet/log v0.4.1/go.mod h1:pXgyTsqsVu4N9hGdHmQ0xEA4RsXof402LX9ZgiITn2I=
+github.com/charmbracelet/log v0.4.2 h1:hYt8Qj6a8yLnvR+h7MwsJv/XvmBJXiueUcI3cIxsyig=
+github.com/charmbracelet/log v0.4.2/go.mod h1:qifHGX/tc7eluv2R6pWIpyHDDrrb/AG71Pf2ysQu5nw=
github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE=
github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q=
+github.com/charmbracelet/x/ansi v0.10.1 h1:rL3Koar5XvX0pHGfovN03f5cxLbCF2YvLeyz7D2jVDQ=
+github.com/charmbracelet/x/ansi v0.10.1/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE=
github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k=
github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a h1:G99klV19u0QnhiizODirwVksQB91TJKV/UaTnACcG30=
@@ -170,6 +191,8 @@ github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
github.com/cloudflare/circl v1.6.0 h1:cr5JKic4HI+LkINy2lg3W2jF8sHCVTBncJr5gIIq7qk=
github.com/cloudflare/circl v1.6.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
+github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
+github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
@@ -215,6 +238,10 @@ github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cE
github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
@@ -237,6 +264,8 @@ github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
+github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/stargz-snapshotter/estargz v0.11.0/go.mod h1:/KsZXsJRllMbTKFfG0miFQWViQKdI9+9aSXs+HN0+ac=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
@@ -285,6 +314,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
+github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
@@ -314,6 +345,8 @@ github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyG
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v28.0.1+incompatible h1:g0h5NQNda3/CxIsaZfH4Tyf6vpxFth7PYl3hgCPOKzs=
github.com/docker/cli v28.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v28.3.3+incompatible h1:fp9ZHAr1WWPGdIWBM1b3zLtgCF+83gRdVMTJsUeiyAo=
+github.com/docker/cli v28.3.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@@ -322,6 +355,8 @@ github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4Kfc
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v28.0.1+incompatible h1:FCHjSRdXhNRFjlHMTv4jUNlIBbTeRjrWfeFuJp7jpo0=
github.com/docker/docker v28.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
+github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
@@ -330,6 +365,8 @@ github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
+github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
+github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
@@ -391,6 +428,8 @@ github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMj
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
github.com/go-git/go-git/v5 v5.14.0 h1:/MD3lCrGjCen5WfEAzKg00MJJffKhC8gzS80ycmCi60=
github.com/go-git/go-git/v5 v5.14.0/go.mod h1:Z5Xhoia5PcWA3NF8vRLURn9E5FRhSl7dGj9ItW3Wk5k=
+github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM=
+github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -406,6 +445,8 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
@@ -424,6 +465,8 @@ github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
+github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
@@ -528,9 +571,12 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -544,6 +590,8 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
+github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
+github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
@@ -663,14 +711,20 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
+github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
+github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
+github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
+github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
+github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/sys/signal v0.7.1 h1:PrQxdvxcGijdo6UXXo/lU/TvHUWyPhj7UOpSo8tuvk0=
@@ -678,6 +732,8 @@ github.com/moby/sys/signal v0.7.1/go.mod h1:Se1VGehYokAkrSQwL4tDzHvETwUZlnY7S5Xt
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
+github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
+github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
@@ -766,6 +822,8 @@ github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4=
github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A=
+github.com/pjbgf/sha1cd v0.4.0 h1:NXzbL1RvjTUi6kgYZCX3fPwwl27Q1LJndxtUDVfJGRY=
+github.com/pjbgf/sha1cd v0.4.0/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -783,6 +841,8 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
+github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
+github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -790,6 +850,8 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -798,6 +860,8 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
+github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
+github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -811,6 +875,8 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
+github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
@@ -820,6 +886,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
+github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -832,6 +899,8 @@ github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvW
github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
+github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
+github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@@ -872,6 +941,8 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
+github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
@@ -950,27 +1021,47 @@ go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJyS
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
+go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
+go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 h1:QcFwRrZLc82r8wODjvyCbP7Ifp3UANaBSmhDSFjnqSc=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0/go.mod h1:CXIWhUomyWBG/oY2/r/kLp6K/cmx9e/7DLpBuuGdLCA=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0 h1:zG8GlgXCJQd5BU98C0hZnBbElszTmUgCNCfYneaDL0A=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0/go.mod h1:hOfBCz8kv/wuq73Mx2H2QnWokh/kHZxkh6SNF2bdKtw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
+go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
+go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
+go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
+go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
+go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
+go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
+go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
+go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
+go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
+go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -997,6 +1088,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
+golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1009,6 +1102,8 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
+golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 h1:3yiSh9fhy5/RhCSntf4Sy0Tnx50DmMpQ4MQdKKk4yg4=
+golang.org/x/exp v0.0.0-20250811191247-51f88131bc50/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1074,6 +1169,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
+golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1093,6 +1190,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1174,11 +1273,15 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
+golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1190,6 +1293,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1198,6 +1303,8 @@ golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
+golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
+golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1293,8 +1400,12 @@ google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/api v0.0.0-20250313205543-e70fdf4c4cb4 h1:IFnXJq3UPB3oBREOodn1v1aGQeZYQclEmvWRMN0PSsY=
google.golang.org/genproto/googleapis/api v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:c8q6Z6OCqnfVIqUFJkCzKcrj8eCvUrz+K4KRzSTuANg=
+google.golang.org/genproto/googleapis/api v0.0.0-20250811230008-5f3141c8851a h1:DMCgtIAIQGZqJXMVzJF4MV8BlWoJh2ZuFiRdAleyr58=
+google.golang.org/genproto/googleapis/api v0.0.0-20250811230008-5f3141c8851a/go.mod h1:y2yVLIE/CSMCPXaHnSKXxu1spLPnglFLegmgdY23uuE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 h1:iK2jbkWL86DXjEx0qiHcRE9dE4/Ahua5k6V8OWFb//c=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a h1:tPE/Kp+x9dMSwUm/uM0JKK0IfdiJkwAbSMSeZBXXJXc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a/go.mod h1:gw1tLEfykwDz2ET4a12jcXt4couGAm7IwsVaTy0Sflo=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -1316,6 +1427,8 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg=
google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
+google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4=
+google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1331,6 +1444,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
+google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII=
@@ -1370,6 +1485,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
diff --git a/vendor/coopcloud.tech/tagcmp/renovate.json b/vendor/coopcloud.tech/tagcmp/renovate.json
deleted file mode 100644
index 7190a60b..00000000
--- a/vendor/coopcloud.tech/tagcmp/renovate.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "$schema": "https://docs.renovatebot.com/renovate-schema.json"
-}
diff --git a/vendor/dario.cat/mergo/FUNDING.json b/vendor/dario.cat/mergo/FUNDING.json
new file mode 100644
index 00000000..0585e1fe
--- /dev/null
+++ b/vendor/dario.cat/mergo/FUNDING.json
@@ -0,0 +1,7 @@
+{
+ "drips": {
+ "ethereum": {
+ "ownedBy": "0x6160020e7102237aC41bdb156e94401692D76930"
+ }
+ }
+}
diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md
index 0b3c4888..0e4a59af 100644
--- a/vendor/dario.cat/mergo/README.md
+++ b/vendor/dario.cat/mergo/README.md
@@ -85,7 +85,6 @@ Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/depend
* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
* [go-micro/go-micro](https://github.com/go-micro/go-micro)
* [grafana/loki](https://github.com/grafana/loki)
-* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
* [masterminds/sprig](github.com/Masterminds/sprig)
* [moby/moby](https://github.com/moby/moby)
* [slackhq/nebula](https://github.com/slackhq/nebula)
@@ -191,10 +190,6 @@ func main() {
}
```
-Note: if test are failing due missing package, please execute:
-
- go get gopkg.in/yaml.v3
-
### Transformers
Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`?
diff --git a/vendor/dario.cat/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md
index a5de61f7..3788fcc1 100644
--- a/vendor/dario.cat/mergo/SECURITY.md
+++ b/vendor/dario.cat/mergo/SECURITY.md
@@ -4,8 +4,8 @@
| Version | Supported |
| ------- | ------------------ |
-| 0.3.x | :white_check_mark: |
-| < 0.3 | :x: |
+| 1.x.x | :white_check_mark: |
+| < 1.0 | :x: |
## Security contact information
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
index 639e6c39..235496ee 100644
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages.
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
-Documentation: https://godocs.io/github.com/BurntSushi/toml
+Documentation: https://pkg.go.dev/github.com/BurntSushi/toml
See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
index 7aaf462c..3fa516ca 100644
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -196,6 +196,19 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
return md.unify(primValue.undecoded, rvalue(v))
}
+// markDecodedRecursive is a helper to mark any key under the given tmap as
+// decoded, recursing as needed
+func markDecodedRecursive(md *MetaData, tmap map[string]any) {
+ for key := range tmap {
+ md.decoded[md.context.add(key).String()] = struct{}{}
+ if tmap, ok := tmap[key].(map[string]any); ok {
+ md.context = append(md.context, key)
+ markDecodedRecursive(md, tmap)
+ md.context = md.context[0 : len(md.context)-1]
+ }
+ }
+}
+
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
@@ -222,6 +235,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error {
if err != nil {
return md.parseErr(err)
}
+ // Assume the Unmarshaler decoded everything, so mark all keys under
+ // this table as decoded.
+ if tmap, ok := data.(map[string]any); ok {
+ markDecodedRecursive(md, tmap)
+ }
+ if aot, ok := data.([]map[string]any); ok {
+ for _, tmap := range aot {
+ markDecodedRecursive(md, tmap)
+ }
+ }
return nil
}
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
@@ -540,12 +563,14 @@ func (md *MetaData) badtype(dst string, data any) error {
func (md *MetaData) parseErr(err error) error {
k := md.context.String()
+ d := string(md.data)
return ParseError{
- LastKey: k,
- Position: md.keyInfo[k].pos,
- Line: md.keyInfo[k].pos.Line,
+ Message: err.Error(),
err: err,
- input: string(md.data),
+ LastKey: k,
+ Position: md.keyInfo[k].pos.withCol(d),
+ Line: md.keyInfo[k].pos.Line,
+ input: d,
}
}
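
The hunk above changes how keys handled by a custom `Unmarshaler` are tracked: everything under the table passed to `UnmarshalTOML` is now marked as decoded. A minimal sketch of the effect, assuming the existing `toml.Decode`/`MetaData.Undecoded` API (the `rawServer` type and the input document are illustrative):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// rawServer consumes its entire table via UnmarshalTOML.
type rawServer struct {
	fields map[string]any
}

func (s *rawServer) UnmarshalTOML(v any) error {
	s.fields, _ = v.(map[string]any)
	return nil
}

func main() {
	var doc struct {
		Server rawServer `toml:"server"`
	}
	md, err := toml.Decode("[server]\nhost = \"localhost\"\nport = 8080\n", &doc)
	if err != nil {
		panic(err)
	}
	// Keys consumed by UnmarshalTOML are now marked as decoded, so this
	// should print an empty slice instead of listing server.host and
	// server.port as undecoded.
	fmt.Println(md.Undecoded())
}
```
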
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
index 73366c0d..ac196e7d 100644
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
- var mapKeysDirect, mapKeysSub []string
+ var mapKeysDirect, mapKeysSub []reflect.Value
for _, mapKey := range rv.MapKeys() {
- k := mapKey.String()
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
- mapKeysSub = append(mapKeysSub, k)
+ mapKeysSub = append(mapKeysSub, mapKey)
} else {
- mapKeysDirect = append(mapKeysDirect, k)
+ mapKeysDirect = append(mapKeysDirect, mapKey)
}
}
- var writeMapKeys = func(mapKeys []string, trailC bool) {
- sort.Strings(mapKeys)
+ writeMapKeys := func(mapKeys []reflect.Value, trailC bool) {
+ sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() })
for i, mapKey := range mapKeys {
- val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
+ val := eindirect(rv.MapIndex(mapKey))
if isNil(val) {
continue
}
if inline {
- enc.writeKeyValue(Key{mapKey}, val, true)
+ enc.writeKeyValue(Key{mapKey.String()}, val, true)
if trailC || i != len(mapKeys)-1 {
enc.wf(", ")
}
} else {
- enc.encode(key.add(mapKey), val)
+ enc.encode(key.add(mapKey.String()), val)
}
}
}
@@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
}
}
-const is32Bit = (32 << (^uint(0) >> 63)) == 32
-
func pointerTo(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr {
return pointerTo(t.Elem())
@@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
frv := eindirect(rv.Field(i))
- if is32Bit {
- // Copy so it works correct on 32bit archs; not clear why this
- // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
- // This also works fine on 64bit, but 32bit archs are somewhat
- // rare and this is a wee bit faster.
- copyStart := make([]int, len(start))
- copy(copyStart, start)
- start = copyStart
- }
+ // Need to make a copy because ... ehm, I don't know why... I guess
+ // allocating a new array can cause it to fail(?)
+ //
+ // Done for: https://github.com/BurntSushi/toml/issues/430
+ // Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314
+ copyStart := make([]int, len(start))
+ copy(copyStart, start)
+ start = copyStart
// Treat anonymous struct fields with tag names as though they are
// not anonymous, like encoding/json does.
@@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
}
addFields(rt, rv, nil)
- writeFields := func(fields [][]int) {
+ writeFields := func(fields [][]int, totalFields int) {
for _, fieldIndex := range fields {
fieldType := rt.FieldByIndex(fieldIndex)
fieldVal := rv.FieldByIndex(fieldIndex)
@@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline {
enc.writeKeyValue(Key{keyName}, fieldVal, true)
- if fieldIndex[0] != len(fields)-1 {
+ if fieldIndex[0] != totalFields-1 {
enc.wf(", ")
}
} else {
@@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline {
enc.wf("{")
}
- writeFields(fieldsDirect)
- writeFields(fieldsSub)
+
+ l := len(fieldsDirect) + len(fieldsSub)
+ writeFields(fieldsDirect, l)
+ writeFields(fieldsSub, l)
if inline {
enc.wf("}")
}
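
The map-encoding hunk above keeps the original `reflect.Value` keys instead of converting them to `string` up front, so maps whose key type is a defined string type can be indexed again without a panic. A small hedged sketch (the `Name` type is illustrative; `toml.Marshal` is the existing top-level encoder):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Name is a defined string type used as a map key.
type Name string

func main() {
	// Map keys are re-used as reflect.Values when indexing the map, so a
	// defined string key type like Name now encodes without panicking.
	out, err := toml.Marshal(map[Name]int{"alpha": 1, "beta": 2})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```
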
diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go
index b45a3f45..b7077d3a 100644
--- a/vendor/github.com/BurntSushi/toml/error.go
+++ b/vendor/github.com/BurntSushi/toml/error.go
@@ -67,21 +67,36 @@ type ParseError struct {
// Position of an error.
type Position struct {
Line int // Line number, starting at 1.
+ Col int // Error column, starting at 1.
Start int // Start of error, as byte offset starting at 0.
- Len int // Lenght in bytes.
+ Len int // Length of the error in bytes.
+}
+
+func (p Position) withCol(tomlFile string) Position {
+ var (
+ pos int
+ lines = strings.Split(tomlFile, "\n")
+ )
+ for i := range lines {
+ ll := len(lines[i]) + 1 // +1 for the removed newline
+ if pos+ll >= p.Start {
+ p.Col = p.Start - pos + 1
+ if p.Col < 1 { // Should never happen, but just in case.
+ p.Col = 1
+ }
+ break
+ }
+ pos += ll
+ }
+ return p
}
func (pe ParseError) Error() string {
- msg := pe.Message
- if msg == "" { // Error from errorf()
- msg = pe.err.Error()
- }
-
if pe.LastKey == "" {
- return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
+ return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message)
}
return fmt.Sprintf("toml: line %d (last key %q): %s",
- pe.Position.Line, pe.LastKey, msg)
+ pe.Position.Line, pe.LastKey, pe.Message)
}
// ErrorWithPosition returns the error with detailed location context.
@@ -92,26 +107,19 @@ func (pe ParseError) ErrorWithPosition() string {
return pe.Error()
}
- var (
- lines = strings.Split(pe.input, "\n")
- col = pe.column(lines)
- b = new(strings.Builder)
- )
-
- msg := pe.Message
- if msg == "" {
- msg = pe.err.Error()
- }
-
// TODO: don't show control characters as literals? This may not show up
// well everywhere.
+ var (
+ lines = strings.Split(pe.input, "\n")
+ b = new(strings.Builder)
+ )
if pe.Position.Len == 1 {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
- msg, pe.Position.Line, col+1)
+ pe.Message, pe.Position.Line, pe.Position.Col)
} else {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
- msg, pe.Position.Line, col, col+pe.Position.Len)
+ pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1)
}
if pe.Position.Line > 2 {
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
@@ -129,7 +137,7 @@ func (pe ParseError) ErrorWithPosition() string {
diff := len(expanded) - len(lines[pe.Position.Line-1])
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
- fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len))
+ fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len))
return b.String()
}
@@ -151,23 +159,6 @@ func (pe ParseError) ErrorWithUsage() string {
return m
}
-func (pe ParseError) column(lines []string) int {
- var pos, col int
- for i := range lines {
- ll := len(lines[i]) + 1 // +1 for the removed newline
- if pos+ll >= pe.Position.Start {
- col = pe.Position.Start - pos
- if col < 0 { // Should never happen, but just in case.
- col = 0
- }
- break
- }
- pos += ll
- }
-
- return col
-}
-
func expandTab(s string) string {
var (
b strings.Builder
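
With these hunks, the error column is computed once in `withCol` and exposed as `Position.Col`, and `Message` is always populated. A short usage sketch against the existing `ParseError` API (the broken input string is illustrative):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg map[string]any
	_, err := toml.Decode("key = # value is missing\n", &cfg)

	var perr toml.ParseError
	if errors.As(err, &perr) {
		// Position.Col is the newly exposed 1-based column of the error.
		fmt.Printf("line %d, column %d: %s\n",
			perr.Position.Line, perr.Position.Col, perr.Message)
		fmt.Println(perr.ErrorWithPosition())
	}
}
```
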
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
index a1016d98..1c3b4770 100644
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
func (lx *lexer) errorf(format string, values ...any) stateFn {
if lx.atEOF {
pos := lx.getPos()
- pos.Line--
+ if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' {
+ pos.Line--
+ }
pos.Len = 1
pos.Start = lx.pos - 1
lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
@@ -492,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn {
lx.emit(itemKeyEnd)
return lexSkip(lx, lexValue)
default:
+ if r == '\n' {
+ return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r))
+ }
return lx.errorf("expected '.' or '=', but got %q instead", r)
}
}
@@ -560,6 +565,9 @@ func lexValue(lx *lexer) stateFn {
if r == eof {
return lx.errorf("unexpected EOF; expected value")
}
+ if r == '\n' {
+ return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r))
+ }
return lx.errorf("expected value but found %q instead", r)
}
@@ -1111,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
case 'x':
r = lx.peek()
if !isHex(r) {
- lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
+ lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
}
return lexHexInteger
}
@@ -1259,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' }
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
func isBareKeyChar(r rune, tomlNext bool) bool {
- if tomlNext {
- return (r >= 'A' && r <= 'Z') ||
- (r >= 'a' && r <= 'z') ||
- (r >= '0' && r <= '9') ||
- r == '_' || r == '-' ||
- r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
- (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
- (r >= 0x037f && r <= 0x1fff) ||
- (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
- (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
- (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
- (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
- (r >= 0x10000 && r <= 0xeffff)
- }
-
- return (r >= 'A' && r <= 'Z') ||
- (r >= 'a' && r <= 'z') ||
- (r >= '0' && r <= '9') ||
- r == '_' || r == '-'
+ return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') ||
+ (r >= '0' && r <= '9') || r == '_' || r == '-'
}
diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go
index e6145373..0d337026 100644
--- a/vendor/github.com/BurntSushi/toml/meta.go
+++ b/vendor/github.com/BurntSushi/toml/meta.go
@@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string {
// Like append(), but only increase the cap by 1.
func (k Key) add(piece string) Key {
- if cap(k) > len(k) {
- return append(k, piece)
- }
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
index 11ac3108..e3ea8a9a 100644
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) {
// it anyway.
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
data = data[2:]
- //lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447
} else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
data = data[3:]
}
@@ -65,7 +64,7 @@ func parse(data string) (p *parser, err error) {
if i := strings.IndexRune(data[:ex], 0); i > -1 {
return nil, ParseError{
Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
- Position: Position{Line: 1, Start: i, Len: 1},
+ Position: Position{Line: 1, Col: 1, Start: i, Len: 1},
Line: 1,
input: data,
}
@@ -92,8 +91,9 @@ func parse(data string) (p *parser, err error) {
func (p *parser) panicErr(it item, err error) {
panic(ParseError{
+ Message: err.Error(),
err: err,
- Position: it.pos,
+ Position: it.pos.withCol(p.lx.input),
Line: it.pos.Len,
LastKey: p.current(),
})
@@ -102,7 +102,7 @@ func (p *parser) panicErr(it item, err error) {
func (p *parser) panicItemf(it item, format string, v ...any) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
- Position: it.pos,
+ Position: it.pos.withCol(p.lx.input),
Line: it.pos.Len,
LastKey: p.current(),
})
@@ -111,7 +111,7 @@ func (p *parser) panicItemf(it item, format string, v ...any) {
func (p *parser) panicf(format string, v ...any) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
- Position: p.pos,
+ Position: p.pos.withCol(p.lx.input),
Line: p.pos.Line,
LastKey: p.current(),
})
@@ -123,10 +123,11 @@ func (p *parser) next() item {
if it.typ == itemError {
if it.err != nil {
panic(ParseError{
- Position: it.pos,
+ Message: it.err.Error(),
+ err: it.err,
+ Position: it.pos.withCol(p.lx.input),
Line: it.pos.Line,
LastKey: p.current(),
- err: it.err,
})
}
@@ -527,7 +528,7 @@ func numUnderscoresOK(s string) bool {
}
}
- // isHexis a superset of all the permissable characters surrounding an
+ // isHex is a superset of all the permissible characters surrounding an
// underscore.
accept = isHex(r)
}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
index e44b4573..2e341507 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
@@ -180,6 +180,16 @@ func (dke ErrMalformedMessage) Error() string {
return "openpgp: malformed message " + string(dke)
}
+type messageTooLargeError int
+
+func (e messageTooLargeError) Error() string {
+ return "openpgp: decompressed message size exceeds provided limit"
+}
+
+// ErrMessageTooLarge is returned if the read data from
+// a compressed packet exceeds the provided limit.
+var ErrMessageTooLarge error = messageTooLargeError(0)
+
// ErrEncryptionKeySelection is returned if encryption key selection fails (v2 API).
type ErrEncryptionKeySelection struct {
PrimaryKeyId string
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go
index fec41a0e..ef100d37 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go
@@ -37,7 +37,7 @@ func (conf *AEADConfig) Mode() AEADMode {
// ChunkSizeByte returns the byte indicating the chunk size. The effective
// chunk size is computed with the formula uint64(1) << (chunkSizeByte + 6)
-// limit to 16 = 4 MiB
+// limit chunkSizeByte to 16 which equals to 2^22 = 4 MiB
// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
func (conf *AEADConfig) ChunkSizeByte() byte {
if conf == nil || conf.ChunkSize == 0 {
@@ -49,8 +49,8 @@ func (conf *AEADConfig) ChunkSizeByte() byte {
switch {
case exponent < 6:
exponent = 6
- case exponent > 16:
- exponent = 16
+ case exponent > 22:
+ exponent = 22
}
return byte(exponent - 6)
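
For the chunk-size cap above: the wire value is the chunk-size byte, and the effective chunk size is `1 << (chunkSizeByte + 6)`, so capping the exponent at 22 means `ChunkSizeByte()` tops out at 16 and the largest chunk stays at 4 MiB. A quick arithmetic check (not library code):

```go
package main

import "fmt"

func main() {
	// Exponent capped at 22 -> ChunkSizeByte() returns 22-6 = 16, and the
	// effective chunk size is 1 << (16+6) = 2^22 bytes = 4 MiB.
	chunkSizeByte := uint64(16)
	fmt.Println(uint64(1) << (chunkSizeByte + 6)) // 4194304
}
```
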
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
index 0bcb38ca..931f55a4 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
@@ -98,6 +98,16 @@ func (c *Compressed) parse(r io.Reader) error {
return err
}
+// LimitedBodyReader wraps the provided body reader with a limiter that restricts
+// the number of bytes read to the specified limit.
+// If limit is nil, the reader is unbounded.
+func (c *Compressed) LimitedBodyReader(limit *int64) io.Reader {
+ if limit == nil {
+ return c.Body
+ }
+ return &LimitReader{R: c.Body, N: *limit}
+}
+
// compressedWriterCloser represents the serialized compression stream
// header and the compressor. Its Close() method ensures that both the
// compressor and serialized stream header are closed. Its Write()
@@ -159,3 +169,24 @@ func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *Compression
return
}
+
+// LimitReader is an io.Reader that fails with ErrMessageTooLarge if read bytes exceed N.
+type LimitReader struct {
+ R io.Reader // underlying reader
+ N int64 // max bytes allowed
+}
+
+func (l *LimitReader) Read(p []byte) (int, error) {
+ if l.N <= 0 {
+ return 0, errors.ErrMessageTooLarge
+ }
+
+ n, err := l.R.Read(p)
+ l.N -= int64(n)
+
+ if err == nil && l.N <= 0 {
+ err = errors.ErrMessageTooLarge
+ }
+
+ return n, err
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
index 257398d9..30167ed9 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
@@ -178,6 +178,11 @@ type Config struct {
// When set to true, a key without flags is treated as if all flags are enabled.
// This behavior is consistent with GPG.
InsecureAllowAllKeyFlagsWhenMissing bool
+
+ // MaxDecompressedMessageSize specifies the maximum number of bytes that can be
+ // read from a compressed packet. This serves as an upper limit to prevent
+ // excessively large decompressed messages.
+ MaxDecompressedMessageSize *int64
}
func (c *Config) Random() io.Reader {
@@ -415,6 +420,13 @@ func (c *Config) AllowAllKeyFlagsWhenMissing() bool {
return c.InsecureAllowAllKeyFlagsWhenMissing
}
+func (c *Config) DecompressedMessageSizeLimit() *int64 {
+ if c == nil {
+ return nil
+ }
+ return c.MaxDecompressedMessageSize
+}
+
// BoolPointer is a helper function to set a boolean pointer in the Config.
// e.g., config.CheckPacketSequence = BoolPointer(true)
func BoolPointer(value bool) *bool {
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
index e6dd9b5f..5578797e 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
@@ -259,7 +259,7 @@ FindLiteralData:
}
switch p := p.(type) {
case *packet.Compressed:
- if err := packets.Push(p.Body); err != nil {
+ if err := packets.Push(p.LimitedBodyReader(config.DecompressedMessageSizeLimit())); err != nil {
return nil, err
}
case *packet.OnePassSignature:
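
Taken together, the `Config` and `read.go` hunks let callers bound how far a compressed packet may expand. A hedged sketch of opting in (the 32 MiB figure and the empty keyring are illustrative; `ReadMessage` is the existing entry point):

```go
package main

import (
	"bytes"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// Cap decompressed message size at 32 MiB to guard against decompression
	// bombs; leaving the field nil keeps the previous unbounded behaviour.
	limit := int64(32 << 20)
	config := &packet.Config{
		MaxDecompressedMessageSize: &limit,
	}

	var keyring openpgp.EntityList // populated elsewhere in a real program
	_, _ = openpgp.ReadMessage(bytes.NewReader(nil), keyring, nil, config)
}
```
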
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
index b0f6ef7b..84bc27d8 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
@@ -253,34 +253,12 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit
}
var hash crypto.Hash
- for _, hashId := range candidateHashes {
- if h, ok := algorithm.HashIdToHash(hashId); ok && h.Available() {
- hash = h
- break
- }
- }
-
- // If the hash specified by config is a candidate, we'll use that.
- if configuredHash := config.Hash(); configuredHash.Available() {
- for _, hashId := range candidateHashes {
- if h, ok := algorithm.HashIdToHash(hashId); ok && h == configuredHash {
- hash = h
- break
- }
- }
- }
-
- if hash == 0 {
- hashId := candidateHashes[0]
- name, ok := algorithm.HashIdToString(hashId)
- if !ok {
- name = "#" + strconv.Itoa(int(hashId))
- }
- return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
- }
-
var salt []byte
if signer != nil {
+ if hash, err = selectHash(candidateHashes, config.Hash(), signer); err != nil {
+ return nil, err
+ }
+
var opsVersion = 3
if signer.Version == 6 {
opsVersion = signer.Version
@@ -558,13 +536,34 @@ func (s signatureWriter) Close() error {
return s.encryptedData.Close()
}
+func selectHashForSigningKey(config *packet.Config, signer *packet.PublicKey) crypto.Hash {
+ acceptableHashes := acceptableHashesToWrite(signer)
+ hash, ok := algorithm.HashToHashId(config.Hash())
+ if !ok {
+ return config.Hash()
+ }
+ for _, acceptableHashes := range acceptableHashes {
+ if acceptableHashes == hash {
+ return config.Hash()
+ }
+ }
+ if len(acceptableHashes) > 0 {
+ defaultAcceptedHash, ok := algorithm.HashIdToHash(acceptableHashes[0])
+ if ok {
+ return defaultAcceptedHash
+ }
+ }
+ return config.Hash()
+}
+
func createSignaturePacket(signer *packet.PublicKey, sigType packet.SignatureType, config *packet.Config) *packet.Signature {
sigLifetimeSecs := config.SigLifetime()
+ hash := selectHashForSigningKey(config, signer)
return &packet.Signature{
Version: signer.Version,
SigType: sigType,
PubKeyAlgo: signer.PubKeyAlgo,
- Hash: config.Hash(),
+ Hash: hash,
CreationTime: config.Now(),
IssuerKeyId: &signer.KeyId,
IssuerFingerprint: signer.Fingerprint,
@@ -618,3 +617,74 @@ func handleCompression(compressed io.WriteCloser, candidateCompression []uint8,
}
return data, nil
}
+
+// selectHash selects the preferred hash given the candidateHashes and the configuredHash
+func selectHash(candidateHashes []byte, configuredHash crypto.Hash, signer *packet.PrivateKey) (hash crypto.Hash, err error) {
+ acceptableHashes := acceptableHashesToWrite(&signer.PublicKey)
+ candidateHashes = intersectPreferences(acceptableHashes, candidateHashes)
+
+ for _, hashId := range candidateHashes {
+ if h, ok := algorithm.HashIdToHash(hashId); ok && h.Available() {
+ hash = h
+ break
+ }
+ }
+
+ // If the hash specified by config is a candidate, we'll use that.
+ if configuredHash.Available() {
+ for _, hashId := range candidateHashes {
+ if h, ok := algorithm.HashIdToHash(hashId); ok && h == configuredHash {
+ hash = h
+ break
+ }
+ }
+ }
+
+ if hash == 0 {
+ if len(acceptableHashes) > 0 {
+ if h, ok := algorithm.HashIdToHash(acceptableHashes[0]); ok {
+ hash = h
+ } else {
+ return 0, errors.UnsupportedError("no candidate hash functions are compiled in.")
+ }
+ } else {
+ return 0, errors.UnsupportedError("no candidate hash functions are compiled in.")
+ }
+ }
+ return
+}
+
+func acceptableHashesToWrite(singingKey *packet.PublicKey) []uint8 {
+ switch singingKey.PubKeyAlgo {
+ case packet.PubKeyAlgoEd448:
+ return []uint8{
+ hashToHashId(crypto.SHA512),
+ hashToHashId(crypto.SHA3_512),
+ }
+ case packet.PubKeyAlgoECDSA, packet.PubKeyAlgoEdDSA:
+ if curve, err := singingKey.Curve(); err == nil {
+ if curve == packet.Curve448 ||
+ curve == packet.CurveNistP521 ||
+ curve == packet.CurveBrainpoolP512 {
+ return []uint8{
+ hashToHashId(crypto.SHA512),
+ hashToHashId(crypto.SHA3_512),
+ }
+ } else if curve == packet.CurveBrainpoolP384 ||
+ curve == packet.CurveNistP384 {
+ return []uint8{
+ hashToHashId(crypto.SHA384),
+ hashToHashId(crypto.SHA512),
+ hashToHashId(crypto.SHA3_512),
+ }
+ }
+ }
+ }
+ return []uint8{
+ hashToHashId(crypto.SHA256),
+ hashToHashId(crypto.SHA384),
+ hashToHashId(crypto.SHA512),
+ hashToHashId(crypto.SHA3_256),
+ hashToHashId(crypto.SHA3_512),
+ }
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go
deleted file mode 100644
index 48482330..00000000
--- a/vendor/github.com/cenkalti/backoff/v4/context.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package backoff
-
-import (
- "context"
- "time"
-)
-
-// BackOffContext is a backoff policy that stops retrying after the context
-// is canceled.
-type BackOffContext interface { // nolint: golint
- BackOff
- Context() context.Context
-}
-
-type backOffContext struct {
- BackOff
- ctx context.Context
-}
-
-// WithContext returns a BackOffContext with context ctx
-//
-// ctx must not be nil
-func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
- if ctx == nil {
- panic("nil context")
- }
-
- if b, ok := b.(*backOffContext); ok {
- return &backOffContext{
- BackOff: b.BackOff,
- ctx: ctx,
- }
- }
-
- return &backOffContext{
- BackOff: b,
- ctx: ctx,
- }
-}
-
-func getContext(b BackOff) context.Context {
- if cb, ok := b.(BackOffContext); ok {
- return cb.Context()
- }
- if tb, ok := b.(*backOffTries); ok {
- return getContext(tb.delegate)
- }
- return context.Background()
-}
-
-func (b *backOffContext) Context() context.Context {
- return b.ctx
-}
-
-func (b *backOffContext) NextBackOff() time.Duration {
- select {
- case <-b.ctx.Done():
- return Stop
- default:
- return b.BackOff.NextBackOff()
- }
-}
diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go
deleted file mode 100644
index aac99f19..00000000
--- a/vendor/github.com/cenkalti/backoff/v4/exponential.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package backoff
-
-import (
- "math/rand"
- "time"
-)
-
-/*
-ExponentialBackOff is a backoff implementation that increases the backoff
-period for each retry attempt using a randomization function that grows exponentially.
-
-NextBackOff() is calculated using the following formula:
-
- randomized interval =
- RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
-
-In other words NextBackOff() will range between the randomization factor
-percentage below and above the retry interval.
-
-For example, given the following parameters:
-
- RetryInterval = 2
- RandomizationFactor = 0.5
- Multiplier = 2
-
-the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
-multiplied by the exponential, that is, between 2 and 6 seconds.
-
-Note: MaxInterval caps the RetryInterval and not the randomized interval.
-
-If the time elapsed since an ExponentialBackOff instance is created goes past the
-MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
-
-The elapsed time can be reset by calling Reset().
-
-Example: Given the following default arguments, for 10 tries the sequence will be,
-and assuming we go over the MaxElapsedTime on the 10th try:
-
- Request # RetryInterval (seconds) Randomized Interval (seconds)
-
- 1 0.5 [0.25, 0.75]
- 2 0.75 [0.375, 1.125]
- 3 1.125 [0.562, 1.687]
- 4 1.687 [0.8435, 2.53]
- 5 2.53 [1.265, 3.795]
- 6 3.795 [1.897, 5.692]
- 7 5.692 [2.846, 8.538]
- 8 8.538 [4.269, 12.807]
- 9 12.807 [6.403, 19.210]
- 10 19.210 backoff.Stop
-
-Note: Implementation is not thread-safe.
-*/
-type ExponentialBackOff struct {
- InitialInterval time.Duration
- RandomizationFactor float64
- Multiplier float64
- MaxInterval time.Duration
- // After MaxElapsedTime the ExponentialBackOff returns Stop.
- // It never stops if MaxElapsedTime == 0.
- MaxElapsedTime time.Duration
- Stop time.Duration
- Clock Clock
-
- currentInterval time.Duration
- startTime time.Time
-}
-
-// Clock is an interface that returns current time for BackOff.
-type Clock interface {
- Now() time.Time
-}
-
-// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
-type ExponentialBackOffOpts func(*ExponentialBackOff)
-
-// Default values for ExponentialBackOff.
-const (
- DefaultInitialInterval = 500 * time.Millisecond
- DefaultRandomizationFactor = 0.5
- DefaultMultiplier = 1.5
- DefaultMaxInterval = 60 * time.Second
- DefaultMaxElapsedTime = 15 * time.Minute
-)
-
-// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
-func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
- b := &ExponentialBackOff{
- InitialInterval: DefaultInitialInterval,
- RandomizationFactor: DefaultRandomizationFactor,
- Multiplier: DefaultMultiplier,
- MaxInterval: DefaultMaxInterval,
- MaxElapsedTime: DefaultMaxElapsedTime,
- Stop: Stop,
- Clock: SystemClock,
- }
- for _, fn := range opts {
- fn(b)
- }
- b.Reset()
- return b
-}
-
-// WithInitialInterval sets the initial interval between retries.
-func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
- return func(ebo *ExponentialBackOff) {
- ebo.InitialInterval = duration
- }
-}
-
-// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
-func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
- return func(ebo *ExponentialBackOff) {
- ebo.RandomizationFactor = randomizationFactor
- }
-}
-
-// WithMultiplier sets the multiplier for increasing the interval after each retry.
-func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
- return func(ebo *ExponentialBackOff) {
- ebo.Multiplier = multiplier
- }
-}
-
-// WithMaxInterval sets the maximum interval between retries.
-func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
- return func(ebo *ExponentialBackOff) {
- ebo.MaxInterval = duration
- }
-}
-
-// WithMaxElapsedTime sets the maximum total time for retries.
-func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
- return func(ebo *ExponentialBackOff) {
- ebo.MaxElapsedTime = duration
- }
-}
-
-// WithRetryStopDuration sets the duration after which retries should stop.
-func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
- return func(ebo *ExponentialBackOff) {
- ebo.Stop = duration
- }
-}
-
-// WithClockProvider sets the clock used to measure time.
-func WithClockProvider(clock Clock) ExponentialBackOffOpts {
- return func(ebo *ExponentialBackOff) {
- ebo.Clock = clock
- }
-}
-
-type systemClock struct{}
-
-func (t systemClock) Now() time.Time {
- return time.Now()
-}
-
-// SystemClock implements Clock interface that uses time.Now().
-var SystemClock = systemClock{}
-
-// Reset the interval back to the initial retry interval and restarts the timer.
-// Reset must be called before using b.
-func (b *ExponentialBackOff) Reset() {
- b.currentInterval = b.InitialInterval
- b.startTime = b.Clock.Now()
-}
-
-// NextBackOff calculates the next backoff interval using the formula:
-// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
-func (b *ExponentialBackOff) NextBackOff() time.Duration {
- // Make sure we have not gone over the maximum elapsed time.
- elapsed := b.GetElapsedTime()
- next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
- b.incrementCurrentInterval()
- if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
- return b.Stop
- }
- return next
-}
-
-// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
-// is created and is reset when Reset() is called.
-//
-// The elapsed time is computed using time.Now().UnixNano(). It is
-// safe to call even while the backoff policy is used by a running
-// ticker.
-func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
- return b.Clock.Now().Sub(b.startTime)
-}
-
-// Increments the current interval by multiplying it with the multiplier.
-func (b *ExponentialBackOff) incrementCurrentInterval() {
- // Check for overflow, if overflow is detected set the current interval to the max interval.
- if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
- b.currentInterval = b.MaxInterval
- } else {
- b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
- }
-}
-
-// Returns a random value from the following interval:
-// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
-func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
- if randomizationFactor == 0 {
- return currentInterval // make sure no randomness is used when randomizationFactor is 0.
- }
- var delta = randomizationFactor * float64(currentInterval)
- var minInterval = float64(currentInterval) - delta
- var maxInterval = float64(currentInterval) + delta
-
- // Get a random value from the range [minInterval, maxInterval].
- // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
- // we want a 33% chance for selecting either 1, 2 or 3.
- return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
-}
diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go
deleted file mode 100644
index b9c0c51c..00000000
--- a/vendor/github.com/cenkalti/backoff/v4/retry.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package backoff
-
-import (
- "errors"
- "time"
-)
-
-// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData().
-// The operation will be retried using a backoff policy if it returns an error.
-type OperationWithData[T any] func() (T, error)
-
-// An Operation is executing by Retry() or RetryNotify().
-// The operation will be retried using a backoff policy if it returns an error.
-type Operation func() error
-
-func (o Operation) withEmptyData() OperationWithData[struct{}] {
- return func() (struct{}, error) {
- return struct{}{}, o()
- }
-}
-
-// Notify is a notify-on-error function. It receives an operation error and
-// backoff delay if the operation failed (with an error).
-//
-// NOTE that if the backoff policy stated to stop retrying,
-// the notify function isn't called.
-type Notify func(error, time.Duration)
-
-// Retry the operation o until it does not return error or BackOff stops.
-// o is guaranteed to be run at least once.
-//
-// If o returns a *PermanentError, the operation is not retried, and the
-// wrapped error is returned.
-//
-// Retry sleeps the goroutine for the duration returned by BackOff after a
-// failed operation returns.
-func Retry(o Operation, b BackOff) error {
- return RetryNotify(o, b, nil)
-}
-
-// RetryWithData is like Retry but returns data in the response too.
-func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
- return RetryNotifyWithData(o, b, nil)
-}
-
-// RetryNotify calls notify function with the error and wait duration
-// for each failed attempt before sleep.
-func RetryNotify(operation Operation, b BackOff, notify Notify) error {
- return RetryNotifyWithTimer(operation, b, notify, nil)
-}
-
-// RetryNotifyWithData is like RetryNotify but returns data in the response too.
-func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
- return doRetryNotify(operation, b, notify, nil)
-}
-
-// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
-// for each failed attempt before sleep.
-// A default timer that uses system timer is used when nil is passed.
-func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
- _, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
- return err
-}
-
-// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
-func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
- return doRetryNotify(operation, b, notify, t)
-}
-
-func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
- var (
- err error
- next time.Duration
- res T
- )
- if t == nil {
- t = &defaultTimer{}
- }
-
- defer func() {
- t.Stop()
- }()
-
- ctx := getContext(b)
-
- b.Reset()
- for {
- res, err = operation()
- if err == nil {
- return res, nil
- }
-
- var permanent *PermanentError
- if errors.As(err, &permanent) {
- return res, permanent.Err
- }
-
- if next = b.NextBackOff(); next == Stop {
- if cerr := ctx.Err(); cerr != nil {
- return res, cerr
- }
-
- return res, err
- }
-
- if notify != nil {
- notify(err, next)
- }
-
- t.Start(next)
-
- select {
- case <-ctx.Done():
- return res, ctx.Err()
- case <-t.C():
- }
- }
-}
-
-// PermanentError signals that the operation should not be retried.
-type PermanentError struct {
- Err error
-}
-
-func (e *PermanentError) Error() string {
- return e.Err.Error()
-}
-
-func (e *PermanentError) Unwrap() error {
- return e.Err
-}
-
-func (e *PermanentError) Is(target error) bool {
- _, ok := target.(*PermanentError)
- return ok
-}
-
-// Permanent wraps the given err in a *PermanentError.
-func Permanent(err error) error {
- if err == nil {
- return nil
- }
- return &PermanentError{
- Err: err,
- }
-}
diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go
deleted file mode 100644
index 28d58ca3..00000000
--- a/vendor/github.com/cenkalti/backoff/v4/tries.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package backoff
-
-import "time"
-
-/*
-WithMaxRetries creates a wrapper around another BackOff, which will
-return Stop if NextBackOff() has been called too many times since
-the last time Reset() was called
-
-Note: Implementation is not thread-safe.
-*/
-func WithMaxRetries(b BackOff, max uint64) BackOff {
- return &backOffTries{delegate: b, maxTries: max}
-}
-
-type backOffTries struct {
- delegate BackOff
- maxTries uint64
- numTries uint64
-}
-
-func (b *backOffTries) NextBackOff() time.Duration {
- if b.maxTries == 0 {
- return Stop
- }
- if b.maxTries > 0 {
- if b.maxTries <= b.numTries {
- return Stop
- }
- b.numTries++
- }
- return b.delegate.NextBackOff()
-}
-
-func (b *backOffTries) Reset() {
- b.numTries = 0
- b.delegate.Reset()
-}
diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v5/.gitignore
similarity index 100%
rename from vendor/github.com/cenkalti/backoff/v4/.gitignore
rename to vendor/github.com/cenkalti/backoff/v5/.gitignore
diff --git a/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md
new file mode 100644
index 00000000..658c3743
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md
@@ -0,0 +1,29 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [5.0.0] - 2024-12-19
+
+### Added
+
+- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry.
+
+### Changed
+
+- Retry function now accepts additional options for specifying max number of tries and max elapsed time.
+- Retry function now accepts a context.Context.
+- Operation function signature changed to return result (any type) and error.
+
+### Removed
+
+- RetryNotify* and RetryWithData functions. Only single Retry function remains.
+- Optional arguments from ExponentialBackoff constructor.
+- Clock and Timer interfaces.
+
+### Fixed
+
+- The original error is returned from Retry if there's a PermanentError. (#144)
+- The Retry function respects the wrapped PermanentError. (#140)
diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v5/LICENSE
similarity index 100%
rename from vendor/github.com/cenkalti/backoff/v4/LICENSE
rename to vendor/github.com/cenkalti/backoff/v5/LICENSE
diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v5/README.md
similarity index 64%
rename from vendor/github.com/cenkalti/backoff/v4/README.md
rename to vendor/github.com/cenkalti/backoff/v5/README.md
index 9433004a..4611b1d1 100644
--- a/vendor/github.com/cenkalti/backoff/v4/README.md
+++ b/vendor/github.com/cenkalti/backoff/v5/README.md
@@ -1,4 +1,4 @@
-# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
+# Exponential Backoff [![GoDoc][godoc image]][godoc]
This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
@@ -9,9 +9,11 @@ The retries exponentially increase and stop increasing when a certain threshold
## Usage
-Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
+Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end.
-Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
+For most cases, use `Retry` function. See [example_test.go][example] for an example.
+
+If you have specific needs, copy `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed.
## Contributing
@@ -19,12 +21,11 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
* Please don't send a PR without opening an issue and discussing it first.
* If proposed change is not a common use case, I will probably not accept it.
-[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
-[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
-[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
-[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
+[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go
+[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go
diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v5/backoff.go
similarity index 87%
rename from vendor/github.com/cenkalti/backoff/v4/backoff.go
rename to vendor/github.com/cenkalti/backoff/v5/backoff.go
index 3676ee40..dd2b24ca 100644
--- a/vendor/github.com/cenkalti/backoff/v4/backoff.go
+++ b/vendor/github.com/cenkalti/backoff/v5/backoff.go
@@ -15,16 +15,16 @@ import "time"
// BackOff is a backoff policy for retrying an operation.
type BackOff interface {
// NextBackOff returns the duration to wait before retrying the operation,
- // or backoff. Stop to indicate that no more retries should be made.
+ // backoff.Stop to indicate that no more retries should be made.
//
// Example usage:
//
- // duration := backoff.NextBackOff();
- // if (duration == backoff.Stop) {
- // // Do not retry operation.
- // } else {
- // // Sleep for duration and retry operation.
- // }
+ // duration := backoff.NextBackOff()
+ // if duration == backoff.Stop {
+ // // Do not retry operation.
+ // } else {
+ // // Sleep for duration and retry operation.
+ // }
//
NextBackOff() time.Duration
diff --git a/vendor/github.com/cenkalti/backoff/v5/error.go b/vendor/github.com/cenkalti/backoff/v5/error.go
new file mode 100644
index 00000000..beb2b38a
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v5/error.go
@@ -0,0 +1,46 @@
+package backoff
+
+import (
+ "fmt"
+ "time"
+)
+
+// PermanentError signals that the operation should not be retried.
+type PermanentError struct {
+ Err error
+}
+
+// Permanent wraps the given err in a *PermanentError.
+func Permanent(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &PermanentError{
+ Err: err,
+ }
+}
+
+// Error returns a string representation of the Permanent error.
+func (e *PermanentError) Error() string {
+ return e.Err.Error()
+}
+
+// Unwrap returns the wrapped error.
+func (e *PermanentError) Unwrap() error {
+ return e.Err
+}
+
+// RetryAfterError signals that the operation should be retried after the given duration.
+type RetryAfterError struct {
+ Duration time.Duration
+}
+
+// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying.
+func RetryAfter(seconds int) error {
+ return &RetryAfterError{Duration: time.Duration(seconds) * time.Second}
+}
+
+// Error returns a string representation of the RetryAfter error.
+func (e *RetryAfterError) Error() string {
+ return fmt.Sprintf("retry after %s", e.Duration)
+}
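
A brief usage sketch (assumed, not from the upstream docs) of how an operation passed to the v5 retry loop would use these two error types; the `classify` helper and status codes are illustrative:

```go
package example

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v5"
)

var errNotFound = errors.New("resource does not exist")

// classify maps a status code to how the retry loop should react: a plain
// error is retried with backoff, Permanent stops retrying and returns the
// wrapped error, and RetryAfter overrides the next delay (and resets backoff).
func classify(status int) error {
	switch status {
	case 404:
		return backoff.Permanent(errNotFound)
	case 429:
		return backoff.RetryAfter(30) // wait 30 seconds before the next try
	case 503:
		return fmt.Errorf("server unavailable (%d)", status) // transient
	}
	return nil
}
```
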
diff --git a/vendor/github.com/cenkalti/backoff/v5/exponential.go b/vendor/github.com/cenkalti/backoff/v5/exponential.go
new file mode 100644
index 00000000..79d425e8
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v5/exponential.go
@@ -0,0 +1,118 @@
+package backoff
+
+import (
+ "math/rand/v2"
+ "time"
+)
+
+/*
+ExponentialBackOff is a backoff implementation that increases the backoff
+period for each retry attempt using a randomization function that grows exponentially.
+
+NextBackOff() is calculated using the following formula:
+
+ randomized interval =
+ RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
+
+In other words NextBackOff() will range between the randomization factor
+percentage below and above the retry interval.
+
+For example, given the following parameters:
+
+ RetryInterval = 2
+ RandomizationFactor = 0.5
+ Multiplier = 2
+
+the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
+multiplied by the exponential, that is, between 2 and 6 seconds.
+
+Note: MaxInterval caps the RetryInterval and not the randomized interval.
+
+Example: Given the following default arguments, for 9 tries the sequence will be:
+
+ Request # RetryInterval (seconds) Randomized Interval (seconds)
+
+ 1 0.5 [0.25, 0.75]
+ 2 0.75 [0.375, 1.125]
+ 3 1.125 [0.562, 1.687]
+ 4 1.687 [0.8435, 2.53]
+ 5 2.53 [1.265, 3.795]
+ 6 3.795 [1.897, 5.692]
+ 7 5.692 [2.846, 8.538]
+ 8 8.538 [4.269, 12.807]
+ 9 12.807 [6.403, 19.210]
+
+Note: Implementation is not thread-safe.
+*/
+type ExponentialBackOff struct {
+ InitialInterval time.Duration
+ RandomizationFactor float64
+ Multiplier float64
+ MaxInterval time.Duration
+
+ currentInterval time.Duration
+}
+
+// Default values for ExponentialBackOff.
+const (
+ DefaultInitialInterval = 500 * time.Millisecond
+ DefaultRandomizationFactor = 0.5
+ DefaultMultiplier = 1.5
+ DefaultMaxInterval = 60 * time.Second
+)
+
+// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
+func NewExponentialBackOff() *ExponentialBackOff {
+ return &ExponentialBackOff{
+ InitialInterval: DefaultInitialInterval,
+ RandomizationFactor: DefaultRandomizationFactor,
+ Multiplier: DefaultMultiplier,
+ MaxInterval: DefaultMaxInterval,
+ }
+}
+
+// Reset the interval back to the initial retry interval and restarts the timer.
+// Reset must be called before using b.
+func (b *ExponentialBackOff) Reset() {
+ b.currentInterval = b.InitialInterval
+}
+
+// NextBackOff calculates the next backoff interval using the formula:
+//
+// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
+func (b *ExponentialBackOff) NextBackOff() time.Duration {
+ if b.currentInterval == 0 {
+ b.currentInterval = b.InitialInterval
+ }
+
+ next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
+ b.incrementCurrentInterval()
+ return next
+}
+
+// Increments the current interval by multiplying it with the multiplier.
+func (b *ExponentialBackOff) incrementCurrentInterval() {
+ // Check for overflow, if overflow is detected set the current interval to the max interval.
+ if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+ b.currentInterval = b.MaxInterval
+ } else {
+ b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+ }
+}
+
+// Returns a random value from the following interval:
+//
+// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+ if randomizationFactor == 0 {
+ return currentInterval // make sure no randomness is used when randomizationFactor is 0.
+ }
+ var delta = randomizationFactor * float64(currentInterval)
+ var minInterval = float64(currentInterval) - delta
+ var maxInterval = float64(currentInterval) + delta
+
+ // Get a random value from the range [minInterval, maxInterval].
+ // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+ // we want a 33% chance for selecting either 1, 2 or 3.
+ return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}
diff --git a/vendor/github.com/cenkalti/backoff/v5/retry.go b/vendor/github.com/cenkalti/backoff/v5/retry.go
new file mode 100644
index 00000000..32a7f988
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v5/retry.go
@@ -0,0 +1,139 @@
+package backoff
+
+import (
+ "context"
+ "errors"
+ "time"
+)
+
+// DefaultMaxElapsedTime sets a default limit for the total retry duration.
+const DefaultMaxElapsedTime = 15 * time.Minute
+
+// Operation is a function that attempts an operation and may be retried.
+type Operation[T any] func() (T, error)
+
+// Notify is a function called on operation error with the error and backoff duration.
+type Notify func(error, time.Duration)
+
+// retryOptions holds configuration settings for the retry mechanism.
+type retryOptions struct {
+ BackOff BackOff // Strategy for calculating backoff periods.
+ Timer timer // Timer to manage retry delays.
+ Notify Notify // Optional function to notify on each retry error.
+ MaxTries uint // Maximum number of retry attempts.
+ MaxElapsedTime time.Duration // Maximum total time for all retries.
+}
+
+type RetryOption func(*retryOptions)
+
+// WithBackOff configures a custom backoff strategy.
+func WithBackOff(b BackOff) RetryOption {
+ return func(args *retryOptions) {
+ args.BackOff = b
+ }
+}
+
+// withTimer sets a custom timer for managing delays between retries.
+func withTimer(t timer) RetryOption {
+ return func(args *retryOptions) {
+ args.Timer = t
+ }
+}
+
+// WithNotify sets a notification function to handle retry errors.
+func WithNotify(n Notify) RetryOption {
+ return func(args *retryOptions) {
+ args.Notify = n
+ }
+}
+
+// WithMaxTries limits the number of all attempts.
+func WithMaxTries(n uint) RetryOption {
+ return func(args *retryOptions) {
+ args.MaxTries = n
+ }
+}
+
+// WithMaxElapsedTime limits the total duration for retry attempts.
+func WithMaxElapsedTime(d time.Duration) RetryOption {
+ return func(args *retryOptions) {
+ args.MaxElapsedTime = d
+ }
+}
+
+// Retry attempts the operation until success, a permanent error, or backoff completion.
+// It ensures the operation is executed at least once.
+//
+// Returns the operation result or error if retries are exhausted or context is cancelled.
+func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) {
+ // Initialize default retry options.
+ args := &retryOptions{
+ BackOff: NewExponentialBackOff(),
+ Timer: &defaultTimer{},
+ MaxElapsedTime: DefaultMaxElapsedTime,
+ }
+
+ // Apply user-provided options to the default settings.
+ for _, opt := range opts {
+ opt(args)
+ }
+
+ defer args.Timer.Stop()
+
+ startedAt := time.Now()
+ args.BackOff.Reset()
+ for numTries := uint(1); ; numTries++ {
+ // Execute the operation.
+ res, err := operation()
+ if err == nil {
+ return res, nil
+ }
+
+ // Stop retrying if maximum tries exceeded.
+ if args.MaxTries > 0 && numTries >= args.MaxTries {
+ return res, err
+ }
+
+ // Handle permanent errors without retrying.
+ var permanent *PermanentError
+ if errors.As(err, &permanent) {
+ return res, permanent.Unwrap()
+ }
+
+ // Stop retrying if context is cancelled.
+ if cerr := context.Cause(ctx); cerr != nil {
+ return res, cerr
+ }
+
+ // Calculate next backoff duration.
+ next := args.BackOff.NextBackOff()
+ if next == Stop {
+ return res, err
+ }
+
+ // Reset backoff if RetryAfterError is encountered.
+ var retryAfter *RetryAfterError
+ if errors.As(err, &retryAfter) {
+ next = retryAfter.Duration
+ args.BackOff.Reset()
+ }
+
+ // Stop retrying if maximum elapsed time exceeded.
+ if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime {
+ return res, err
+ }
+
+ // Notify on error if a notifier function is provided.
+ if args.Notify != nil {
+ args.Notify(err, next)
+ }
+
+ // Wait for the next backoff period or context cancellation.
+ args.Timer.Start(next)
+ select {
+ case <-args.Timer.C():
+ case <-ctx.Done():
+ return res, context.Cause(ctx)
+ }
+ }
+}
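
Since the v4 `Retry*` helpers are deleted earlier in this diff, downstream callers would migrate to the single generic `Retry` defined in this file. A hedged migration sketch using only the options added above (the operation body, try count, and timeouts are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	operation := func() (string, error) {
		// Fallible work goes here; any non-permanent error is retried.
		return "", fmt.Errorf("still failing") // placeholder
	}

	// v5 collapses the RetryNotify*/RetryWithData variants into one generic
	// Retry that takes a context plus functional options.
	result, err := backoff.Retry(ctx, operation,
		backoff.WithBackOff(backoff.NewExponentialBackOff()),
		backoff.WithMaxTries(5),
		backoff.WithMaxElapsedTime(30*time.Second),
		backoff.WithNotify(func(err error, next time.Duration) {
			log.Printf("retrying in %s after: %v", next, err)
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(result)
}
```
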
diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v5/ticker.go
similarity index 80%
rename from vendor/github.com/cenkalti/backoff/v4/ticker.go
rename to vendor/github.com/cenkalti/backoff/v5/ticker.go
index df9d68bc..f0d4b2ae 100644
--- a/vendor/github.com/cenkalti/backoff/v4/ticker.go
+++ b/vendor/github.com/cenkalti/backoff/v5/ticker.go
@@ -1,7 +1,6 @@
package backoff
import (
- "context"
"sync"
"time"
)
@@ -14,8 +13,7 @@ type Ticker struct {
C <-chan time.Time
c chan time.Time
b BackOff
- ctx context.Context
- timer Timer
+ timer timer
stop chan struct{}
stopOnce sync.Once
}
@@ -27,22 +25,12 @@ type Ticker struct {
// provided backoff policy (notably calling NextBackOff or Reset)
// while the ticker is running.
func NewTicker(b BackOff) *Ticker {
- return NewTickerWithTimer(b, &defaultTimer{})
-}
-
-// NewTickerWithTimer returns a new Ticker with a custom timer.
-// A default timer that uses system timer is used when nil is passed.
-func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
- if timer == nil {
- timer = &defaultTimer{}
- }
c := make(chan time.Time)
t := &Ticker{
C: c,
c: c,
b: b,
- ctx: getContext(b),
- timer: timer,
+ timer: &defaultTimer{},
stop: make(chan struct{}),
}
t.b.Reset()
@@ -73,8 +61,6 @@ func (t *Ticker) run() {
case <-t.stop:
t.c = nil // Prevent future ticks from being sent to the channel.
return
- case <-t.ctx.Done():
- return
}
}
}
diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v5/timer.go
similarity index 96%
rename from vendor/github.com/cenkalti/backoff/v4/timer.go
rename to vendor/github.com/cenkalti/backoff/v5/timer.go
index 8120d021..a8953097 100644
--- a/vendor/github.com/cenkalti/backoff/v4/timer.go
+++ b/vendor/github.com/cenkalti/backoff/v5/timer.go
@@ -2,7 +2,7 @@ package backoff
import "time"
-type Timer interface {
+type timer interface {
Start(duration time.Duration)
Stop()
C() <-chan time.Time
diff --git a/vendor/github.com/charmbracelet/bubbletea/.golangci-soft.yml b/vendor/github.com/charmbracelet/bubbletea/.golangci-soft.yml
deleted file mode 100644
index d325d4fc..00000000
--- a/vendor/github.com/charmbracelet/bubbletea/.golangci-soft.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-run:
- tests: false
- issues-exit-code: 0
-
-issues:
- include:
- - EXC0001
- - EXC0005
- - EXC0011
- - EXC0012
- - EXC0013
-
- max-issues-per-linter: 0
- max-same-issues: 0
-
-linters:
- enable:
- - exhaustive
- - goconst
- - godot
- - godox
- - mnd
- - gomoddirectives
- - goprintffuncname
- - misspell
- - nakedret
- - nestif
- - noctx
- - nolintlint
- - prealloc
- - wrapcheck
-
- # disable default linters, they are already enabled in .golangci.yml
- disable:
- - errcheck
- - gosimple
- - govet
- - ineffassign
- - staticcheck
- - unused
diff --git a/vendor/github.com/charmbracelet/bubbletea/.golangci.yml b/vendor/github.com/charmbracelet/bubbletea/.golangci.yml
index d6789e01..4fac29c2 100644
--- a/vendor/github.com/charmbracelet/bubbletea/.golangci.yml
+++ b/vendor/github.com/charmbracelet/bubbletea/.golangci.yml
@@ -1,24 +1,22 @@
+version: "2"
run:
tests: false
-
-issues:
- include:
- - EXC0001
- - EXC0005
- - EXC0011
- - EXC0012
- - EXC0013
-
- max-issues-per-linter: 0
- max-same-issues: 0
-
linters:
enable:
- bodyclose
- - gofumpt
- - goimports
+ - exhaustive
+ - goconst
+ - godot
+ - gomoddirectives
+ - goprintffuncname
- gosec
+ - misspell
+ - nakedret
+ - nestif
- nilerr
+ - noctx
+ - nolintlint
+ - prealloc
- revive
- rowserrcheck
- sqlclosecheck
@@ -26,3 +24,17 @@ linters:
- unconvert
- unparam
- whitespace
+ - wrapcheck
+ exclusions:
+ generated: lax
+ presets:
+ - common-false-positives
+issues:
+ max-issues-per-linter: 0
+ max-same-issues: 0
+formatters:
+ enable:
+ - gofumpt
+ - goimports
+ exclusions:
+ generated: lax
diff --git a/vendor/github.com/charmbracelet/bubbletea/README.md b/vendor/github.com/charmbracelet/bubbletea/README.md
index 58dd1828..b3acf900 100644
--- a/vendor/github.com/charmbracelet/bubbletea/README.md
+++ b/vendor/github.com/charmbracelet/bubbletea/README.md
@@ -1,11 +1,15 @@
# Bubble Tea
-
+
+
+
+
+
+
-
The fun, functional and stateful way to build terminal apps. A Go framework
diff --git a/vendor/github.com/charmbracelet/bubbletea/Taskfile.yaml b/vendor/github.com/charmbracelet/bubbletea/Taskfile.yaml
new file mode 100644
index 00000000..35072035
--- /dev/null
+++ b/vendor/github.com/charmbracelet/bubbletea/Taskfile.yaml
@@ -0,0 +1,14 @@
+# https://taskfile.dev
+
+version: '3'
+
+tasks:
+ lint:
+ desc: Run lint
+ cmds:
+ - golangci-lint run
+
+ test:
+ desc: Run tests
+ cmds:
+ - go test ./... {{.CLI_ARGS}}
diff --git a/vendor/github.com/charmbracelet/bubbletea/exec.go b/vendor/github.com/charmbracelet/bubbletea/exec.go
index 7a14d2a7..80484145 100644
--- a/vendor/github.com/charmbracelet/bubbletea/exec.go
+++ b/vendor/github.com/charmbracelet/bubbletea/exec.go
@@ -114,6 +114,7 @@ func (p *Program) exec(c ExecCommand, fn ExecCallback) {
// Execute system command.
if err := c.Run(); err != nil {
+ p.renderer.resetLinesRendered()
_ = p.RestoreTerminal() // also try to restore the terminal.
if fn != nil {
go p.Send(fn(err))
@@ -121,6 +122,9 @@ func (p *Program) exec(c ExecCommand, fn ExecCallback) {
return
}
+ // Maintain the existing output from the command
+ p.renderer.resetLinesRendered()
+
// Have the program re-capture input.
err := p.RestoreTerminal()
if fn != nil {
diff --git a/vendor/github.com/charmbracelet/bubbletea/inputreader_other.go b/vendor/github.com/charmbracelet/bubbletea/inputreader_other.go
index 3426a177..1d1b1761 100644
--- a/vendor/github.com/charmbracelet/bubbletea/inputreader_other.go
+++ b/vendor/github.com/charmbracelet/bubbletea/inputreader_other.go
@@ -4,11 +4,16 @@
package tea
import (
+ "fmt"
"io"
"github.com/muesli/cancelreader"
)
func newInputReader(r io.Reader, _ bool) (cancelreader.CancelReader, error) {
- return cancelreader.NewReader(r)
+ cr, err := cancelreader.NewReader(r)
+ if err != nil {
+ return nil, fmt.Errorf("bubbletea: error creating cancel reader: %w", err)
+ }
+ return cr, nil
}
diff --git a/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go b/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go
index 9af89428..d76617d3 100644
--- a/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go
+++ b/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go
@@ -67,6 +67,8 @@ func newInputReader(r io.Reader, enableMouse bool) (cancelreader.CancelReader, e
func (r *conInputReader) Cancel() bool {
r.setCanceled()
+ // Warning: These cancel methods do not reliably work on console input
+ // and should not be counted on.
return windows.CancelIoEx(r.conin, nil) == nil || windows.CancelIo(r.conin) == nil
}
diff --git a/vendor/github.com/charmbracelet/bubbletea/key.go b/vendor/github.com/charmbracelet/bubbletea/key.go
index ab4792ac..12a161a7 100644
--- a/vendor/github.com/charmbracelet/bubbletea/key.go
+++ b/vendor/github.com/charmbracelet/bubbletea/key.go
@@ -622,7 +622,7 @@ func detectOneMsg(b []byte, canHaveMoreData bool) (w int, msg Msg) {
case '<':
if matchIndices := mouseSGRRegex.FindSubmatchIndex(b[3:]); matchIndices != nil {
// SGR mouse events length is the length of the match plus the length of the escape sequence
- mouseEventSGRLen := matchIndices[1] + 3 //nolint:gomnd
+ mouseEventSGRLen := matchIndices[1] + 3 //nolint:mnd
return mouseEventSGRLen, MouseMsg(parseSGRMouseEvent(b))
}
}
diff --git a/vendor/github.com/charmbracelet/bubbletea/key_sequences.go b/vendor/github.com/charmbracelet/bubbletea/key_sequences.go
index 15483ef5..dce9bf48 100644
--- a/vendor/github.com/charmbracelet/bubbletea/key_sequences.go
+++ b/vendor/github.com/charmbracelet/bubbletea/key_sequences.go
@@ -119,13 +119,12 @@ func detectBracketedPaste(input []byte) (hasBp bool, width int, msg Msg) {
}
// detectReportFocus detects a focus report sequence.
-// nolint: gomnd
func detectReportFocus(input []byte) (hasRF bool, width int, msg Msg) {
switch {
case bytes.Equal(input, []byte("\x1b[I")):
- return true, 3, FocusMsg{}
+ return true, 3, FocusMsg{} //nolint:mnd
case bytes.Equal(input, []byte("\x1b[O")):
- return true, 3, BlurMsg{}
+ return true, 3, BlurMsg{} //nolint:mnd
}
return false, 0, nil
}
diff --git a/vendor/github.com/charmbracelet/bubbletea/key_windows.go b/vendor/github.com/charmbracelet/bubbletea/key_windows.go
index d59ff1c4..ac983739 100644
--- a/vendor/github.com/charmbracelet/bubbletea/key_windows.go
+++ b/vendor/github.com/charmbracelet/bubbletea/key_windows.go
@@ -7,6 +7,7 @@ import (
"context"
"fmt"
"io"
+ "time"
"github.com/erikgeiser/coninput"
localereader "github.com/mattn/go-localereader"
@@ -25,14 +26,10 @@ func readConInputs(ctx context.Context, msgsch chan<- Msg, con *conInputReader)
var ps coninput.ButtonState // keep track of previous mouse state
var ws coninput.WindowBufferSizeEventRecord // keep track of the last window size event
for {
- events, err := coninput.ReadNConsoleInputs(con.conin, 16)
+ events, err := peekAndReadConsInput(con)
if err != nil {
- if con.isCanceled() {
- return cancelreader.ErrCanceled
- }
- return fmt.Errorf("read coninput events: %w", err)
+ return err
}
-
for _, event := range events {
var msgs []Msg
switch e := event.Unwrap().(type) {
@@ -87,13 +84,57 @@ func readConInputs(ctx context.Context, msgsch chan<- Msg, con *conInputReader)
if err != nil {
return fmt.Errorf("coninput context error: %w", err)
}
- return err
+ return nil
}
}
}
}
}
+// Peek for new input in a tight loop and then read the input.
+// windows.CancelIo* does not work reliably, so peek first and only use the data if
+// the console input has not been cancelled.
+func peekAndReadConsInput(con *conInputReader) ([]coninput.InputRecord, error) {
+ events, err := peekConsInput(con)
+ if err != nil {
+ return events, err
+ }
+ events, err = coninput.ReadNConsoleInputs(con.conin, intToUint32OrDie(len(events)))
+ if con.isCanceled() {
+ return events, cancelreader.ErrCanceled
+ }
+ if err != nil {
+ return events, fmt.Errorf("read coninput events: %w", err)
+ }
+ return events, nil
+}
+
+// Convert i to uint32 or panic if it cannot be converted. Check satisfies lint G115.
+func intToUint32OrDie(i int) uint32 {
+ if i < 0 {
+ panic("cannot convert numEvents " + fmt.Sprint(i) + " to uint32")
+ }
+ return uint32(i)
+}
+
+// Keeps peeking until there is data or the input is cancelled.
+func peekConsInput(con *conInputReader) ([]coninput.InputRecord, error) {
+ for {
+ events, err := coninput.PeekNConsoleInputs(con.conin, 16)
+ if con.isCanceled() {
+ return events, cancelreader.ErrCanceled
+ }
+ if err != nil {
+ return events, fmt.Errorf("peek coninput events: %w", err)
+ }
+ if len(events) > 0 {
+ return events, nil
+ }
+ // Sleep for a bit to avoid busy waiting.
+ time.Sleep(16 * time.Millisecond)
+ }
+}
+
func mouseEventButton(p, s coninput.ButtonState) (button MouseButton, action MouseAction) {
btn := p ^ s
action = MouseActionPress
@@ -114,7 +155,7 @@ func mouseEventButton(p, s coninput.ButtonState) (button MouseButton, action Mou
case s&coninput.FROM_LEFT_4TH_BUTTON_PRESSED > 0:
button = MouseButtonForward
}
- return
+ return button, action
}
switch {
@@ -147,7 +188,7 @@ func mouseEvent(p coninput.ButtonState, e coninput.MouseEventRecord) MouseMsg {
if ev.Action == MouseActionRelease {
ev.Type = MouseRelease
}
- switch ev.Button {
+ switch ev.Button { //nolint:exhaustive
case MouseButtonLeft:
ev.Type = MouseLeft
case MouseButtonMiddle:
@@ -190,7 +231,7 @@ func keyType(e coninput.KeyEventRecord) KeyType {
shiftPressed := e.ControlKeyState.Contains(coninput.SHIFT_PRESSED)
ctrlPressed := e.ControlKeyState.Contains(coninput.LEFT_CTRL_PRESSED | coninput.RIGHT_CTRL_PRESSED)
- switch code {
+ switch code { //nolint:exhaustive
case coninput.VK_RETURN:
return KeyEnter
case coninput.VK_BACK:
@@ -276,6 +317,46 @@ func keyType(e coninput.KeyEventRecord) KeyType {
return KeyPgDown
case coninput.VK_DELETE:
return KeyDelete
+ case coninput.VK_F1:
+ return KeyF1
+ case coninput.VK_F2:
+ return KeyF2
+ case coninput.VK_F3:
+ return KeyF3
+ case coninput.VK_F4:
+ return KeyF4
+ case coninput.VK_F5:
+ return KeyF5
+ case coninput.VK_F6:
+ return KeyF6
+ case coninput.VK_F7:
+ return KeyF7
+ case coninput.VK_F8:
+ return KeyF8
+ case coninput.VK_F9:
+ return KeyF9
+ case coninput.VK_F10:
+ return KeyF10
+ case coninput.VK_F11:
+ return KeyF11
+ case coninput.VK_F12:
+ return KeyF12
+ case coninput.VK_F13:
+ return KeyF13
+ case coninput.VK_F14:
+ return KeyF14
+ case coninput.VK_F15:
+ return KeyF15
+ case coninput.VK_F16:
+ return KeyF16
+ case coninput.VK_F17:
+ return KeyF17
+ case coninput.VK_F18:
+ return KeyF18
+ case coninput.VK_F19:
+ return KeyF19
+ case coninput.VK_F20:
+ return KeyF20
default:
switch {
case e.ControlKeyState.Contains(coninput.LEFT_CTRL_PRESSED) && e.ControlKeyState.Contains(coninput.RIGHT_ALT_PRESSED):
@@ -348,7 +429,7 @@ func keyType(e coninput.KeyEventRecord) KeyType {
return KeyCtrlUnderscore
}
- switch code {
+ switch code { //nolint:exhaustive
case coninput.VK_OEM_4:
return KeyCtrlOpenBracket
case coninput.VK_OEM_6:
diff --git a/vendor/github.com/charmbracelet/bubbletea/logging.go b/vendor/github.com/charmbracelet/bubbletea/logging.go
index a5311819..349758cb 100644
--- a/vendor/github.com/charmbracelet/bubbletea/logging.go
+++ b/vendor/github.com/charmbracelet/bubbletea/logging.go
@@ -33,7 +33,7 @@ type LogOptionsSetter interface {
// LogToFileWith does allows to call LogToFile with a custom LogOptionsSetter.
func LogToFileWith(path string, prefix string, log LogOptionsSetter) (*os.File, error) {
- f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o600) //nolint:gomnd
+ f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o600) //nolint:mnd
if err != nil {
return nil, fmt.Errorf("error opening file for logging: %w", err)
}
diff --git a/vendor/github.com/charmbracelet/bubbletea/mouse.go b/vendor/github.com/charmbracelet/bubbletea/mouse.go
index 6ec51cc0..490f49ac 100644
--- a/vendor/github.com/charmbracelet/bubbletea/mouse.go
+++ b/vendor/github.com/charmbracelet/bubbletea/mouse.go
@@ -172,7 +172,7 @@ const (
func parseSGRMouseEvent(buf []byte) MouseEvent {
str := string(buf[3:])
matches := mouseSGRRegex.FindStringSubmatch(str)
- if len(matches) != 5 { //nolint:gomnd
+ if len(matches) != 5 { //nolint:mnd
// Unreachable, we already checked the regex in `detectOneMsg`.
panic("invalid mouse event")
}
diff --git a/vendor/github.com/charmbracelet/bubbletea/nil_renderer.go b/vendor/github.com/charmbracelet/bubbletea/nil_renderer.go
index 0bc4a172..1bc909b6 100644
--- a/vendor/github.com/charmbracelet/bubbletea/nil_renderer.go
+++ b/vendor/github.com/charmbracelet/bubbletea/nil_renderer.go
@@ -26,3 +26,4 @@ func (n nilRenderer) setWindowTitle(_ string) {}
func (n nilRenderer) reportFocus() bool { return false }
func (n nilRenderer) enableReportFocus() {}
func (n nilRenderer) disableReportFocus() {}
+func (n nilRenderer) resetLinesRendered() {}
diff --git a/vendor/github.com/charmbracelet/bubbletea/options.go b/vendor/github.com/charmbracelet/bubbletea/options.go
index c509353b..49cf378b 100644
--- a/vendor/github.com/charmbracelet/bubbletea/options.go
+++ b/vendor/github.com/charmbracelet/bubbletea/options.go
@@ -19,7 +19,7 @@ type ProgramOption func(*Program)
// cancelled it will exit with an error ErrProgramKilled.
func WithContext(ctx context.Context) ProgramOption {
return func(p *Program) {
- p.ctx = ctx
+ p.externalCtx = ctx
}
}
diff --git a/vendor/github.com/charmbracelet/bubbletea/renderer.go b/vendor/github.com/charmbracelet/bubbletea/renderer.go
index 9eb7943b..5b6df66a 100644
--- a/vendor/github.com/charmbracelet/bubbletea/renderer.go
+++ b/vendor/github.com/charmbracelet/bubbletea/renderer.go
@@ -79,6 +79,9 @@ type renderer interface {
// disableReportFocus stops reporting focus events to the program.
disableReportFocus()
+
+ // resetLinesRendered ensures exec output remains on screen on exit
+ resetLinesRendered()
}
// repaintMsg forces a full repaint.
diff --git a/vendor/github.com/charmbracelet/bubbletea/standard_renderer.go b/vendor/github.com/charmbracelet/bubbletea/standard_renderer.go
index 45e8c82d..d6dce6db 100644
--- a/vendor/github.com/charmbracelet/bubbletea/standard_renderer.go
+++ b/vendor/github.com/charmbracelet/bubbletea/standard_renderer.go
@@ -545,6 +545,10 @@ func (r *standardRenderer) clearIgnoredLines() {
r.ignoreLines = nil
}
+func (r *standardRenderer) resetLinesRendered() {
+ r.linesRendered = 0
+}
+
// insertTop effectively scrolls up. It inserts lines at the top of a given
// area designated to be a scrollable region, pushing everything else down.
// This is roughly how ncurses does it.
diff --git a/vendor/github.com/charmbracelet/bubbletea/tea.go b/vendor/github.com/charmbracelet/bubbletea/tea.go
index 0bc915e5..c4866f22 100644
--- a/vendor/github.com/charmbracelet/bubbletea/tea.go
+++ b/vendor/github.com/charmbracelet/bubbletea/tea.go
@@ -27,6 +27,9 @@ import (
"golang.org/x/sync/errgroup"
)
+// ErrProgramPanic is returned by [Program.Run] when the program recovers from a panic.
+var ErrProgramPanic = errors.New("program experienced a panic")
+
// ErrProgramKilled is returned by [Program.Run] when the program gets killed.
var ErrProgramKilled = errors.New("program was killed")
@@ -147,6 +150,12 @@ type Program struct {
inputType inputType
+ // externalCtx is the context passed in via WithContext, defaulting to
+ // context.Background() when none was provided; the internal context is derived from it.
+ externalCtx context.Context
+
+ // ctx is the program's internal context for signalling internal teardown.
+ // It is derived from externalCtx in NewProgram().
ctx context.Context
cancel context.CancelFunc
@@ -243,11 +252,11 @@ func NewProgram(model Model, opts ...ProgramOption) *Program {
// A context can be provided with a ProgramOption, but if none was provided
// we'll use the default background context.
- if p.ctx == nil {
- p.ctx = context.Background()
+ if p.externalCtx == nil {
+ p.externalCtx = context.Background()
}
// Initialize context and teardown channel.
- p.ctx, p.cancel = context.WithCancel(p.ctx)
+ p.ctx, p.cancel = context.WithCancel(p.externalCtx)
// if no output was set, set it to stdout
if p.output == nil {
@@ -346,7 +355,11 @@ func (p *Program) handleCommands(cmds chan Cmd) chan struct{} {
go func() {
// Recover from panics.
if !p.startupOptions.has(withoutCatchPanics) {
- defer p.recoverFromPanic()
+ defer func() {
+ if r := recover(); r != nil {
+ p.recoverFromGoPanic(r)
+ }
+ }()
}
msg := cmd() // this can be long.
@@ -422,7 +435,7 @@ func (p *Program) eventLoop(model Model, cmds chan Cmd) (Model, error) {
// work.
if runtime.GOOS == "windows" && !p.mouseMode {
p.mouseMode = true
- p.initCancelReader(true) //nolint:errcheck
+ p.initCancelReader(true) //nolint:errcheck,gosec
}
case disableMouseMsg:
@@ -433,7 +446,7 @@ func (p *Program) eventLoop(model Model, cmds chan Cmd) (Model, error) {
// mouse events.
if runtime.GOOS == "windows" && p.mouseMode {
p.mouseMode = false
- p.initCancelReader(true) //nolint:errcheck
+ p.initCancelReader(true) //nolint:errcheck,gosec
}
case showCursorMsg:
@@ -460,7 +473,11 @@ func (p *Program) eventLoop(model Model, cmds chan Cmd) (Model, error) {
case BatchMsg:
for _, cmd := range msg {
- cmds <- cmd
+ select {
+ case <-p.ctx.Done():
+ return model, nil
+ case cmds <- cmd:
+ }
}
continue
@@ -483,7 +500,7 @@ func (p *Program) eventLoop(model Model, cmds chan Cmd) (Model, error) {
})
}
- //nolint:errcheck
+ //nolint:errcheck,gosec
g.Wait() // wait for all commands from batch msg to finish
continue
}
@@ -506,7 +523,13 @@ func (p *Program) eventLoop(model Model, cmds chan Cmd) (Model, error) {
var cmd Cmd
model, cmd = model.Update(msg) // run update
- cmds <- cmd // process command (if any)
+
+ select {
+ case <-p.ctx.Done():
+ return model, nil
+ case cmds <- cmd: // process command (if any)
+ }
+
p.renderer.write(model.View()) // send view to renderer
}
}
@@ -515,11 +538,15 @@ func (p *Program) eventLoop(model Model, cmds chan Cmd) (Model, error) {
// Run initializes the program and runs its event loops, blocking until it gets
// terminated by either [Program.Quit], [Program.Kill], or its signal handler.
// Returns the final model.
-func (p *Program) Run() (Model, error) {
+func (p *Program) Run() (returnModel Model, returnErr error) {
p.handlers = channelHandlers{}
cmds := make(chan Cmd)
- p.errs = make(chan error)
- p.finished = make(chan struct{}, 1)
+ p.errs = make(chan error, 1)
+
+ p.finished = make(chan struct{})
+ defer func() {
+ close(p.finished)
+ }()
defer p.cancel()
@@ -568,7 +595,12 @@ func (p *Program) Run() (Model, error) {
// Recover from panics.
if !p.startupOptions.has(withoutCatchPanics) {
- defer p.recoverFromPanic()
+ defer func() {
+ if r := recover(); r != nil {
+ returnErr = fmt.Errorf("%w: %w", ErrProgramKilled, ErrProgramPanic)
+ p.recoverFromPanic(r)
+ }
+ }()
}
// If no renderer is set use the standard one.
@@ -645,11 +677,27 @@ func (p *Program) Run() (Model, error) {
// Run event loop, handle updates and draw.
model, err := p.eventLoop(model, cmds)
- killed := p.ctx.Err() != nil || err != nil
- if killed && err == nil {
- err = fmt.Errorf("%w: %s", ErrProgramKilled, p.ctx.Err())
+
+ if err == nil && len(p.errs) > 0 {
+ err = <-p.errs // Drain a leftover error in case eventLoop crashed
}
- if err == nil {
+
+ killed := p.externalCtx.Err() != nil || p.ctx.Err() != nil || err != nil
+ if killed {
+ if err == nil && p.externalCtx.Err() != nil {
+ // The external context was cancelled: return its error, since this is the
+ // context the user knows about and should be able to act on.
+ err = fmt.Errorf("%w: %w", ErrProgramKilled, p.externalCtx.Err())
+ } else if err == nil && p.ctx.Err() != nil {
+ // Return only that the program was killed (not the internal mechanism).
+ // The user does not know or need to care about the internal program context.
+ err = ErrProgramKilled
+ } else {
+ // Return that the program was killed and also the error that caused it.
+ err = fmt.Errorf("%w: %w", ErrProgramKilled, err)
+ }
+ } else {
+ // Graceful shutdown of the program (not killed):
// Ensure we rendered the final state of the model.
p.renderer.write(model.View())
}
@@ -704,11 +752,11 @@ func (p *Program) Quit() {
p.Send(Quit())
}
-// Kill stops the program immediately and restores the former terminal state.
+// Kill signals the program to stop immediately and restore the former terminal state.
// The final render that you would normally see when quitting will be skipped.
// [program.Run] returns a [ErrProgramKilled] error.
func (p *Program) Kill() {
- p.shutdown(true)
+ p.cancel()
}
// Wait waits/blocks until the underlying Program finished shutting down.
@@ -717,7 +765,11 @@ func (p *Program) Wait() {
}
// shutdown performs operations to free up resources and restore the terminal
-// to its original state.
+// to its original state. It is called once at the end of the program's lifetime.
+//
+// This method should not be called to signal that the program should be killed or shut down.
+// Doing so can lead to race conditions with the eventual call at the end of the program's lifetime.
+// Use the [Quit] or [Kill] convenience methods instead.
func (p *Program) shutdown(kill bool) {
p.cancel()
@@ -744,19 +796,30 @@ func (p *Program) shutdown(kill bool) {
}
_ = p.restoreTerminalState()
- if !kill {
- p.finished <- struct{}{}
- }
}
// recoverFromPanic recovers from a panic, prints the stack trace, and restores
// the terminal to a usable state.
-func (p *Program) recoverFromPanic() {
- if r := recover(); r != nil {
- p.shutdown(true)
- fmt.Printf("Caught panic:\n\n%s\n\nRestoring terminal...\n\n", r)
- debug.PrintStack()
+func (p *Program) recoverFromPanic(r interface{}) {
+ select {
+ case p.errs <- ErrProgramPanic:
+ default:
}
+ p.shutdown(true) // Ok to call here, p.Run() cannot do it anymore.
+ fmt.Printf("Caught panic:\n\n%s\n\nRestoring terminal...\n\n", r)
+ debug.PrintStack()
+}
+
+// recoverFromGoPanic recovers from a goroutine panic, prints a stack trace and
+// signals for the program to be killed and terminal restored to a usable state.
+func (p *Program) recoverFromGoPanic(r interface{}) {
+ select {
+ case p.errs <- ErrProgramPanic:
+ default:
+ }
+ p.cancel()
+ fmt.Printf("Caught goroutine panic:\n\n%s\n\nRestoring terminal...\n\n", r)
+ debug.PrintStack()
}
// ReleaseTerminal restores the original terminal state and cancels the input
diff --git a/vendor/github.com/charmbracelet/bubbletea/tty.go b/vendor/github.com/charmbracelet/bubbletea/tty.go
index 9490faca..6812bfc5 100644
--- a/vendor/github.com/charmbracelet/bubbletea/tty.go
+++ b/vendor/github.com/charmbracelet/bubbletea/tty.go
@@ -52,7 +52,7 @@ func (p *Program) restoreTerminalState() error {
p.renderer.exitAltScreen()
// give the terminal a moment to catch up
- time.Sleep(time.Millisecond * 10) //nolint:gomnd
+ time.Sleep(time.Millisecond * 10) //nolint:mnd
}
}
@@ -109,7 +109,7 @@ func (p *Program) readLoop() {
func (p *Program) waitForReadLoop() {
select {
case <-p.readLoopDone:
- case <-time.After(500 * time.Millisecond): //nolint:gomnd
+ case <-time.After(500 * time.Millisecond): //nolint:mnd
// The read loop hangs, which means the input
// cancelReader's cancel function has returned true even
// though it was not able to cancel the read.
diff --git a/vendor/github.com/charmbracelet/bubbletea/tty_windows.go b/vendor/github.com/charmbracelet/bubbletea/tty_windows.go
index a3a2525b..154491a6 100644
--- a/vendor/github.com/charmbracelet/bubbletea/tty_windows.go
+++ b/vendor/github.com/charmbracelet/bubbletea/tty_windows.go
@@ -19,7 +19,7 @@ func (p *Program) initInput() (err error) {
p.ttyInput = f
p.previousTtyInputState, err = term.MakeRaw(p.ttyInput.Fd())
if err != nil {
- return err
+ return fmt.Errorf("error making raw: %w", err)
}
// Enable VT input
@@ -38,7 +38,7 @@ func (p *Program) initInput() (err error) {
p.ttyOutput = f
p.previousOutputState, err = term.GetState(f.Fd())
if err != nil {
- return err
+ return fmt.Errorf("error getting state: %w", err)
}
var mode uint32
@@ -51,14 +51,14 @@ func (p *Program) initInput() (err error) {
}
}
- return
+ return nil
}
// Open the Windows equivalent of a TTY.
func openInputTTY() (*os.File, error) {
f, err := os.OpenFile("CONIN$", os.O_RDWR, 0o644)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("error opening file: %w", err)
}
return f, nil
}
diff --git a/vendor/github.com/charmbracelet/colorprofile/.golangci-soft.yml b/vendor/github.com/charmbracelet/colorprofile/.golangci-soft.yml
deleted file mode 100644
index d325d4fc..00000000
--- a/vendor/github.com/charmbracelet/colorprofile/.golangci-soft.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-run:
- tests: false
- issues-exit-code: 0
-
-issues:
- include:
- - EXC0001
- - EXC0005
- - EXC0011
- - EXC0012
- - EXC0013
-
- max-issues-per-linter: 0
- max-same-issues: 0
-
-linters:
- enable:
- - exhaustive
- - goconst
- - godot
- - godox
- - mnd
- - gomoddirectives
- - goprintffuncname
- - misspell
- - nakedret
- - nestif
- - noctx
- - nolintlint
- - prealloc
- - wrapcheck
-
- # disable default linters, they are already enabled in .golangci.yml
- disable:
- - errcheck
- - gosimple
- - govet
- - ineffassign
- - staticcheck
- - unused
diff --git a/vendor/github.com/charmbracelet/colorprofile/.golangci.yml b/vendor/github.com/charmbracelet/colorprofile/.golangci.yml
index d6789e01..be61d89b 100644
--- a/vendor/github.com/charmbracelet/colorprofile/.golangci.yml
+++ b/vendor/github.com/charmbracelet/colorprofile/.golangci.yml
@@ -1,24 +1,23 @@
+version: "2"
run:
tests: false
-
-issues:
- include:
- - EXC0001
- - EXC0005
- - EXC0011
- - EXC0012
- - EXC0013
-
- max-issues-per-linter: 0
- max-same-issues: 0
-
linters:
enable:
- bodyclose
- - gofumpt
- - goimports
+ - exhaustive
+ - goconst
+ - godot
+ - godox
+ - gomoddirectives
+ - goprintffuncname
- gosec
+ - misspell
+ - nakedret
+ - nestif
- nilerr
+ - noctx
+ - nolintlint
+ - prealloc
- revive
- rowserrcheck
- sqlclosecheck
@@ -26,3 +25,17 @@ linters:
- unconvert
- unparam
- whitespace
+ - wrapcheck
+ exclusions:
+ generated: lax
+ presets:
+ - common-false-positives
+issues:
+ max-issues-per-linter: 0
+ max-same-issues: 0
+formatters:
+ enable:
+ - gofumpt
+ - goimports
+ exclusions:
+ generated: lax
diff --git a/vendor/github.com/charmbracelet/colorprofile/doc.go b/vendor/github.com/charmbracelet/colorprofile/doc.go
new file mode 100644
index 00000000..bd522e90
--- /dev/null
+++ b/vendor/github.com/charmbracelet/colorprofile/doc.go
@@ -0,0 +1,4 @@
+// Package colorprofile provides a way to downsample ANSI escape sequence
+// colors and styles automatically based on output, environment variables, and
+// Terminfo databases.
+package colorprofile
diff --git a/vendor/github.com/charmbracelet/colorprofile/env.go b/vendor/github.com/charmbracelet/colorprofile/env.go
index 8df3d8f7..749f989e 100644
--- a/vendor/github.com/charmbracelet/colorprofile/env.go
+++ b/vendor/github.com/charmbracelet/colorprofile/env.go
@@ -12,6 +12,8 @@ import (
"github.com/xo/terminfo"
)
+const dumbTerm = "dumb"
+
// Detect returns the color profile based on the terminal output, and
// environment variables. This respects NO_COLOR, CLICOLOR, and CLICOLOR_FORCE
// environment variables.
@@ -29,10 +31,10 @@ import (
// See https://no-color.org/ and https://bixense.com/clicolors/ for more information.
func Detect(output io.Writer, env []string) Profile {
out, ok := output.(term.File)
- isatty := ok && term.IsTerminal(out.Fd())
environ := newEnviron(env)
- term := environ.get("TERM")
- isDumb := term == "dumb"
+ isatty := isTTYForced(environ) || (ok && term.IsTerminal(out.Fd()))
+ term, ok := environ.lookup("TERM")
+ isDumb := !ok || term == dumbTerm
envp := colorProfile(isatty, environ)
if envp == TrueColor || envNoColor(environ) {
// We already know we have TrueColor, or NO_COLOR is set.
@@ -69,7 +71,8 @@ func Env(env []string) (p Profile) {
}
func colorProfile(isatty bool, env environ) (p Profile) {
- isDumb := env.get("TERM") == "dumb"
+ term, ok := env.lookup("TERM")
+ isDumb := (!ok && runtime.GOOS != "windows") || term == dumbTerm
envp := envColorProfile(env)
if !isatty || isDumb {
// Check if the output is a terminal.
@@ -83,7 +86,7 @@ func colorProfile(isatty bool, env environ) (p Profile) {
if p > Ascii {
p = Ascii
}
- return
+ return //nolint:nakedret
}
if cliColorForced(env) {
@@ -94,7 +97,7 @@ func colorProfile(isatty bool, env environ) (p Profile) {
p = envp
}
- return
+ return //nolint:nakedret
}
if cliColor(env) {
@@ -123,6 +126,11 @@ func cliColorForced(env environ) bool {
return cliColorForce
}
+func isTTYForced(env environ) bool {
+ skip, _ := strconv.ParseBool(env.get("TTY_FORCE"))
+ return skip
+}
+
func colorTerm(env environ) bool {
colorTerm := strings.ToLower(env.get("COLORTERM"))
return colorTerm == "truecolor" || colorTerm == "24bit" ||
@@ -132,7 +140,7 @@ func colorTerm(env environ) bool {
// envColorProfile returns infers the color profile from the environment.
func envColorProfile(env environ) (p Profile) {
term, ok := env.lookup("TERM")
- if !ok || len(term) == 0 || term == "dumb" {
+ if !ok || len(term) == 0 || term == dumbTerm {
p = NoTTY
if runtime.GOOS == "windows" {
// Use Windows API to detect color profile. Windows Terminal and
@@ -184,7 +192,12 @@ func envColorProfile(env environ) (p Profile) {
p = ANSI256
}
- return
+ // Direct color terminals support true colors.
+ if strings.HasSuffix(term, "direct") {
+ return TrueColor
+ }
+
+ return //nolint:nakedret
}
// Terminfo returns the color profile based on the terminal's terminfo
@@ -278,10 +291,3 @@ func (e environ) get(key string) string {
v, _ := e.lookup(key)
return v
}
-
-func max[T ~byte | ~int](a, b T) T {
- if a > b {
- return a
- }
- return b
-}
diff --git a/vendor/github.com/charmbracelet/colorprofile/profile.go b/vendor/github.com/charmbracelet/colorprofile/profile.go
index 97e37ac3..2bcb37c6 100644
--- a/vendor/github.com/charmbracelet/colorprofile/profile.go
+++ b/vendor/github.com/charmbracelet/colorprofile/profile.go
@@ -12,15 +12,15 @@ import (
type Profile byte
const (
- // NoTTY, not a terminal profile.
+ // NoTTY is a profile with no terminal support.
NoTTY Profile = iota
- // Ascii, uncolored profile.
+ // Ascii is a profile with no color support.
Ascii //nolint:revive
- // ANSI, 4-bit color profile.
+ // ANSI is a profile with 16 colors (4-bit).
ANSI
- // ANSI256, 8-bit color profile.
+ // ANSI256 is a profile with 256 colors (8-bit).
ANSI256
- // TrueColor, 24-bit color profile.
+ // TrueColor is a profile with 16 million colors (24-bit).
TrueColor
)
diff --git a/vendor/github.com/charmbracelet/colorprofile/writer.go b/vendor/github.com/charmbracelet/colorprofile/writer.go
index d04b3b99..47f0c6eb 100644
--- a/vendor/github.com/charmbracelet/colorprofile/writer.go
+++ b/vendor/github.com/charmbracelet/colorprofile/writer.go
@@ -2,6 +2,7 @@ package colorprofile
import (
"bytes"
+ "fmt"
"image/color"
"io"
"strconv"
@@ -37,11 +38,13 @@ type Writer struct {
func (w *Writer) Write(p []byte) (int, error) {
switch w.Profile {
case TrueColor:
- return w.Forward.Write(p)
+ return w.Forward.Write(p) //nolint:wrapcheck
case NoTTY:
- return io.WriteString(w.Forward, ansi.Strip(string(p)))
- default:
+ return io.WriteString(w.Forward, ansi.Strip(string(p))) //nolint:wrapcheck
+ case Ascii, ANSI, ANSI256:
return w.downsample(p)
+ default:
+ return 0, fmt.Errorf("invalid profile: %v", w.Profile)
}
}
@@ -63,7 +66,7 @@ func (w *Writer) downsample(p []byte) (int, error) {
default:
// If we're not a style SGR sequence, just write the bytes.
if n, err := buf.Write(seq); err != nil {
- return n, err
+ return n, err //nolint:wrapcheck
}
}
@@ -71,7 +74,7 @@ func (w *Writer) downsample(p []byte) (int, error) {
state = newState
}
- return w.Forward.Write(buf.Bytes())
+ return w.Forward.Write(buf.Bytes()) //nolint:wrapcheck
}
// WriteString writes the given text to the underlying writer.
diff --git a/vendor/github.com/charmbracelet/log/.golangci.yml b/vendor/github.com/charmbracelet/log/.golangci.yml
index 90c5c08b..be61d89b 100644
--- a/vendor/github.com/charmbracelet/log/.golangci.yml
+++ b/vendor/github.com/charmbracelet/log/.golangci.yml
@@ -1,17 +1,6 @@
+version: "2"
run:
tests: false
-
-issues:
- include:
- - EXC0001
- - EXC0005
- - EXC0011
- - EXC0012
- - EXC0013
-
- max-issues-per-linter: 0
- max-same-issues: 0
-
linters:
enable:
- bodyclose
@@ -19,8 +8,6 @@ linters:
- goconst
- godot
- godox
- - gofumpt
- - goimports
- gomoddirectives
- goprintffuncname
- gosec
@@ -39,3 +26,16 @@ linters:
- unparam
- whitespace
- wrapcheck
+ exclusions:
+ generated: lax
+ presets:
+ - common-false-positives
+issues:
+ max-issues-per-linter: 0
+ max-same-issues: 0
+formatters:
+ enable:
+ - gofumpt
+ - goimports
+ exclusions:
+ generated: lax
diff --git a/vendor/github.com/charmbracelet/log/json.go b/vendor/github.com/charmbracelet/log/json.go
index 19d4ddd1..f84083d4 100644
--- a/vendor/github.com/charmbracelet/log/json.go
+++ b/vendor/github.com/charmbracelet/log/json.go
@@ -88,7 +88,13 @@ func (l *Logger) writeSlogValue(jw *jsonWriter, v slogValue) {
}
jw.end()
default:
- jw.objectValue(v.Any())
+ a := v.Any()
+ _, jm := a.(json.Marshaler)
+ if err, ok := a.(error); ok && !jm {
+ jw.objectValue(err.Error())
+ } else {
+ jw.objectValue(a)
+ }
}
}
diff --git a/vendor/github.com/charmbracelet/log/logger_121.go b/vendor/github.com/charmbracelet/log/logger_121.go
index b5cf9564..478e1a0d 100644
--- a/vendor/github.com/charmbracelet/log/logger_121.go
+++ b/vendor/github.com/charmbracelet/log/logger_121.go
@@ -17,6 +17,8 @@ type (
slogLogValuer = slog.LogValuer
)
+var slogAnyValue = slog.AnyValue
+
const slogKindGroup = slog.KindGroup
// Enabled reports whether the logger is enabled for the given level.
diff --git a/vendor/github.com/charmbracelet/log/logger_no121.go b/vendor/github.com/charmbracelet/log/logger_no121.go
index ce8b8a2f..ea8bb108 100644
--- a/vendor/github.com/charmbracelet/log/logger_no121.go
+++ b/vendor/github.com/charmbracelet/log/logger_no121.go
@@ -18,6 +18,8 @@ type (
slogLogValuer = slog.LogValuer
)
+var slogAnyValue = slog.AnyValue
+
const slogKindGroup = slog.KindGroup
// Enabled reports whether the logger is enabled for the given level.
diff --git a/vendor/github.com/charmbracelet/x/ansi/ansi.go b/vendor/github.com/charmbracelet/x/ansi/ansi.go
index 48d873c3..d5a2f251 100644
--- a/vendor/github.com/charmbracelet/x/ansi/ansi.go
+++ b/vendor/github.com/charmbracelet/x/ansi/ansi.go
@@ -7,5 +7,5 @@ import "io"
//
// This is a syntactic sugar over [io.WriteString].
func Execute(w io.Writer, s string) (int, error) {
- return io.WriteString(w, s)
+ return io.WriteString(w, s) //nolint:wrapcheck
}
diff --git a/vendor/github.com/charmbracelet/x/ansi/background.go b/vendor/github.com/charmbracelet/x/ansi/background.go
index 2383cf09..46f82142 100644
--- a/vendor/github.com/charmbracelet/x/ansi/background.go
+++ b/vendor/github.com/charmbracelet/x/ansi/background.go
@@ -3,63 +3,93 @@ package ansi
import (
"fmt"
"image/color"
+
+ "github.com/lucasb-eyer/go-colorful"
)
-// Colorizer is a [color.Color] interface that can be formatted as a string.
-type Colorizer interface {
- color.Color
- fmt.Stringer
+// HexColor is a [color.Color] that can be formatted as a hex string.
+type HexColor string
+
+// RGBA returns the RGBA values of the color.
+func (h HexColor) RGBA() (r, g, b, a uint32) {
+ hex := h.color()
+ if hex == nil {
+ return 0, 0, 0, 0
+ }
+ return hex.RGBA()
}
-// HexColorizer is a [color.Color] that can be formatted as a hex string.
-type HexColorizer struct{ color.Color }
-
-var _ Colorizer = HexColorizer{}
+// Hex returns the hex representation of the color. If the color is invalid, it
+// returns an empty string.
+func (h HexColor) Hex() string {
+ hex := h.color()
+ if hex == nil {
+ return ""
+ }
+ return hex.Hex()
+}
// String returns the color as a hex string. If the color is nil, an empty
// string is returned.
-func (h HexColorizer) String() string {
- if h.Color == nil {
- return ""
- }
- r, g, b, _ := h.RGBA()
- // Get the lower 8 bits
- r &= 0xff
- g &= 0xff
- b &= 0xff
- return fmt.Sprintf("#%02x%02x%02x", uint8(r), uint8(g), uint8(b)) //nolint:gosec
+func (h HexColor) String() string {
+ return h.Hex()
}
-// XRGBColorizer is a [color.Color] that can be formatted as an XParseColor
+// color returns the underlying color of the HexColor.
+func (h HexColor) color() *colorful.Color {
+ hex, err := colorful.Hex(string(h))
+ if err != nil {
+ return nil
+ }
+ return &hex
+}
+
+// XRGBColor is a [color.Color] that can be formatted as an XParseColor
// rgb: string.
//
// See: https://linux.die.net/man/3/xparsecolor
-type XRGBColorizer struct{ color.Color }
+type XRGBColor struct {
+ color.Color
+}
-var _ Colorizer = XRGBColorizer{}
+// RGBA returns the RGBA values of the color.
+func (x XRGBColor) RGBA() (r, g, b, a uint32) {
+ if x.Color == nil {
+ return 0, 0, 0, 0
+ }
+ return x.Color.RGBA()
+}
// String returns the color as an XParseColor rgb: string. If the color is nil,
// an empty string is returned.
-func (x XRGBColorizer) String() string {
+func (x XRGBColor) String() string {
if x.Color == nil {
return ""
}
- r, g, b, _ := x.RGBA()
+ r, g, b, _ := x.Color.RGBA()
// Get the lower 8 bits
return fmt.Sprintf("rgb:%04x/%04x/%04x", r, g, b)
}
-// XRGBAColorizer is a [color.Color] that can be formatted as an XParseColor
+// XRGBAColor is a [color.Color] that can be formatted as an XParseColor
// rgba: string.
//
// See: https://linux.die.net/man/3/xparsecolor
-type XRGBAColorizer struct{ color.Color }
+type XRGBAColor struct {
+ color.Color
+}
-var _ Colorizer = XRGBAColorizer{}
+// RGBA returns the RGBA values of the color.
+func (x XRGBAColor) RGBA() (r, g, b, a uint32) {
+ if x.Color == nil {
+ return 0, 0, 0, 0
+ }
+ return x.Color.RGBA()
+}
// String returns the color as an XParseColor rgba: string. If the color is nil,
// an empty string is returned.
-func (x XRGBAColorizer) String() string {
+func (x XRGBAColor) String() string {
if x.Color == nil {
return ""
}
@@ -74,19 +104,12 @@ func (x XRGBAColorizer) String() string {
// OSC 10 ; color ST
// OSC 10 ; color BEL
//
-// Where color is the encoded color number.
+// Where color is the encoded color number. Most terminals support hex,
+// XParseColor rgb: and rgba: strings. You could use [HexColor], [XRGBColor],
+// or [XRGBAColor] to format the color.
//
// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Operating-System-Commands
-func SetForegroundColor(c color.Color) string {
- var s string
- switch c := c.(type) {
- case Colorizer:
- s = c.String()
- case fmt.Stringer:
- s = c.String()
- default:
- s = HexColorizer{c}.String()
- }
+func SetForegroundColor(s string) string {
return "\x1b]10;" + s + "\x07"
}
@@ -108,19 +131,12 @@ const ResetForegroundColor = "\x1b]110\x07"
// OSC 11 ; color ST
// OSC 11 ; color BEL
//
-// Where color is the encoded color number.
+// Where color is the encoded color number. Most terminals support hex,
+// XParseColor rgb: and rgba: strings. You could use [HexColor], [XRGBColor],
+// or [XRGBAColor] to format the color.
//
// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Operating-System-Commands
-func SetBackgroundColor(c color.Color) string {
- var s string
- switch c := c.(type) {
- case Colorizer:
- s = c.String()
- case fmt.Stringer:
- s = c.String()
- default:
- s = HexColorizer{c}.String()
- }
+func SetBackgroundColor(s string) string {
return "\x1b]11;" + s + "\x07"
}
@@ -141,19 +157,12 @@ const ResetBackgroundColor = "\x1b]111\x07"
// OSC 12 ; color ST
// OSC 12 ; color BEL
//
-// Where color is the encoded color number.
+// Where color is the encoded color number. Most terminals support hex,
+// XParseColor rgb: and rgba: strings. You could use [HexColor], [XRGBColor],
+// or [XRGBAColor] to format the color.
//
// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Operating-System-Commands
-func SetCursorColor(c color.Color) string {
- var s string
- switch c := c.(type) {
- case Colorizer:
- s = c.String()
- case fmt.Stringer:
- s = c.String()
- default:
- s = HexColorizer{c}.String()
- }
+func SetCursorColor(s string) string {
return "\x1b]12;" + s + "\x07"
}
diff --git a/vendor/github.com/charmbracelet/x/ansi/charset.go b/vendor/github.com/charmbracelet/x/ansi/charset.go
index 50fff51f..02edfe7a 100644
--- a/vendor/github.com/charmbracelet/x/ansi/charset.go
+++ b/vendor/github.com/charmbracelet/x/ansi/charset.go
@@ -39,17 +39,17 @@ func SCS(gset byte, charset byte) string {
return SelectCharacterSet(gset, charset)
}
-// Locking Shift 1 Right (LS1R) shifts G1 into GR character set.
+// LS1R (Locking Shift 1 Right) shifts G1 into GR character set.
const LS1R = "\x1b~"
-// Locking Shift 2 (LS2) shifts G2 into GL character set.
+// LS2 (Locking Shift 2) shifts G2 into GL character set.
const LS2 = "\x1bn"
-// Locking Shift 2 Right (LS2R) shifts G2 into GR character set.
+// LS2R (Locking Shift 2 Right) shifts G2 into GR character set.
const LS2R = "\x1b}"
-// Locking Shift 3 (LS3) shifts G3 into GL character set.
+// LS3 (Locking Shift 3) shifts G3 into GL character set.
const LS3 = "\x1bo"
-// Locking Shift 3 Right (LS3R) shifts G3 into GR character set.
+// LS3R (Locking Shift 3 Right) shifts G3 into GR character set.
const LS3R = "\x1b|"
diff --git a/vendor/github.com/charmbracelet/x/ansi/color.go b/vendor/github.com/charmbracelet/x/ansi/color.go
index 77f8a08d..09feb975 100644
--- a/vendor/github.com/charmbracelet/x/ansi/color.go
+++ b/vendor/github.com/charmbracelet/x/ansi/color.go
@@ -2,34 +2,9 @@ package ansi
import (
"image/color"
-)
-// Technically speaking, the 16 basic ANSI colors are arbitrary and can be
-// customized at the terminal level. Given that, we're returning what we feel
-// are good defaults.
-//
-// This could also be a slice, but we use a map to make the mappings very
-// explicit.
-//
-// See: https://www.ditig.com/publications/256-colors-cheat-sheet
-var lowANSI = map[uint32]uint32{
- 0: 0x000000, // black
- 1: 0x800000, // red
- 2: 0x008000, // green
- 3: 0x808000, // yellow
- 4: 0x000080, // blue
- 5: 0x800080, // magenta
- 6: 0x008080, // cyan
- 7: 0xc0c0c0, // white
- 8: 0x808080, // bright black
- 9: 0xff0000, // bright red
- 10: 0x00ff00, // bright green
- 11: 0xffff00, // bright yellow
- 12: 0x0000ff, // bright blue
- 13: 0xff00ff, // bright magenta
- 14: 0x00ffff, // bright cyan
- 15: 0xffffff, // bright white
-}
+ "github.com/lucasb-eyer/go-colorful"
+)
// Color is a color that can be used in a terminal. ANSI (including
// ANSI256) and 24-bit "true colors" fall under this category.
@@ -100,28 +75,33 @@ func (c BasicColor) RGBA() (uint32, uint32, uint32, uint32) {
return 0, 0, 0, 0xffff
}
- r, g, b := ansiToRGB(ansi)
- return toRGBA(r, g, b)
+ return ansiToRGB(byte(ansi)).RGBA()
}
-// ExtendedColor is an ANSI 256 (8-bit) color with a value from 0 to 255.
-type ExtendedColor uint8
+// IndexedColor is an ANSI 256 (8-bit) color with a value from 0 to 255.
+type IndexedColor uint8
-var _ Color = ExtendedColor(0)
+var _ Color = IndexedColor(0)
// RGBA returns the red, green, blue and alpha components of the color. It
// satisfies the color.Color interface.
-func (c ExtendedColor) RGBA() (uint32, uint32, uint32, uint32) {
- r, g, b := ansiToRGB(uint32(c))
- return toRGBA(r, g, b)
+func (c IndexedColor) RGBA() (uint32, uint32, uint32, uint32) {
+ return ansiToRGB(byte(c)).RGBA()
}
+// ExtendedColor is an ANSI 256 (8-bit) color with a value from 0 to 255.
+//
+// Deprecated: use [IndexedColor] instead.
+type ExtendedColor = IndexedColor
+
// TrueColor is a 24-bit color that can be used in the terminal.
// This can be used to represent RGB colors.
//
// For example, the color red can be represented as:
//
// TrueColor(0xff0000)
+//
+// Deprecated: use [RGBColor] instead.
type TrueColor uint32
var _ Color = TrueColor(0)
@@ -133,44 +113,25 @@ func (c TrueColor) RGBA() (uint32, uint32, uint32, uint32) {
return toRGBA(r, g, b)
}
+// RGBColor is a 24-bit color that can be used in the terminal.
+// This can be used to represent RGB colors.
+type RGBColor struct {
+ R uint8
+ G uint8
+ B uint8
+}
+
+// RGBA returns the red, green, blue and alpha components of the color. It
+// satisfies the color.Color interface.
+func (c RGBColor) RGBA() (uint32, uint32, uint32, uint32) {
+ return toRGBA(uint32(c.R), uint32(c.G), uint32(c.B))
+}
+
// ansiToRGB converts an ANSI color to a 24-bit RGB color.
//
// r, g, b := ansiToRGB(57)
-func ansiToRGB(ansi uint32) (uint32, uint32, uint32) {
- // For out-of-range values return black.
- if ansi > 255 {
- return 0, 0, 0
- }
-
- // Low ANSI.
- if ansi < 16 {
- h, ok := lowANSI[ansi]
- if !ok {
- return 0, 0, 0
- }
- r, g, b := hexToRGB(h)
- return r, g, b
- }
-
- // Grays.
- if ansi > 231 {
- s := (ansi-232)*10 + 8
- return s, s, s
- }
-
- // ANSI256.
- n := ansi - 16
- b := n % 6
- g := (n - b) / 6 % 6
- r := (n - b - g*6) / 36 % 6
- for _, v := range []*uint32{&r, &g, &b} {
- if *v > 0 {
- c := *v*40 + 55
- *v = c
- }
- }
-
- return r, g, b
+func ansiToRGB(ansi byte) color.Color {
+ return ansiHex[ansi]
}
// hexToRGB converts a number in hexadecimal format to red, green, and blue
@@ -194,3 +155,630 @@ func toRGBA(r, g, b uint32) (uint32, uint32, uint32, uint32) {
b |= b << 8
return r, g, b, 0xffff
}
+
+//nolint:unused
+func distSq(r1, g1, b1, r2, g2, b2 int) int {
+ return ((r1-r2)*(r1-r2) + (g1-g2)*(g1-g2) + (b1-b2)*(b1-b2))
+}
+
+func to6Cube[T int | float64](v T) int {
+ if v < 48 {
+ return 0
+ }
+ if v < 115 {
+ return 1
+ }
+ return int((v - 35) / 40)
+}
+
+// Convert256 converts a [color.Color], usually a 24-bit color, to xterm(1) 256
+// color palette.
+//
+// xterm provides a 6x6x6 color cube (16 - 231) and 24 greys (232 - 255). We
+// map our RGB color to the closest in the cube, also work out the closest
+// grey, and use the nearest of the two based on the lightness of the color.
+//
+// Note that the xterm has much lower resolution for darker colors (they are
+// not evenly spread out), so our 6 levels are not evenly spread: 0x0, 0x5f
+// (95), 0x87 (135), 0xaf (175), 0xd7 (215) and 0xff (255). Greys are more
+// evenly spread (8, 18, 28 ... 238).
+func Convert256(c color.Color) IndexedColor {
+ // If the color is already an IndexedColor, return it.
+ if i, ok := c.(IndexedColor); ok {
+ return i
+ }
+
+ // Note: this is mostly ported from tmux/colour.c.
+ col, ok := colorful.MakeColor(c)
+ if !ok {
+ return IndexedColor(0)
+ }
+
+ r := col.R * 255
+ g := col.G * 255
+ b := col.B * 255
+
+ q2c := [6]int{0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff}
+
+ // Map RGB to 6x6x6 cube.
+ qr := to6Cube(r)
+ cr := q2c[qr]
+ qg := to6Cube(g)
+ cg := q2c[qg]
+ qb := to6Cube(b)
+ cb := q2c[qb]
+
+ // If we have hit the color exactly, return early.
+ ci := (36 * qr) + (6 * qg) + qb
+ if cr == int(r) && cg == int(g) && cb == int(b) {
+ return IndexedColor(16 + ci) //nolint:gosec
+ }
+
+ // Work out the closest grey (average of RGB).
+ greyAvg := int(r+g+b) / 3
+ var greyIdx int
+ if greyAvg > 238 {
+ greyIdx = 23
+ } else {
+ greyIdx = (greyAvg - 3) / 10
+ }
+ grey := 8 + (10 * greyIdx)
+
+ // Return the one which is nearer to the original input rgb value
+ // XXX: This is where it differs from tmux's implementation, we prefer the
+ // closer color to the original in terms of light distances rather than the
+ // cube distance.
+ c2 := colorful.Color{R: float64(cr) / 255.0, G: float64(cg) / 255.0, B: float64(cb) / 255.0}
+ g2 := colorful.Color{R: float64(grey) / 255.0, G: float64(grey) / 255.0, B: float64(grey) / 255.0}
+ colorDist := col.DistanceHSLuv(c2)
+ grayDist := col.DistanceHSLuv(g2)
+
+ if colorDist <= grayDist {
+ return IndexedColor(16 + ci) //nolint:gosec
+ }
+ return IndexedColor(232 + greyIdx) //nolint:gosec
+
+ // // Is grey or 6x6x6 color closest?
+ // d := distSq(cr, cg, cb, int(r), int(g), int(b))
+ // if distSq(grey, grey, grey, int(r), int(g), int(b)) < d {
+ // return IndexedColor(232 + greyIdx) //nolint:gosec
+ // }
+ // return IndexedColor(16 + ci) //nolint:gosec
+}
+
+// Convert16 converts a [color.Color] to a 16-color ANSI color. It will first
+// try to find a match in the 256 xterm(1) color palette, and then map that to
+// the 16-color ANSI palette.
+func Convert16(c color.Color) BasicColor {
+ switch c := c.(type) {
+ case BasicColor:
+ // If the color is already a BasicColor, return it.
+ return c
+ case IndexedColor:
+ // If the color is already an IndexedColor, return the corresponding
+ // BasicColor.
+ return ansi256To16[c]
+ default:
+ c256 := Convert256(c)
+ return ansi256To16[c256]
+ }
+}
+
+// RGB values of ANSI colors (0-255).
+var ansiHex = [...]color.RGBA{
+ 0: {R: 0x00, G: 0x00, B: 0x00, A: 0xff}, // "#000000"
+ 1: {R: 0x80, G: 0x00, B: 0x00, A: 0xff}, // "#800000"
+ 2: {R: 0x00, G: 0x80, B: 0x00, A: 0xff}, // "#008000"
+ 3: {R: 0x80, G: 0x80, B: 0x00, A: 0xff}, // "#808000"
+ 4: {R: 0x00, G: 0x00, B: 0x80, A: 0xff}, // "#000080"
+ 5: {R: 0x80, G: 0x00, B: 0x80, A: 0xff}, // "#800080"
+ 6: {R: 0x00, G: 0x80, B: 0x80, A: 0xff}, // "#008080"
+ 7: {R: 0xc0, G: 0xc0, B: 0xc0, A: 0xff}, // "#c0c0c0"
+ 8: {R: 0x80, G: 0x80, B: 0x80, A: 0xff}, // "#808080"
+ 9: {R: 0xff, G: 0x00, B: 0x00, A: 0xff}, // "#ff0000"
+ 10: {R: 0x00, G: 0xff, B: 0x00, A: 0xff}, // "#00ff00"
+ 11: {R: 0xff, G: 0xff, B: 0x00, A: 0xff}, // "#ffff00"
+ 12: {R: 0x00, G: 0x00, B: 0xff, A: 0xff}, // "#0000ff"
+ 13: {R: 0xff, G: 0x00, B: 0xff, A: 0xff}, // "#ff00ff"
+ 14: {R: 0x00, G: 0xff, B: 0xff, A: 0xff}, // "#00ffff"
+ 15: {R: 0xff, G: 0xff, B: 0xff, A: 0xff}, // "#ffffff"
+ 16: {R: 0x00, G: 0x00, B: 0x00, A: 0xff}, // "#000000"
+ 17: {R: 0x00, G: 0x00, B: 0x5f, A: 0xff}, // "#00005f"
+ 18: {R: 0x00, G: 0x00, B: 0x87, A: 0xff}, // "#000087"
+ 19: {R: 0x00, G: 0x00, B: 0xaf, A: 0xff}, // "#0000af"
+ 20: {R: 0x00, G: 0x00, B: 0xd7, A: 0xff}, // "#0000d7"
+ 21: {R: 0x00, G: 0x00, B: 0xff, A: 0xff}, // "#0000ff"
+ 22: {R: 0x00, G: 0x5f, B: 0x00, A: 0xff}, // "#005f00"
+ 23: {R: 0x00, G: 0x5f, B: 0x5f, A: 0xff}, // "#005f5f"
+ 24: {R: 0x00, G: 0x5f, B: 0x87, A: 0xff}, // "#005f87"
+ 25: {R: 0x00, G: 0x5f, B: 0xaf, A: 0xff}, // "#005faf"
+ 26: {R: 0x00, G: 0x5f, B: 0xd7, A: 0xff}, // "#005fd7"
+ 27: {R: 0x00, G: 0x5f, B: 0xff, A: 0xff}, // "#005fff"
+ 28: {R: 0x00, G: 0x87, B: 0x00, A: 0xff}, // "#008700"
+ 29: {R: 0x00, G: 0x87, B: 0x5f, A: 0xff}, // "#00875f"
+ 30: {R: 0x00, G: 0x87, B: 0x87, A: 0xff}, // "#008787"
+ 31: {R: 0x00, G: 0x87, B: 0xaf, A: 0xff}, // "#0087af"
+ 32: {R: 0x00, G: 0x87, B: 0xd7, A: 0xff}, // "#0087d7"
+ 33: {R: 0x00, G: 0x87, B: 0xff, A: 0xff}, // "#0087ff"
+ 34: {R: 0x00, G: 0xaf, B: 0x00, A: 0xff}, // "#00af00"
+ 35: {R: 0x00, G: 0xaf, B: 0x5f, A: 0xff}, // "#00af5f"
+ 36: {R: 0x00, G: 0xaf, B: 0x87, A: 0xff}, // "#00af87"
+ 37: {R: 0x00, G: 0xaf, B: 0xaf, A: 0xff}, // "#00afaf"
+ 38: {R: 0x00, G: 0xaf, B: 0xd7, A: 0xff}, // "#00afd7"
+ 39: {R: 0x00, G: 0xaf, B: 0xff, A: 0xff}, // "#00afff"
+ 40: {R: 0x00, G: 0xd7, B: 0x00, A: 0xff}, // "#00d700"
+ 41: {R: 0x00, G: 0xd7, B: 0x5f, A: 0xff}, // "#00d75f"
+ 42: {R: 0x00, G: 0xd7, B: 0x87, A: 0xff}, // "#00d787"
+ 43: {R: 0x00, G: 0xd7, B: 0xaf, A: 0xff}, // "#00d7af"
+ 44: {R: 0x00, G: 0xd7, B: 0xd7, A: 0xff}, // "#00d7d7"
+ 45: {R: 0x00, G: 0xd7, B: 0xff, A: 0xff}, // "#00d7ff"
+ 46: {R: 0x00, G: 0xff, B: 0x00, A: 0xff}, // "#00ff00"
+ 47: {R: 0x00, G: 0xff, B: 0x5f, A: 0xff}, // "#00ff5f"
+ 48: {R: 0x00, G: 0xff, B: 0x87, A: 0xff}, // "#00ff87"
+ 49: {R: 0x00, G: 0xff, B: 0xaf, A: 0xff}, // "#00ffaf"
+ 50: {R: 0x00, G: 0xff, B: 0xd7, A: 0xff}, // "#00ffd7"
+ 51: {R: 0x00, G: 0xff, B: 0xff, A: 0xff}, // "#00ffff"
+ 52: {R: 0x5f, G: 0x00, B: 0x00, A: 0xff}, // "#5f0000"
+ 53: {R: 0x5f, G: 0x00, B: 0x5f, A: 0xff}, // "#5f005f"
+ 54: {R: 0x5f, G: 0x00, B: 0x87, A: 0xff}, // "#5f0087"
+ 55: {R: 0x5f, G: 0x00, B: 0xaf, A: 0xff}, // "#5f00af"
+ 56: {R: 0x5f, G: 0x00, B: 0xd7, A: 0xff}, // "#5f00d7"
+ 57: {R: 0x5f, G: 0x00, B: 0xff, A: 0xff}, // "#5f00ff"
+ 58: {R: 0x5f, G: 0x5f, B: 0x00, A: 0xff}, // "#5f5f00"
+ 59: {R: 0x5f, G: 0x5f, B: 0x5f, A: 0xff}, // "#5f5f5f"
+ 60: {R: 0x5f, G: 0x5f, B: 0x87, A: 0xff}, // "#5f5f87"
+ 61: {R: 0x5f, G: 0x5f, B: 0xaf, A: 0xff}, // "#5f5faf"
+ 62: {R: 0x5f, G: 0x5f, B: 0xd7, A: 0xff}, // "#5f5fd7"
+ 63: {R: 0x5f, G: 0x5f, B: 0xff, A: 0xff}, // "#5f5fff"
+ 64: {R: 0x5f, G: 0x87, B: 0x00, A: 0xff}, // "#5f8700"
+ 65: {R: 0x5f, G: 0x87, B: 0x5f, A: 0xff}, // "#5f875f"
+ 66: {R: 0x5f, G: 0x87, B: 0x87, A: 0xff}, // "#5f8787"
+ 67: {R: 0x5f, G: 0x87, B: 0xaf, A: 0xff}, // "#5f87af"
+ 68: {R: 0x5f, G: 0x87, B: 0xd7, A: 0xff}, // "#5f87d7"
+ 69: {R: 0x5f, G: 0x87, B: 0xff, A: 0xff}, // "#5f87ff"
+ 70: {R: 0x5f, G: 0xaf, B: 0x00, A: 0xff}, // "#5faf00"
+ 71: {R: 0x5f, G: 0xaf, B: 0x5f, A: 0xff}, // "#5faf5f"
+ 72: {R: 0x5f, G: 0xaf, B: 0x87, A: 0xff}, // "#5faf87"
+ 73: {R: 0x5f, G: 0xaf, B: 0xaf, A: 0xff}, // "#5fafaf"
+ 74: {R: 0x5f, G: 0xaf, B: 0xd7, A: 0xff}, // "#5fafd7"
+ 75: {R: 0x5f, G: 0xaf, B: 0xff, A: 0xff}, // "#5fafff"
+ 76: {R: 0x5f, G: 0xd7, B: 0x00, A: 0xff}, // "#5fd700"
+ 77: {R: 0x5f, G: 0xd7, B: 0x5f, A: 0xff}, // "#5fd75f"
+ 78: {R: 0x5f, G: 0xd7, B: 0x87, A: 0xff}, // "#5fd787"
+ 79: {R: 0x5f, G: 0xd7, B: 0xaf, A: 0xff}, // "#5fd7af"
+ 80: {R: 0x5f, G: 0xd7, B: 0xd7, A: 0xff}, // "#5fd7d7"
+ 81: {R: 0x5f, G: 0xd7, B: 0xff, A: 0xff}, // "#5fd7ff"
+ 82: {R: 0x5f, G: 0xff, B: 0x00, A: 0xff}, // "#5fff00"
+ 83: {R: 0x5f, G: 0xff, B: 0x5f, A: 0xff}, // "#5fff5f"
+ 84: {R: 0x5f, G: 0xff, B: 0x87, A: 0xff}, // "#5fff87"
+ 85: {R: 0x5f, G: 0xff, B: 0xaf, A: 0xff}, // "#5fffaf"
+ 86: {R: 0x5f, G: 0xff, B: 0xd7, A: 0xff}, // "#5fffd7"
+ 87: {R: 0x5f, G: 0xff, B: 0xff, A: 0xff}, // "#5fffff"
+ 88: {R: 0x87, G: 0x00, B: 0x00, A: 0xff}, // "#870000"
+ 89: {R: 0x87, G: 0x00, B: 0x5f, A: 0xff}, // "#87005f"
+ 90: {R: 0x87, G: 0x00, B: 0x87, A: 0xff}, // "#870087"
+ 91: {R: 0x87, G: 0x00, B: 0xaf, A: 0xff}, // "#8700af"
+ 92: {R: 0x87, G: 0x00, B: 0xd7, A: 0xff}, // "#8700d7"
+ 93: {R: 0x87, G: 0x00, B: 0xff, A: 0xff}, // "#8700ff"
+ 94: {R: 0x87, G: 0x5f, B: 0x00, A: 0xff}, // "#875f00"
+ 95: {R: 0x87, G: 0x5f, B: 0x5f, A: 0xff}, // "#875f5f"
+ 96: {R: 0x87, G: 0x5f, B: 0x87, A: 0xff}, // "#875f87"
+ 97: {R: 0x87, G: 0x5f, B: 0xaf, A: 0xff}, // "#875faf"
+ 98: {R: 0x87, G: 0x5f, B: 0xd7, A: 0xff}, // "#875fd7"
+ 99: {R: 0x87, G: 0x5f, B: 0xff, A: 0xff}, // "#875fff"
+ 100: {R: 0x87, G: 0x87, B: 0x00, A: 0xff}, // "#878700"
+ 101: {R: 0x87, G: 0x87, B: 0x5f, A: 0xff}, // "#87875f"
+ 102: {R: 0x87, G: 0x87, B: 0x87, A: 0xff}, // "#878787"
+ 103: {R: 0x87, G: 0x87, B: 0xaf, A: 0xff}, // "#8787af"
+ 104: {R: 0x87, G: 0x87, B: 0xd7, A: 0xff}, // "#8787d7"
+ 105: {R: 0x87, G: 0x87, B: 0xff, A: 0xff}, // "#8787ff"
+ 106: {R: 0x87, G: 0xaf, B: 0x00, A: 0xff}, // "#87af00"
+ 107: {R: 0x87, G: 0xaf, B: 0x5f, A: 0xff}, // "#87af5f"
+ 108: {R: 0x87, G: 0xaf, B: 0x87, A: 0xff}, // "#87af87"
+ 109: {R: 0x87, G: 0xaf, B: 0xaf, A: 0xff}, // "#87afaf"
+ 110: {R: 0x87, G: 0xaf, B: 0xd7, A: 0xff}, // "#87afd7"
+ 111: {R: 0x87, G: 0xaf, B: 0xff, A: 0xff}, // "#87afff"
+ 112: {R: 0x87, G: 0xd7, B: 0x00, A: 0xff}, // "#87d700"
+ 113: {R: 0x87, G: 0xd7, B: 0x5f, A: 0xff}, // "#87d75f"
+ 114: {R: 0x87, G: 0xd7, B: 0x87, A: 0xff}, // "#87d787"
+ 115: {R: 0x87, G: 0xd7, B: 0xaf, A: 0xff}, // "#87d7af"
+ 116: {R: 0x87, G: 0xd7, B: 0xd7, A: 0xff}, // "#87d7d7"
+ 117: {R: 0x87, G: 0xd7, B: 0xff, A: 0xff}, // "#87d7ff"
+ 118: {R: 0x87, G: 0xff, B: 0x00, A: 0xff}, // "#87ff00"
+ 119: {R: 0x87, G: 0xff, B: 0x5f, A: 0xff}, // "#87ff5f"
+ 120: {R: 0x87, G: 0xff, B: 0x87, A: 0xff}, // "#87ff87"
+ 121: {R: 0x87, G: 0xff, B: 0xaf, A: 0xff}, // "#87ffaf"
+ 122: {R: 0x87, G: 0xff, B: 0xd7, A: 0xff}, // "#87ffd7"
+ 123: {R: 0x87, G: 0xff, B: 0xff, A: 0xff}, // "#87ffff"
+ 124: {R: 0xaf, G: 0x00, B: 0x00, A: 0xff}, // "#af0000"
+ 125: {R: 0xaf, G: 0x00, B: 0x5f, A: 0xff}, // "#af005f"
+ 126: {R: 0xaf, G: 0x00, B: 0x87, A: 0xff}, // "#af0087"
+ 127: {R: 0xaf, G: 0x00, B: 0xaf, A: 0xff}, // "#af00af"
+ 128: {R: 0xaf, G: 0x00, B: 0xd7, A: 0xff}, // "#af00d7"
+ 129: {R: 0xaf, G: 0x00, B: 0xff, A: 0xff}, // "#af00ff"
+ 130: {R: 0xaf, G: 0x5f, B: 0x00, A: 0xff}, // "#af5f00"
+ 131: {R: 0xaf, G: 0x5f, B: 0x5f, A: 0xff}, // "#af5f5f"
+ 132: {R: 0xaf, G: 0x5f, B: 0x87, A: 0xff}, // "#af5f87"
+ 133: {R: 0xaf, G: 0x5f, B: 0xaf, A: 0xff}, // "#af5faf"
+ 134: {R: 0xaf, G: 0x5f, B: 0xd7, A: 0xff}, // "#af5fd7"
+ 135: {R: 0xaf, G: 0x5f, B: 0xff, A: 0xff}, // "#af5fff"
+ 136: {R: 0xaf, G: 0x87, B: 0x00, A: 0xff}, // "#af8700"
+ 137: {R: 0xaf, G: 0x87, B: 0x5f, A: 0xff}, // "#af875f"
+ 138: {R: 0xaf, G: 0x87, B: 0x87, A: 0xff}, // "#af8787"
+ 139: {R: 0xaf, G: 0x87, B: 0xaf, A: 0xff}, // "#af87af"
+ 140: {R: 0xaf, G: 0x87, B: 0xd7, A: 0xff}, // "#af87d7"
+ 141: {R: 0xaf, G: 0x87, B: 0xff, A: 0xff}, // "#af87ff"
+ 142: {R: 0xaf, G: 0xaf, B: 0x00, A: 0xff}, // "#afaf00"
+ 143: {R: 0xaf, G: 0xaf, B: 0x5f, A: 0xff}, // "#afaf5f"
+ 144: {R: 0xaf, G: 0xaf, B: 0x87, A: 0xff}, // "#afaf87"
+ 145: {R: 0xaf, G: 0xaf, B: 0xaf, A: 0xff}, // "#afafaf"
+ 146: {R: 0xaf, G: 0xaf, B: 0xd7, A: 0xff}, // "#afafd7"
+ 147: {R: 0xaf, G: 0xaf, B: 0xff, A: 0xff}, // "#afafff"
+ 148: {R: 0xaf, G: 0xd7, B: 0x00, A: 0xff}, // "#afd700"
+ 149: {R: 0xaf, G: 0xd7, B: 0x5f, A: 0xff}, // "#afd75f"
+ 150: {R: 0xaf, G: 0xd7, B: 0x87, A: 0xff}, // "#afd787"
+ 151: {R: 0xaf, G: 0xd7, B: 0xaf, A: 0xff}, // "#afd7af"
+ 152: {R: 0xaf, G: 0xd7, B: 0xd7, A: 0xff}, // "#afd7d7"
+ 153: {R: 0xaf, G: 0xd7, B: 0xff, A: 0xff}, // "#afd7ff"
+ 154: {R: 0xaf, G: 0xff, B: 0x00, A: 0xff}, // "#afff00"
+ 155: {R: 0xaf, G: 0xff, B: 0x5f, A: 0xff}, // "#afff5f"
+ 156: {R: 0xaf, G: 0xff, B: 0x87, A: 0xff}, // "#afff87"
+ 157: {R: 0xaf, G: 0xff, B: 0xaf, A: 0xff}, // "#afffaf"
+ 158: {R: 0xaf, G: 0xff, B: 0xd7, A: 0xff}, // "#afffd7"
+ 159: {R: 0xaf, G: 0xff, B: 0xff, A: 0xff}, // "#afffff"
+ 160: {R: 0xd7, G: 0x00, B: 0x00, A: 0xff}, // "#d70000"
+ 161: {R: 0xd7, G: 0x00, B: 0x5f, A: 0xff}, // "#d7005f"
+ 162: {R: 0xd7, G: 0x00, B: 0x87, A: 0xff}, // "#d70087"
+ 163: {R: 0xd7, G: 0x00, B: 0xaf, A: 0xff}, // "#d700af"
+ 164: {R: 0xd7, G: 0x00, B: 0xd7, A: 0xff}, // "#d700d7"
+ 165: {R: 0xd7, G: 0x00, B: 0xff, A: 0xff}, // "#d700ff"
+ 166: {R: 0xd7, G: 0x5f, B: 0x00, A: 0xff}, // "#d75f00"
+ 167: {R: 0xd7, G: 0x5f, B: 0x5f, A: 0xff}, // "#d75f5f"
+ 168: {R: 0xd7, G: 0x5f, B: 0x87, A: 0xff}, // "#d75f87"
+ 169: {R: 0xd7, G: 0x5f, B: 0xaf, A: 0xff}, // "#d75faf"
+ 170: {R: 0xd7, G: 0x5f, B: 0xd7, A: 0xff}, // "#d75fd7"
+ 171: {R: 0xd7, G: 0x5f, B: 0xff, A: 0xff}, // "#d75fff"
+ 172: {R: 0xd7, G: 0x87, B: 0x00, A: 0xff}, // "#d78700"
+ 173: {R: 0xd7, G: 0x87, B: 0x5f, A: 0xff}, // "#d7875f"
+ 174: {R: 0xd7, G: 0x87, B: 0x87, A: 0xff}, // "#d78787"
+ 175: {R: 0xd7, G: 0x87, B: 0xaf, A: 0xff}, // "#d787af"
+ 176: {R: 0xd7, G: 0x87, B: 0xd7, A: 0xff}, // "#d787d7"
+ 177: {R: 0xd7, G: 0x87, B: 0xff, A: 0xff}, // "#d787ff"
+ 178: {R: 0xd7, G: 0xaf, B: 0x00, A: 0xff}, // "#d7af00"
+ 179: {R: 0xd7, G: 0xaf, B: 0x5f, A: 0xff}, // "#d7af5f"
+ 180: {R: 0xd7, G: 0xaf, B: 0x87, A: 0xff}, // "#d7af87"
+ 181: {R: 0xd7, G: 0xaf, B: 0xaf, A: 0xff}, // "#d7afaf"
+ 182: {R: 0xd7, G: 0xaf, B: 0xd7, A: 0xff}, // "#d7afd7"
+ 183: {R: 0xd7, G: 0xaf, B: 0xff, A: 0xff}, // "#d7afff"
+ 184: {R: 0xd7, G: 0xd7, B: 0x00, A: 0xff}, // "#d7d700"
+ 185: {R: 0xd7, G: 0xd7, B: 0x5f, A: 0xff}, // "#d7d75f"
+ 186: {R: 0xd7, G: 0xd7, B: 0x87, A: 0xff}, // "#d7d787"
+ 187: {R: 0xd7, G: 0xd7, B: 0xaf, A: 0xff}, // "#d7d7af"
+ 188: {R: 0xd7, G: 0xd7, B: 0xd7, A: 0xff}, // "#d7d7d7"
+ 189: {R: 0xd7, G: 0xd7, B: 0xff, A: 0xff}, // "#d7d7ff"
+ 190: {R: 0xd7, G: 0xff, B: 0x00, A: 0xff}, // "#d7ff00"
+ 191: {R: 0xd7, G: 0xff, B: 0x5f, A: 0xff}, // "#d7ff5f"
+ 192: {R: 0xd7, G: 0xff, B: 0x87, A: 0xff}, // "#d7ff87"
+ 193: {R: 0xd7, G: 0xff, B: 0xaf, A: 0xff}, // "#d7ffaf"
+ 194: {R: 0xd7, G: 0xff, B: 0xd7, A: 0xff}, // "#d7ffd7"
+ 195: {R: 0xd7, G: 0xff, B: 0xff, A: 0xff}, // "#d7ffff"
+ 196: {R: 0xff, G: 0x00, B: 0x00, A: 0xff}, // "#ff0000"
+ 197: {R: 0xff, G: 0x00, B: 0x5f, A: 0xff}, // "#ff005f"
+ 198: {R: 0xff, G: 0x00, B: 0x87, A: 0xff}, // "#ff0087"
+ 199: {R: 0xff, G: 0x00, B: 0xaf, A: 0xff}, // "#ff00af"
+ 200: {R: 0xff, G: 0x00, B: 0xd7, A: 0xff}, // "#ff00d7"
+ 201: {R: 0xff, G: 0x00, B: 0xff, A: 0xff}, // "#ff00ff"
+ 202: {R: 0xff, G: 0x5f, B: 0x00, A: 0xff}, // "#ff5f00"
+ 203: {R: 0xff, G: 0x5f, B: 0x5f, A: 0xff}, // "#ff5f5f"
+ 204: {R: 0xff, G: 0x5f, B: 0x87, A: 0xff}, // "#ff5f87"
+ 205: {R: 0xff, G: 0x5f, B: 0xaf, A: 0xff}, // "#ff5faf"
+ 206: {R: 0xff, G: 0x5f, B: 0xd7, A: 0xff}, // "#ff5fd7"
+ 207: {R: 0xff, G: 0x5f, B: 0xff, A: 0xff}, // "#ff5fff"
+ 208: {R: 0xff, G: 0x87, B: 0x00, A: 0xff}, // "#ff8700"
+ 209: {R: 0xff, G: 0x87, B: 0x5f, A: 0xff}, // "#ff875f"
+ 210: {R: 0xff, G: 0x87, B: 0x87, A: 0xff}, // "#ff8787"
+ 211: {R: 0xff, G: 0x87, B: 0xaf, A: 0xff}, // "#ff87af"
+ 212: {R: 0xff, G: 0x87, B: 0xd7, A: 0xff}, // "#ff87d7"
+ 213: {R: 0xff, G: 0x87, B: 0xff, A: 0xff}, // "#ff87ff"
+ 214: {R: 0xff, G: 0xaf, B: 0x00, A: 0xff}, // "#ffaf00"
+ 215: {R: 0xff, G: 0xaf, B: 0x5f, A: 0xff}, // "#ffaf5f"
+ 216: {R: 0xff, G: 0xaf, B: 0x87, A: 0xff}, // "#ffaf87"
+ 217: {R: 0xff, G: 0xaf, B: 0xaf, A: 0xff}, // "#ffafaf"
+ 218: {R: 0xff, G: 0xaf, B: 0xd7, A: 0xff}, // "#ffafd7"
+ 219: {R: 0xff, G: 0xaf, B: 0xff, A: 0xff}, // "#ffafff"
+ 220: {R: 0xff, G: 0xd7, B: 0x00, A: 0xff}, // "#ffd700"
+ 221: {R: 0xff, G: 0xd7, B: 0x5f, A: 0xff}, // "#ffd75f"
+ 222: {R: 0xff, G: 0xd7, B: 0x87, A: 0xff}, // "#ffd787"
+ 223: {R: 0xff, G: 0xd7, B: 0xaf, A: 0xff}, // "#ffd7af"
+ 224: {R: 0xff, G: 0xd7, B: 0xd7, A: 0xff}, // "#ffd7d7"
+ 225: {R: 0xff, G: 0xd7, B: 0xff, A: 0xff}, // "#ffd7ff"
+ 226: {R: 0xff, G: 0xff, B: 0x00, A: 0xff}, // "#ffff00"
+ 227: {R: 0xff, G: 0xff, B: 0x5f, A: 0xff}, // "#ffff5f"
+ 228: {R: 0xff, G: 0xff, B: 0x87, A: 0xff}, // "#ffff87"
+ 229: {R: 0xff, G: 0xff, B: 0xaf, A: 0xff}, // "#ffffaf"
+ 230: {R: 0xff, G: 0xff, B: 0xd7, A: 0xff}, // "#ffffd7"
+ 231: {R: 0xff, G: 0xff, B: 0xff, A: 0xff}, // "#ffffff"
+ 232: {R: 0x08, G: 0x08, B: 0x08, A: 0xff}, // "#080808"
+ 233: {R: 0x12, G: 0x12, B: 0x12, A: 0xff}, // "#121212"
+ 234: {R: 0x1c, G: 0x1c, B: 0x1c, A: 0xff}, // "#1c1c1c"
+ 235: {R: 0x26, G: 0x26, B: 0x26, A: 0xff}, // "#262626"
+ 236: {R: 0x30, G: 0x30, B: 0x30, A: 0xff}, // "#303030"
+ 237: {R: 0x3a, G: 0x3a, B: 0x3a, A: 0xff}, // "#3a3a3a"
+ 238: {R: 0x44, G: 0x44, B: 0x44, A: 0xff}, // "#444444"
+ 239: {R: 0x4e, G: 0x4e, B: 0x4e, A: 0xff}, // "#4e4e4e"
+ 240: {R: 0x58, G: 0x58, B: 0x58, A: 0xff}, // "#585858"
+ 241: {R: 0x62, G: 0x62, B: 0x62, A: 0xff}, // "#626262"
+ 242: {R: 0x6c, G: 0x6c, B: 0x6c, A: 0xff}, // "#6c6c6c"
+ 243: {R: 0x76, G: 0x76, B: 0x76, A: 0xff}, // "#767676"
+ 244: {R: 0x80, G: 0x80, B: 0x80, A: 0xff}, // "#808080"
+ 245: {R: 0x8a, G: 0x8a, B: 0x8a, A: 0xff}, // "#8a8a8a"
+ 246: {R: 0x94, G: 0x94, B: 0x94, A: 0xff}, // "#949494"
+ 247: {R: 0x9e, G: 0x9e, B: 0x9e, A: 0xff}, // "#9e9e9e"
+ 248: {R: 0xa8, G: 0xa8, B: 0xa8, A: 0xff}, // "#a8a8a8"
+ 249: {R: 0xb2, G: 0xb2, B: 0xb2, A: 0xff}, // "#b2b2b2"
+ 250: {R: 0xbc, G: 0xbc, B: 0xbc, A: 0xff}, // "#bcbcbc"
+ 251: {R: 0xc6, G: 0xc6, B: 0xc6, A: 0xff}, // "#c6c6c6"
+ 252: {R: 0xd0, G: 0xd0, B: 0xd0, A: 0xff}, // "#d0d0d0"
+ 253: {R: 0xda, G: 0xda, B: 0xda, A: 0xff}, // "#dadada"
+ 254: {R: 0xe4, G: 0xe4, B: 0xe4, A: 0xff}, // "#e4e4e4"
+ 255: {R: 0xee, G: 0xee, B: 0xee, A: 0xff}, // "#eeeeee"
+}
+
+var ansi256To16 = [...]BasicColor{
+ 0: 0,
+ 1: 1,
+ 2: 2,
+ 3: 3,
+ 4: 4,
+ 5: 5,
+ 6: 6,
+ 7: 7,
+ 8: 8,
+ 9: 9,
+ 10: 10,
+ 11: 11,
+ 12: 12,
+ 13: 13,
+ 14: 14,
+ 15: 15,
+ 16: 0,
+ 17: 4,
+ 18: 4,
+ 19: 4,
+ 20: 12,
+ 21: 12,
+ 22: 2,
+ 23: 6,
+ 24: 4,
+ 25: 4,
+ 26: 12,
+ 27: 12,
+ 28: 2,
+ 29: 2,
+ 30: 6,
+ 31: 4,
+ 32: 12,
+ 33: 12,
+ 34: 2,
+ 35: 2,
+ 36: 2,
+ 37: 6,
+ 38: 12,
+ 39: 12,
+ 40: 10,
+ 41: 10,
+ 42: 10,
+ 43: 10,
+ 44: 14,
+ 45: 12,
+ 46: 10,
+ 47: 10,
+ 48: 10,
+ 49: 10,
+ 50: 10,
+ 51: 14,
+ 52: 1,
+ 53: 5,
+ 54: 4,
+ 55: 4,
+ 56: 12,
+ 57: 12,
+ 58: 3,
+ 59: 8,
+ 60: 4,
+ 61: 4,
+ 62: 12,
+ 63: 12,
+ 64: 2,
+ 65: 2,
+ 66: 6,
+ 67: 4,
+ 68: 12,
+ 69: 12,
+ 70: 2,
+ 71: 2,
+ 72: 2,
+ 73: 6,
+ 74: 12,
+ 75: 12,
+ 76: 10,
+ 77: 10,
+ 78: 10,
+ 79: 10,
+ 80: 14,
+ 81: 12,
+ 82: 10,
+ 83: 10,
+ 84: 10,
+ 85: 10,
+ 86: 10,
+ 87: 14,
+ 88: 1,
+ 89: 1,
+ 90: 5,
+ 91: 4,
+ 92: 12,
+ 93: 12,
+ 94: 1,
+ 95: 1,
+ 96: 5,
+ 97: 4,
+ 98: 12,
+ 99: 12,
+ 100: 3,
+ 101: 3,
+ 102: 8,
+ 103: 4,
+ 104: 12,
+ 105: 12,
+ 106: 2,
+ 107: 2,
+ 108: 2,
+ 109: 6,
+ 110: 12,
+ 111: 12,
+ 112: 10,
+ 113: 10,
+ 114: 10,
+ 115: 10,
+ 116: 14,
+ 117: 12,
+ 118: 10,
+ 119: 10,
+ 120: 10,
+ 121: 10,
+ 122: 10,
+ 123: 14,
+ 124: 1,
+ 125: 1,
+ 126: 1,
+ 127: 5,
+ 128: 12,
+ 129: 12,
+ 130: 1,
+ 131: 1,
+ 132: 1,
+ 133: 5,
+ 134: 12,
+ 135: 12,
+ 136: 1,
+ 137: 1,
+ 138: 1,
+ 139: 5,
+ 140: 12,
+ 141: 12,
+ 142: 3,
+ 143: 3,
+ 144: 3,
+ 145: 7,
+ 146: 12,
+ 147: 12,
+ 148: 10,
+ 149: 10,
+ 150: 10,
+ 151: 10,
+ 152: 14,
+ 153: 12,
+ 154: 10,
+ 155: 10,
+ 156: 10,
+ 157: 10,
+ 158: 10,
+ 159: 14,
+ 160: 9,
+ 161: 9,
+ 162: 9,
+ 163: 9,
+ 164: 13,
+ 165: 12,
+ 166: 9,
+ 167: 9,
+ 168: 9,
+ 169: 9,
+ 170: 13,
+ 171: 12,
+ 172: 9,
+ 173: 9,
+ 174: 9,
+ 175: 9,
+ 176: 13,
+ 177: 12,
+ 178: 9,
+ 179: 9,
+ 180: 9,
+ 181: 9,
+ 182: 13,
+ 183: 12,
+ 184: 11,
+ 185: 11,
+ 186: 11,
+ 187: 11,
+ 188: 7,
+ 189: 12,
+ 190: 10,
+ 191: 10,
+ 192: 10,
+ 193: 10,
+ 194: 10,
+ 195: 14,
+ 196: 9,
+ 197: 9,
+ 198: 9,
+ 199: 9,
+ 200: 9,
+ 201: 13,
+ 202: 9,
+ 203: 9,
+ 204: 9,
+ 205: 9,
+ 206: 9,
+ 207: 13,
+ 208: 9,
+ 209: 9,
+ 210: 9,
+ 211: 9,
+ 212: 9,
+ 213: 13,
+ 214: 9,
+ 215: 9,
+ 216: 9,
+ 217: 9,
+ 218: 9,
+ 219: 13,
+ 220: 9,
+ 221: 9,
+ 222: 9,
+ 223: 9,
+ 224: 9,
+ 225: 13,
+ 226: 11,
+ 227: 11,
+ 228: 11,
+ 229: 11,
+ 230: 11,
+ 231: 15,
+ 232: 0,
+ 233: 0,
+ 234: 0,
+ 235: 0,
+ 236: 0,
+ 237: 0,
+ 238: 8,
+ 239: 8,
+ 240: 8,
+ 241: 8,
+ 242: 8,
+ 243: 8,
+ 244: 7,
+ 245: 7,
+ 246: 7,
+ 247: 7,
+ 248: 7,
+ 249: 7,
+ 250: 15,
+ 251: 15,
+ 252: 15,
+ 253: 15,
+ 254: 15,
+ 255: 15,
+}
diff --git a/vendor/github.com/charmbracelet/x/ansi/ctrl.go b/vendor/github.com/charmbracelet/x/ansi/ctrl.go
index 8ca744cf..64bcf113 100644
--- a/vendor/github.com/charmbracelet/x/ansi/ctrl.go
+++ b/vendor/github.com/charmbracelet/x/ansi/ctrl.go
@@ -38,6 +38,25 @@ const RequestXTVersion = RequestNameVersion
// If no attributes are given, or if the attribute is 0, this function returns
// the request sequence. Otherwise, it returns the response sequence.
//
+// Common attributes include:
+// - 1 132 columns
+// - 2 Printer port
+// - 4 Sixel
+// - 6 Selective erase
+// - 7 Soft character set (DRCS)
+// - 8 User-defined keys (UDKs)
+// - 9 National replacement character sets (NRCS) (International terminal only)
+// - 12 Yugoslavian (SCS)
+// - 15 Technical character set
+// - 18 Windowing capability
+// - 21 Horizontal scrolling
+// - 23 Greek
+// - 24 Turkish
+// - 42 ISO Latin-2 character set
+// - 44 PCTerm
+// - 45 Soft key map
+// - 46 ASCII emulation
+//
// See https://vt100.net/docs/vt510-rm/DA1.html
func PrimaryDeviceAttributes(attrs ...int) string {
if len(attrs) == 0 {
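The attribute list above documents both directions of DA1: calling the function with no arguments yields the request sequence, while passing attribute values builds a response. A minimal usage sketch, assuming the usual github.com/charmbracelet/x/ansi import path for this vendored package; the attribute values 1, 4 and 6 are just examples taken from the list:

package main

import (
	"fmt"

	"github.com/charmbracelet/x/ansi"
)

func main() {
	// Host -> terminal: request primary device attributes.
	fmt.Printf("%q\n", ansi.PrimaryDeviceAttributes())

	// Terminal -> host: a response claiming 132-column, sixel and
	// selective-erase support, per the attribute list documented above.
	fmt.Printf("%q\n", ansi.PrimaryDeviceAttributes(1, 4, 6))
}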
diff --git a/vendor/github.com/charmbracelet/x/ansi/cursor.go b/vendor/github.com/charmbracelet/x/ansi/cursor.go
index 0c364d60..4adf6896 100644
--- a/vendor/github.com/charmbracelet/x/ansi/cursor.go
+++ b/vendor/github.com/charmbracelet/x/ansi/cursor.go
@@ -1,6 +1,8 @@
package ansi
-import "strconv"
+import (
+ "strconv"
+)
// SaveCursor (DECSC) is an escape sequence that saves the current cursor
// position.
@@ -260,7 +262,7 @@ func CHA(col int) string {
// See: https://vt100.net/docs/vt510-rm/CUP.html
func CursorPosition(col, row int) string {
if row <= 0 && col <= 0 {
- return HomeCursorPosition
+ return CursorHomePosition
}
var r, c string
@@ -356,8 +358,8 @@ func CHT(n int) string {
return CursorHorizontalForwardTab(n)
}
-// EraseCharacter (ECH) returns a sequence for erasing n characters and moving
-// the cursor to the right. This doesn't affect other cell attributes.
+// EraseCharacter (ECH) returns a sequence for erasing n characters from the
+// screen. This doesn't affect other cell attributes.
//
// Default is 1.
//
@@ -589,7 +591,7 @@ const ReverseIndex = "\x1bM"
//
// Default is 1.
//
-// CSI n `
+// CSI n \`
//
// See: https://vt100.net/docs/vt510-rm/HPA.html
func HorizontalPositionAbsolute(col int) string {
diff --git a/vendor/github.com/charmbracelet/x/ansi/finalterm.go b/vendor/github.com/charmbracelet/x/ansi/finalterm.go
new file mode 100644
index 00000000..2c283472
--- /dev/null
+++ b/vendor/github.com/charmbracelet/x/ansi/finalterm.go
@@ -0,0 +1,67 @@
+package ansi
+
+import "strings"
+
+// FinalTerm returns an escape sequence that is used for shell integrations.
+// Originally, FinalTerm designed the protocol, hence the name.
+//
+// OSC 133 ; Ps ; Pm ST
+// OSC 133 ; Ps ; Pm BEL
+//
+// See: https://iterm2.com/documentation-shell-integration.html
+func FinalTerm(pm ...string) string {
+ return "\x1b]133;" + strings.Join(pm, ";") + "\x07"
+}
+
+// FinalTermPrompt returns an escape sequence that is used for shell
+// integrations prompt marks. This is sent just before the start of the shell
+// prompt.
+//
+// This is an alias for FinalTerm("A").
+func FinalTermPrompt(pm ...string) string {
+ if len(pm) == 0 {
+ return FinalTerm("A")
+ }
+ return FinalTerm(append([]string{"A"}, pm...)...)
+}
+
+// FinalTermCmdStart returns an escape sequence that is used for shell
+// integrations command start marks. This is sent just after the end of the
+// shell prompt, before the user enters a command.
+//
+// This is an alias for FinalTerm("B").
+func FinalTermCmdStart(pm ...string) string {
+ if len(pm) == 0 {
+ return FinalTerm("B")
+ }
+ return FinalTerm(append([]string{"B"}, pm...)...)
+}
+
+// FinalTermCmdExecuted returns an escape sequence that is used for shell
+// integrations command executed marks. This is sent just before the start of
+// the command output.
+//
+// This is an alias for FinalTerm("C").
+func FinalTermCmdExecuted(pm ...string) string {
+ if len(pm) == 0 {
+ return FinalTerm("C")
+ }
+ return FinalTerm(append([]string{"C"}, pm...)...)
+}
+
+// FinalTermCmdFinished returns an escape sequence that is used for shell
+// integrations command finished marks.
+//
+// If the command was sent after
+// [FinalTermCmdStart], it indicates that the command was aborted. If the
+// command was sent after [FinalTermCmdExecuted], it indicates the end of the
+// command output. If neither was sent, [FinalTermCmdFinished] should be
+// ignored.
+//
+// This is an alias for FinalTerm("D").
+func FinalTermCmdFinished(pm ...string) string {
+ if len(pm) == 0 {
+ return FinalTerm("D")
+ }
+ return FinalTerm(append([]string{"D"}, pm...)...)
+}
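The helpers above only build the OSC 133 strings; emitting them around a prompt and command is left to the caller. A rough sketch of the expected ordering, assuming the usual github.com/charmbracelet/x/ansi import path for this vendored package (the prompt text, command output, and the "0" exit-status parameter on the D mark are illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/charmbracelet/x/ansi"
)

func main() {
	// A: prompt starts. B: prompt ends and user input begins.
	fmt.Fprint(os.Stdout, ansi.FinalTermPrompt())
	fmt.Fprint(os.Stdout, "$ ")
	fmt.Fprint(os.Stdout, ansi.FinalTermCmdStart())

	// ... the user would type a command here ...

	// C: command output begins. D: command finished; the exit-status
	// parameter follows the iTerm2 convention and is illustrative.
	fmt.Fprint(os.Stdout, ansi.FinalTermCmdExecuted())
	fmt.Fprintln(os.Stdout, "command output")
	fmt.Fprint(os.Stdout, ansi.FinalTermCmdFinished("0"))
}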
diff --git a/vendor/github.com/charmbracelet/x/ansi/graphics.go b/vendor/github.com/charmbracelet/x/ansi/graphics.go
index 604fef47..d4a693b7 100644
--- a/vendor/github.com/charmbracelet/x/ansi/graphics.go
+++ b/vendor/github.com/charmbracelet/x/ansi/graphics.go
@@ -2,17 +2,47 @@ package ansi
import (
"bytes"
- "encoding/base64"
- "errors"
- "fmt"
- "image"
- "io"
- "os"
+ "strconv"
"strings"
-
- "github.com/charmbracelet/x/ansi/kitty"
)
+// SixelGraphics returns a sequence that encodes the given sixel image payload to
+// a DCS sixel sequence.
+//
+// DCS p1; p2; p3; q [sixel payload] ST
+//
+// p1 = pixel aspect ratio, deprecated and replaced by pixel metrics in the payload
+//
+// p2 = This is supposed to be 0 for transparency, but terminals don't seem to
+// to use it properly. Value 0 leaves an unsightly black bar on all terminals
+// I've tried and looks correct with value 1.
+//
+// p3 = Horizontal grid size parameter. Everyone ignores this and uses a fixed grid
+// size, as far as I can tell.
+//
+// See https://shuford.invisible-island.net/all_about_sixels.txt
+func SixelGraphics(p1, p2, p3 int, payload []byte) string {
+ var buf bytes.Buffer
+
+ buf.WriteString("\x1bP")
+ if p1 >= 0 {
+ buf.WriteString(strconv.Itoa(p1))
+ }
+ buf.WriteByte(';')
+ if p2 >= 0 {
+ buf.WriteString(strconv.Itoa(p2))
+ }
+ if p3 > 0 {
+ buf.WriteByte(';')
+ buf.WriteString(strconv.Itoa(p3))
+ }
+ buf.WriteByte('q')
+ buf.Write(payload)
+ buf.WriteString("\x1b\\")
+
+ return buf.String()
+}
+
// KittyGraphics returns a sequence that encodes the given image in the Kitty
// graphics protocol.
//
@@ -30,170 +60,3 @@ func KittyGraphics(payload []byte, opts ...string) string {
buf.WriteString("\x1b\\")
return buf.String()
}
-
-var (
- // KittyGraphicsTempDir is the directory where temporary files are stored.
- // This is used in [WriteKittyGraphics] along with [os.CreateTemp].
- KittyGraphicsTempDir = ""
-
- // KittyGraphicsTempPattern is the pattern used to create temporary files.
- // This is used in [WriteKittyGraphics] along with [os.CreateTemp].
- // The Kitty Graphics protocol requires the file path to contain the
- // substring "tty-graphics-protocol".
- KittyGraphicsTempPattern = "tty-graphics-protocol-*"
-)
-
-// WriteKittyGraphics writes an image using the Kitty Graphics protocol with
-// the given options to w. It chunks the written data if o.Chunk is true.
-//
-// You can omit m and use nil when rendering an image from a file. In this
-// case, you must provide a file path in o.File and use o.Transmission =
-// [kitty.File]. You can also use o.Transmission = [kitty.TempFile] to write
-// the image to a temporary file. In that case, the file path is ignored, and
-// the image is written to a temporary file that is automatically deleted by
-// the terminal.
-//
-// See https://sw.kovidgoyal.net/kitty/graphics-protocol/
-func WriteKittyGraphics(w io.Writer, m image.Image, o *kitty.Options) error {
- if o == nil {
- o = &kitty.Options{}
- }
-
- if o.Transmission == 0 && len(o.File) != 0 {
- o.Transmission = kitty.File
- }
-
- var data bytes.Buffer // the data to be encoded into base64
- e := &kitty.Encoder{
- Compress: o.Compression == kitty.Zlib,
- Format: o.Format,
- }
-
- switch o.Transmission {
- case kitty.Direct:
- if err := e.Encode(&data, m); err != nil {
- return fmt.Errorf("failed to encode direct image: %w", err)
- }
-
- case kitty.SharedMemory:
- // TODO: Implement shared memory
- return fmt.Errorf("shared memory transmission is not yet implemented")
-
- case kitty.File:
- if len(o.File) == 0 {
- return kitty.ErrMissingFile
- }
-
- f, err := os.Open(o.File)
- if err != nil {
- return fmt.Errorf("failed to open file: %w", err)
- }
-
- defer f.Close() //nolint:errcheck
-
- stat, err := f.Stat()
- if err != nil {
- return fmt.Errorf("failed to get file info: %w", err)
- }
-
- mode := stat.Mode()
- if !mode.IsRegular() {
- return fmt.Errorf("file is not a regular file")
- }
-
- // Write the file path to the buffer
- if _, err := data.WriteString(f.Name()); err != nil {
- return fmt.Errorf("failed to write file path to buffer: %w", err)
- }
-
- case kitty.TempFile:
- f, err := os.CreateTemp(KittyGraphicsTempDir, KittyGraphicsTempPattern)
- if err != nil {
- return fmt.Errorf("failed to create file: %w", err)
- }
-
- defer f.Close() //nolint:errcheck
-
- if err := e.Encode(f, m); err != nil {
- return fmt.Errorf("failed to encode image to file: %w", err)
- }
-
- // Write the file path to the buffer
- if _, err := data.WriteString(f.Name()); err != nil {
- return fmt.Errorf("failed to write file path to buffer: %w", err)
- }
- }
-
- // Encode image to base64
- var payload bytes.Buffer // the base64 encoded image to be written to w
- b64 := base64.NewEncoder(base64.StdEncoding, &payload)
- if _, err := data.WriteTo(b64); err != nil {
- return fmt.Errorf("failed to write base64 encoded image to payload: %w", err)
- }
- if err := b64.Close(); err != nil {
- return err
- }
-
- // If not chunking, write all at once
- if !o.Chunk {
- _, err := io.WriteString(w, KittyGraphics(payload.Bytes(), o.Options()...))
- return err
- }
-
- // Write in chunks
- var (
- err error
- n int
- )
- chunk := make([]byte, kitty.MaxChunkSize)
- isFirstChunk := true
-
- for {
- // Stop if we read less than the chunk size [kitty.MaxChunkSize].
- n, err = io.ReadFull(&payload, chunk)
- if errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.EOF) {
- break
- }
- if err != nil {
- return fmt.Errorf("failed to read chunk: %w", err)
- }
-
- opts := buildChunkOptions(o, isFirstChunk, false)
- if _, err := io.WriteString(w, KittyGraphics(chunk[:n], opts...)); err != nil {
- return err
- }
-
- isFirstChunk = false
- }
-
- // Write the last chunk
- opts := buildChunkOptions(o, isFirstChunk, true)
- _, err = io.WriteString(w, KittyGraphics(chunk[:n], opts...))
- return err
-}
-
-// buildChunkOptions creates the options slice for a chunk
-func buildChunkOptions(o *kitty.Options, isFirstChunk, isLastChunk bool) []string {
- var opts []string
- if isFirstChunk {
- opts = o.Options()
- } else {
- // These options are allowed in subsequent chunks
- if o.Quite > 0 {
- opts = append(opts, fmt.Sprintf("q=%d", o.Quite))
- }
- if o.Action == kitty.Frame {
- opts = append(opts, "a=f")
- }
- }
-
- if !isFirstChunk || !isLastChunk {
- // We don't need to encode the (m=) option when we only have one chunk.
- if isLastChunk {
- opts = append(opts, "m=0")
- } else {
- opts = append(opts, "m=1")
- }
- }
- return opts
-}
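SixelGraphics only wraps an already-encoded sixel payload in the DCS framing; producing the payload itself is a separate concern. A hedged sketch of the wrapping step, assuming the usual github.com/charmbracelet/x/ansi import path for this vendored package (the payload bytes are a placeholder rather than a real image, and p2 is set to 1 per the transparency note in the function's comment):

package main

import (
	"os"

	"github.com/charmbracelet/x/ansi"
)

func main() {
	// Placeholder payload; in practice this comes from a sixel encoder.
	payload := []byte("...sixel data...")

	// p1=0 (deprecated aspect ratio), p2=1 (avoids the black-bar issue), p3=0 (omitted).
	os.Stdout.WriteString(ansi.SixelGraphics(0, 1, 0, payload))
}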
diff --git a/vendor/github.com/charmbracelet/x/ansi/kitty/decoder.go b/vendor/github.com/charmbracelet/x/ansi/kitty/decoder.go
deleted file mode 100644
index fbd08441..00000000
--- a/vendor/github.com/charmbracelet/x/ansi/kitty/decoder.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package kitty
-
-import (
- "compress/zlib"
- "fmt"
- "image"
- "image/color"
- "image/png"
- "io"
-)
-
-// Decoder is a decoder for the Kitty graphics protocol. It supports decoding
-// images in the 24-bit [RGB], 32-bit [RGBA], and [PNG] formats. It can also
-// decompress data using zlib.
-// The default format is 32-bit [RGBA].
-type Decoder struct {
- // Uses zlib decompression.
- Decompress bool
-
- // Can be one of [RGB], [RGBA], or [PNG].
- Format int
-
- // Width of the image in pixels. This can be omitted if the image is [PNG]
- // formatted.
- Width int
-
- // Height of the image in pixels. This can be omitted if the image is [PNG]
- // formatted.
- Height int
-}
-
-// Decode decodes the image data from r in the specified format.
-func (d *Decoder) Decode(r io.Reader) (image.Image, error) {
- if d.Decompress {
- zr, err := zlib.NewReader(r)
- if err != nil {
- return nil, fmt.Errorf("failed to create zlib reader: %w", err)
- }
-
- defer zr.Close() //nolint:errcheck
- r = zr
- }
-
- if d.Format == 0 {
- d.Format = RGBA
- }
-
- switch d.Format {
- case RGBA, RGB:
- return d.decodeRGBA(r, d.Format == RGBA)
-
- case PNG:
- return png.Decode(r)
-
- default:
- return nil, fmt.Errorf("unsupported format: %d", d.Format)
- }
-}
-
-// decodeRGBA decodes the image data in 32-bit RGBA or 24-bit RGB formats.
-func (d *Decoder) decodeRGBA(r io.Reader, alpha bool) (image.Image, error) {
- m := image.NewRGBA(image.Rect(0, 0, d.Width, d.Height))
-
- var buf []byte
- if alpha {
- buf = make([]byte, 4)
- } else {
- buf = make([]byte, 3)
- }
-
- for y := 0; y < d.Height; y++ {
- for x := 0; x < d.Width; x++ {
- if _, err := io.ReadFull(r, buf[:]); err != nil {
- return nil, fmt.Errorf("failed to read pixel data: %w", err)
- }
- if alpha {
- m.SetRGBA(x, y, color.RGBA{buf[0], buf[1], buf[2], buf[3]})
- } else {
- m.SetRGBA(x, y, color.RGBA{buf[0], buf[1], buf[2], 0xff})
- }
- }
- }
-
- return m, nil
-}
diff --git a/vendor/github.com/charmbracelet/x/ansi/kitty/encoder.go b/vendor/github.com/charmbracelet/x/ansi/kitty/encoder.go
deleted file mode 100644
index f668b9e3..00000000
--- a/vendor/github.com/charmbracelet/x/ansi/kitty/encoder.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package kitty
-
-import (
- "compress/zlib"
- "fmt"
- "image"
- "image/png"
- "io"
-)
-
-// Encoder is an encoder for the Kitty graphics protocol. It supports encoding
-// images in the 24-bit [RGB], 32-bit [RGBA], and [PNG] formats, and
-// compressing the data using zlib.
-// The default format is 32-bit [RGBA].
-type Encoder struct {
- // Uses zlib compression.
- Compress bool
-
- // Can be one of [RGBA], [RGB], or [PNG].
- Format int
-}
-
-// Encode encodes the image data in the specified format and writes it to w.
-func (e *Encoder) Encode(w io.Writer, m image.Image) error {
- if m == nil {
- return nil
- }
-
- if e.Compress {
- zw := zlib.NewWriter(w)
- defer zw.Close() //nolint:errcheck
- w = zw
- }
-
- if e.Format == 0 {
- e.Format = RGBA
- }
-
- switch e.Format {
- case RGBA, RGB:
- bounds := m.Bounds()
- for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
- for x := bounds.Min.X; x < bounds.Max.X; x++ {
- r, g, b, a := m.At(x, y).RGBA()
- switch e.Format {
- case RGBA:
- w.Write([]byte{byte(r >> 8), byte(g >> 8), byte(b >> 8), byte(a >> 8)}) //nolint:errcheck
- case RGB:
- w.Write([]byte{byte(r >> 8), byte(g >> 8), byte(b >> 8)}) //nolint:errcheck
- }
- }
- }
-
- case PNG:
- if err := png.Encode(w, m); err != nil {
- return fmt.Errorf("failed to encode PNG: %w", err)
- }
-
- default:
- return fmt.Errorf("unsupported format: %d", e.Format)
- }
-
- return nil
-}
diff --git a/vendor/github.com/charmbracelet/x/ansi/kitty/graphics.go b/vendor/github.com/charmbracelet/x/ansi/kitty/graphics.go
deleted file mode 100644
index 490e7a8a..00000000
--- a/vendor/github.com/charmbracelet/x/ansi/kitty/graphics.go
+++ /dev/null
@@ -1,414 +0,0 @@
-package kitty
-
-import "errors"
-
-// ErrMissingFile is returned when the file path is missing.
-var ErrMissingFile = errors.New("missing file path")
-
-// MaxChunkSize is the maximum chunk size for the image data.
-const MaxChunkSize = 1024 * 4
-
-// Placeholder is a special Unicode character that can be used as a placeholder
-// for an image.
-const Placeholder = '\U0010EEEE'
-
-// Graphics image format.
-const (
- // 32-bit RGBA format.
- RGBA = 32
-
- // 24-bit RGB format.
- RGB = 24
-
- // PNG format.
- PNG = 100
-)
-
-// Compression types.
-const (
- Zlib = 'z'
-)
-
-// Transmission types.
-const (
- // The data transmitted directly in the escape sequence.
- Direct = 'd'
-
- // The data transmitted in a regular file.
- File = 'f'
-
- // A temporary file is used and deleted after transmission.
- TempFile = 't'
-
- // A shared memory object.
- // For POSIX see https://pubs.opengroup.org/onlinepubs/9699919799/functions/shm_open.html
- // For Windows see https://docs.microsoft.com/en-us/windows/win32/memory/creating-named-shared-memory
- SharedMemory = 's'
-)
-
-// Action types.
-const (
- // Transmit image data.
- Transmit = 't'
- // TransmitAndPut transmit image data and display (put) it.
- TransmitAndPut = 'T'
- // Query terminal for image info.
- Query = 'q'
- // Put (display) previously transmitted image.
- Put = 'p'
- // Delete image.
- Delete = 'd'
- // Frame transmits data for animation frames.
- Frame = 'f'
- // Animate controls animation.
- Animate = 'a'
- // Compose composes animation frames.
- Compose = 'c'
-)
-
-// Delete types.
-const (
- // Delete all placements visible on screen
- DeleteAll = 'a'
- // Delete all images with the specified id, specified using the i key. If
- // you specify a p key for the placement id as well, then only the
- // placement with the specified image id and placement id will be deleted.
- DeleteID = 'i'
- // Delete newest image with the specified number, specified using the I
- // key. If you specify a p key for the placement id as well, then only the
- // placement with the specified number and placement id will be deleted.
- DeleteNumber = 'n'
- // Delete all placements that intersect with the current cursor position.
- DeleteCursor = 'c'
- // Delete animation frames.
- DeleteFrames = 'f'
- // Delete all placements that intersect a specific cell, the cell is
- // specified using the x and y keys
- DeleteCell = 'p'
- // Delete all placements that intersect a specific cell having a specific
- // z-index. The cell and z-index is specified using the x, y and z keys.
- DeleteCellZ = 'q'
- // Delete all images whose id is greater than or equal to the value of the x
- // key and less than or equal to the value of the y.
- DeleteRange = 'r'
- // Delete all placements that intersect the specified column, specified using
- // the x key.
- DeleteColumn = 'x'
- // Delete all placements that intersect the specified row, specified using
- // the y key.
- DeleteRow = 'y'
- // Delete all placements that have the specified z-index, specified using the
- // z key.
- DeleteZ = 'z'
-)
-
-// Diacritic returns the diacritic rune at the specified index. If the index is
-// out of bounds, the first diacritic rune is returned.
-func Diacritic(i int) rune {
- if i < 0 || i >= len(diacritics) {
- return diacritics[0]
- }
- return diacritics[i]
-}
-
-// From https://sw.kovidgoyal.net/kitty/_downloads/f0a0de9ec8d9ff4456206db8e0814937/rowcolumn-diacritics.txt
-// See https://sw.kovidgoyal.net/kitty/graphics-protocol/#unicode-placeholders for further explanation.
-var diacritics = []rune{
- '\u0305',
- '\u030D',
- '\u030E',
- '\u0310',
- '\u0312',
- '\u033D',
- '\u033E',
- '\u033F',
- '\u0346',
- '\u034A',
- '\u034B',
- '\u034C',
- '\u0350',
- '\u0351',
- '\u0352',
- '\u0357',
- '\u035B',
- '\u0363',
- '\u0364',
- '\u0365',
- '\u0366',
- '\u0367',
- '\u0368',
- '\u0369',
- '\u036A',
- '\u036B',
- '\u036C',
- '\u036D',
- '\u036E',
- '\u036F',
- '\u0483',
- '\u0484',
- '\u0485',
- '\u0486',
- '\u0487',
- '\u0592',
- '\u0593',
- '\u0594',
- '\u0595',
- '\u0597',
- '\u0598',
- '\u0599',
- '\u059C',
- '\u059D',
- '\u059E',
- '\u059F',
- '\u05A0',
- '\u05A1',
- '\u05A8',
- '\u05A9',
- '\u05AB',
- '\u05AC',
- '\u05AF',
- '\u05C4',
- '\u0610',
- '\u0611',
- '\u0612',
- '\u0613',
- '\u0614',
- '\u0615',
- '\u0616',
- '\u0617',
- '\u0657',
- '\u0658',
- '\u0659',
- '\u065A',
- '\u065B',
- '\u065D',
- '\u065E',
- '\u06D6',
- '\u06D7',
- '\u06D8',
- '\u06D9',
- '\u06DA',
- '\u06DB',
- '\u06DC',
- '\u06DF',
- '\u06E0',
- '\u06E1',
- '\u06E2',
- '\u06E4',
- '\u06E7',
- '\u06E8',
- '\u06EB',
- '\u06EC',
- '\u0730',
- '\u0732',
- '\u0733',
- '\u0735',
- '\u0736',
- '\u073A',
- '\u073D',
- '\u073F',
- '\u0740',
- '\u0741',
- '\u0743',
- '\u0745',
- '\u0747',
- '\u0749',
- '\u074A',
- '\u07EB',
- '\u07EC',
- '\u07ED',
- '\u07EE',
- '\u07EF',
- '\u07F0',
- '\u07F1',
- '\u07F3',
- '\u0816',
- '\u0817',
- '\u0818',
- '\u0819',
- '\u081B',
- '\u081C',
- '\u081D',
- '\u081E',
- '\u081F',
- '\u0820',
- '\u0821',
- '\u0822',
- '\u0823',
- '\u0825',
- '\u0826',
- '\u0827',
- '\u0829',
- '\u082A',
- '\u082B',
- '\u082C',
- '\u082D',
- '\u0951',
- '\u0953',
- '\u0954',
- '\u0F82',
- '\u0F83',
- '\u0F86',
- '\u0F87',
- '\u135D',
- '\u135E',
- '\u135F',
- '\u17DD',
- '\u193A',
- '\u1A17',
- '\u1A75',
- '\u1A76',
- '\u1A77',
- '\u1A78',
- '\u1A79',
- '\u1A7A',
- '\u1A7B',
- '\u1A7C',
- '\u1B6B',
- '\u1B6D',
- '\u1B6E',
- '\u1B6F',
- '\u1B70',
- '\u1B71',
- '\u1B72',
- '\u1B73',
- '\u1CD0',
- '\u1CD1',
- '\u1CD2',
- '\u1CDA',
- '\u1CDB',
- '\u1CE0',
- '\u1DC0',
- '\u1DC1',
- '\u1DC3',
- '\u1DC4',
- '\u1DC5',
- '\u1DC6',
- '\u1DC7',
- '\u1DC8',
- '\u1DC9',
- '\u1DCB',
- '\u1DCC',
- '\u1DD1',
- '\u1DD2',
- '\u1DD3',
- '\u1DD4',
- '\u1DD5',
- '\u1DD6',
- '\u1DD7',
- '\u1DD8',
- '\u1DD9',
- '\u1DDA',
- '\u1DDB',
- '\u1DDC',
- '\u1DDD',
- '\u1DDE',
- '\u1DDF',
- '\u1DE0',
- '\u1DE1',
- '\u1DE2',
- '\u1DE3',
- '\u1DE4',
- '\u1DE5',
- '\u1DE6',
- '\u1DFE',
- '\u20D0',
- '\u20D1',
- '\u20D4',
- '\u20D5',
- '\u20D6',
- '\u20D7',
- '\u20DB',
- '\u20DC',
- '\u20E1',
- '\u20E7',
- '\u20E9',
- '\u20F0',
- '\u2CEF',
- '\u2CF0',
- '\u2CF1',
- '\u2DE0',
- '\u2DE1',
- '\u2DE2',
- '\u2DE3',
- '\u2DE4',
- '\u2DE5',
- '\u2DE6',
- '\u2DE7',
- '\u2DE8',
- '\u2DE9',
- '\u2DEA',
- '\u2DEB',
- '\u2DEC',
- '\u2DED',
- '\u2DEE',
- '\u2DEF',
- '\u2DF0',
- '\u2DF1',
- '\u2DF2',
- '\u2DF3',
- '\u2DF4',
- '\u2DF5',
- '\u2DF6',
- '\u2DF7',
- '\u2DF8',
- '\u2DF9',
- '\u2DFA',
- '\u2DFB',
- '\u2DFC',
- '\u2DFD',
- '\u2DFE',
- '\u2DFF',
- '\uA66F',
- '\uA67C',
- '\uA67D',
- '\uA6F0',
- '\uA6F1',
- '\uA8E0',
- '\uA8E1',
- '\uA8E2',
- '\uA8E3',
- '\uA8E4',
- '\uA8E5',
- '\uA8E6',
- '\uA8E7',
- '\uA8E8',
- '\uA8E9',
- '\uA8EA',
- '\uA8EB',
- '\uA8EC',
- '\uA8ED',
- '\uA8EE',
- '\uA8EF',
- '\uA8F0',
- '\uA8F1',
- '\uAAB0',
- '\uAAB2',
- '\uAAB3',
- '\uAAB7',
- '\uAAB8',
- '\uAABE',
- '\uAABF',
- '\uAAC1',
- '\uFE20',
- '\uFE21',
- '\uFE22',
- '\uFE23',
- '\uFE24',
- '\uFE25',
- '\uFE26',
- '\U00010A0F',
- '\U00010A38',
- '\U0001D185',
- '\U0001D186',
- '\U0001D187',
- '\U0001D188',
- '\U0001D189',
- '\U0001D1AA',
- '\U0001D1AB',
- '\U0001D1AC',
- '\U0001D1AD',
- '\U0001D242',
- '\U0001D243',
- '\U0001D244',
-}
diff --git a/vendor/github.com/charmbracelet/x/ansi/kitty/options.go b/vendor/github.com/charmbracelet/x/ansi/kitty/options.go
deleted file mode 100644
index a8d907bd..00000000
--- a/vendor/github.com/charmbracelet/x/ansi/kitty/options.go
+++ /dev/null
@@ -1,367 +0,0 @@
-package kitty
-
-import (
- "encoding"
- "fmt"
- "strconv"
- "strings"
-)
-
-var (
- _ encoding.TextMarshaler = Options{}
- _ encoding.TextUnmarshaler = &Options{}
-)
-
-// Options represents a Kitty Graphics Protocol options.
-type Options struct {
- // Common options.
-
- // Action (a=t) is the action to be performed on the image. Can be one of
- // [Transmit], [TransmitDisplay], [Query], [Put], [Delete], [Frame],
- // [Animate], [Compose].
- Action byte
-
- // Quite mode (q=0) is the quiet mode. Can be either zero, one, or two
- // where zero is the default, 1 suppresses OK responses, and 2 suppresses
- // both OK and error responses.
- Quite byte
-
- // Transmission options.
-
- // ID (i=) is the image ID. The ID is a unique identifier for the image.
- // Must be a positive integer up to [math.MaxUint32].
- ID int
-
- // PlacementID (p=) is the placement ID. The placement ID is a unique
- // identifier for the placement of the image. Must be a positive integer up
- // to [math.MaxUint32].
- PlacementID int
-
- // Number (I=0) is the number of images to be transmitted.
- Number int
-
- // Format (f=32) is the image format. One of [RGBA], [RGB], [PNG].
- Format int
-
- // ImageWidth (s=0) is the transmitted image width.
- ImageWidth int
-
- // ImageHeight (v=0) is the transmitted image height.
- ImageHeight int
-
- // Compression (o=) is the image compression type. Can be [Zlib] or zero.
- Compression byte
-
- // Transmission (t=d) is the image transmission type. Can be [Direct], [File],
- // [TempFile], or[SharedMemory].
- Transmission byte
-
- // File is the file path to be used when the transmission type is [File].
- // If [Options.Transmission] is omitted i.e. zero and this is non-empty,
- // the transmission type is set to [File].
- File string
-
- // Size (S=0) is the size to be read from the transmission medium.
- Size int
-
- // Offset (O=0) is the offset byte to start reading from the transmission
- // medium.
- Offset int
-
- // Chunk (m=) whether the image is transmitted in chunks. Can be either
- // zero or one. When true, the image is transmitted in chunks. Each chunk
- // must be a multiple of 4, and up to [MaxChunkSize] bytes. Each chunk must
- // have the m=1 option except for the last chunk which must have m=0.
- Chunk bool
-
- // Display options.
-
- // X (x=0) is the pixel X coordinate of the image to start displaying.
- X int
-
- // Y (y=0) is the pixel Y coordinate of the image to start displaying.
- Y int
-
- // Z (z=0) is the Z coordinate of the image to display.
- Z int
-
- // Width (w=0) is the width of the image to display.
- Width int
-
- // Height (h=0) is the height of the image to display.
- Height int
-
- // OffsetX (X=0) is the OffsetX coordinate of the cursor cell to start
- // displaying the image. OffsetX=0 is the leftmost cell. This must be
- // smaller than the terminal cell width.
- OffsetX int
-
- // OffsetY (Y=0) is the OffsetY coordinate of the cursor cell to start
- // displaying the image. OffsetY=0 is the topmost cell. This must be
- // smaller than the terminal cell height.
- OffsetY int
-
- // Columns (c=0) is the number of columns to display the image. The image
- // will be scaled to fit the number of columns.
- Columns int
-
- // Rows (r=0) is the number of rows to display the image. The image will be
- // scaled to fit the number of rows.
- Rows int
-
- // VirtualPlacement (U=0) whether to use virtual placement. This is used
- // with Unicode [Placeholder] to display images.
- VirtualPlacement bool
-
- // DoNotMoveCursor (C=0) whether to move the cursor after displaying the
- // image.
- DoNotMoveCursor bool
-
- // ParentID (P=0) is the parent image ID. The parent ID is the ID of the
- // image that is the parent of the current image. This is used with Unicode
- // [Placeholder] to display images relative to the parent image.
- ParentID int
-
- // ParentPlacementID (Q=0) is the parent placement ID. The parent placement
- // ID is the ID of the placement of the parent image. This is used with
- // Unicode [Placeholder] to display images relative to the parent image.
- ParentPlacementID int
-
- // Delete options.
-
- // Delete (d=a) is the delete action. Can be one of [DeleteAll],
- // [DeleteID], [DeleteNumber], [DeleteCursor], [DeleteFrames],
- // [DeleteCell], [DeleteCellZ], [DeleteRange], [DeleteColumn], [DeleteRow],
- // [DeleteZ].
- Delete byte
-
- // DeleteResources indicates whether to delete the resources associated
- // with the image.
- DeleteResources bool
-}
-
-// Options returns the options as a slice of a key-value pairs.
-func (o *Options) Options() (opts []string) {
- opts = []string{}
- if o.Format == 0 {
- o.Format = RGBA
- }
-
- if o.Action == 0 {
- o.Action = Transmit
- }
-
- if o.Delete == 0 {
- o.Delete = DeleteAll
- }
-
- if o.Transmission == 0 {
- if len(o.File) > 0 {
- o.Transmission = File
- } else {
- o.Transmission = Direct
- }
- }
-
- if o.Format != RGBA {
- opts = append(opts, fmt.Sprintf("f=%d", o.Format))
- }
-
- if o.Quite > 0 {
- opts = append(opts, fmt.Sprintf("q=%d", o.Quite))
- }
-
- if o.ID > 0 {
- opts = append(opts, fmt.Sprintf("i=%d", o.ID))
- }
-
- if o.PlacementID > 0 {
- opts = append(opts, fmt.Sprintf("p=%d", o.PlacementID))
- }
-
- if o.Number > 0 {
- opts = append(opts, fmt.Sprintf("I=%d", o.Number))
- }
-
- if o.ImageWidth > 0 {
- opts = append(opts, fmt.Sprintf("s=%d", o.ImageWidth))
- }
-
- if o.ImageHeight > 0 {
- opts = append(opts, fmt.Sprintf("v=%d", o.ImageHeight))
- }
-
- if o.Transmission != Direct {
- opts = append(opts, fmt.Sprintf("t=%c", o.Transmission))
- }
-
- if o.Size > 0 {
- opts = append(opts, fmt.Sprintf("S=%d", o.Size))
- }
-
- if o.Offset > 0 {
- opts = append(opts, fmt.Sprintf("O=%d", o.Offset))
- }
-
- if o.Compression == Zlib {
- opts = append(opts, fmt.Sprintf("o=%c", o.Compression))
- }
-
- if o.VirtualPlacement {
- opts = append(opts, "U=1")
- }
-
- if o.DoNotMoveCursor {
- opts = append(opts, "C=1")
- }
-
- if o.ParentID > 0 {
- opts = append(opts, fmt.Sprintf("P=%d", o.ParentID))
- }
-
- if o.ParentPlacementID > 0 {
- opts = append(opts, fmt.Sprintf("Q=%d", o.ParentPlacementID))
- }
-
- if o.X > 0 {
- opts = append(opts, fmt.Sprintf("x=%d", o.X))
- }
-
- if o.Y > 0 {
- opts = append(opts, fmt.Sprintf("y=%d", o.Y))
- }
-
- if o.Z > 0 {
- opts = append(opts, fmt.Sprintf("z=%d", o.Z))
- }
-
- if o.Width > 0 {
- opts = append(opts, fmt.Sprintf("w=%d", o.Width))
- }
-
- if o.Height > 0 {
- opts = append(opts, fmt.Sprintf("h=%d", o.Height))
- }
-
- if o.OffsetX > 0 {
- opts = append(opts, fmt.Sprintf("X=%d", o.OffsetX))
- }
-
- if o.OffsetY > 0 {
- opts = append(opts, fmt.Sprintf("Y=%d", o.OffsetY))
- }
-
- if o.Columns > 0 {
- opts = append(opts, fmt.Sprintf("c=%d", o.Columns))
- }
-
- if o.Rows > 0 {
- opts = append(opts, fmt.Sprintf("r=%d", o.Rows))
- }
-
- if o.Delete != DeleteAll || o.DeleteResources {
- da := o.Delete
- if o.DeleteResources {
- da = da - ' ' // to uppercase
- }
-
- opts = append(opts, fmt.Sprintf("d=%c", da))
- }
-
- if o.Action != Transmit {
- opts = append(opts, fmt.Sprintf("a=%c", o.Action))
- }
-
- return
-}
-
-// String returns the string representation of the options.
-func (o Options) String() string {
- return strings.Join(o.Options(), ",")
-}
-
-// MarshalText returns the string representation of the options.
-func (o Options) MarshalText() ([]byte, error) {
- return []byte(o.String()), nil
-}
-
-// UnmarshalText parses the options from the given string.
-func (o *Options) UnmarshalText(text []byte) error {
- opts := strings.Split(string(text), ",")
- for _, opt := range opts {
- ps := strings.SplitN(opt, "=", 2)
- if len(ps) != 2 || len(ps[1]) == 0 {
- continue
- }
-
- switch ps[0] {
- case "a":
- o.Action = ps[1][0]
- case "o":
- o.Compression = ps[1][0]
- case "t":
- o.Transmission = ps[1][0]
- case "d":
- d := ps[1][0]
- if d >= 'A' && d <= 'Z' {
- o.DeleteResources = true
- d = d + ' ' // to lowercase
- }
- o.Delete = d
- case "i", "q", "p", "I", "f", "s", "v", "S", "O", "m", "x", "y", "z", "w", "h", "X", "Y", "c", "r", "U", "P", "Q":
- v, err := strconv.Atoi(ps[1])
- if err != nil {
- continue
- }
-
- switch ps[0] {
- case "i":
- o.ID = v
- case "q":
- o.Quite = byte(v)
- case "p":
- o.PlacementID = v
- case "I":
- o.Number = v
- case "f":
- o.Format = v
- case "s":
- o.ImageWidth = v
- case "v":
- o.ImageHeight = v
- case "S":
- o.Size = v
- case "O":
- o.Offset = v
- case "m":
- o.Chunk = v == 0 || v == 1
- case "x":
- o.X = v
- case "y":
- o.Y = v
- case "z":
- o.Z = v
- case "w":
- o.Width = v
- case "h":
- o.Height = v
- case "X":
- o.OffsetX = v
- case "Y":
- o.OffsetY = v
- case "c":
- o.Columns = v
- case "r":
- o.Rows = v
- case "U":
- o.VirtualPlacement = v == 1
- case "P":
- o.ParentID = v
- case "Q":
- o.ParentPlacementID = v
- }
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/charmbracelet/x/ansi/mode.go b/vendor/github.com/charmbracelet/x/ansi/mode.go
index 57f3f0a8..03c91108 100644
--- a/vendor/github.com/charmbracelet/x/ansi/mode.go
+++ b/vendor/github.com/charmbracelet/x/ansi/mode.go
@@ -48,7 +48,7 @@ type Mode interface {
Mode() int
}
-// SetMode (SM) returns a sequence to set a mode.
+// SetMode (SM) or (DECSET) returns a sequence to set a mode.
// The mode arguments are a list of modes to set.
//
// If one of the modes is a [DECMode], the function will return two escape
@@ -72,7 +72,12 @@ func SM(modes ...Mode) string {
return SetMode(modes...)
}
-// ResetMode (RM) returns a sequence to reset a mode.
+// DECSET is an alias for [SetMode].
+func DECSET(modes ...Mode) string {
+ return SetMode(modes...)
+}
+
+// ResetMode (RM) or (DECRST) returns a sequence to reset a mode.
// The mode arguments are a list of modes to reset.
//
// If one of the modes is a [DECMode], the function will return two escape
@@ -96,9 +101,14 @@ func RM(modes ...Mode) string {
return ResetMode(modes...)
}
+// DECRST is an alias for [ResetMode].
+func DECRST(modes ...Mode) string {
+ return ResetMode(modes...)
+}
+
func setMode(reset bool, modes ...Mode) (s string) {
if len(modes) == 0 {
- return
+ return //nolint:nakedret
}
cmd := "h"
@@ -132,7 +142,7 @@ func setMode(reset bool, modes ...Mode) (s string) {
if len(dec) > 0 {
s += seq + "?" + strings.Join(dec, ";") + cmd
}
- return
+ return //nolint:nakedret
}
// RequestMode (DECRQM) returns a sequence to request a mode from the terminal.
@@ -243,6 +253,21 @@ const (
RequestInsertReplaceMode = "\x1b[4$p"
)
+// BiDirectional Support Mode (BDSM) is a mode that determines whether the
+// terminal supports bidirectional text. When enabled, the terminal supports
+// bidirectional text and is set to implicit bidirectional mode. When disabled,
+// the terminal does not support bidirectional text.
+//
+// See ECMA-48 7.2.1.
+const (
+ BiDirectionalSupportMode = ANSIMode(8)
+ BDSM = BiDirectionalSupportMode
+
+ SetBiDirectionalSupportMode = "\x1b[8h"
+ ResetBiDirectionalSupportMode = "\x1b[8l"
+ RequestBiDirectionalSupportMode = "\x1b[8$p"
+)
+
// Send Receive Mode (SRM) or Local Echo Mode is a mode that determines whether
// the terminal echoes characters back to the host. When enabled, the terminal
// sends characters to the host as they are typed.
@@ -297,7 +322,7 @@ const (
// Deprecated: use [SetCursorKeysMode] and [ResetCursorKeysMode] instead.
const (
- EnableCursorKeys = "\x1b[?1h"
+ EnableCursorKeys = "\x1b[?1h" //nolint:revive // grouped constants
DisableCursorKeys = "\x1b[?1l"
)
@@ -548,8 +573,9 @@ const (
// Deprecated: use [SetFocusEventMode], [ResetFocusEventMode], and
// [RequestFocusEventMode] instead.
+// Focus reporting mode constants.
const (
- ReportFocusMode = DECMode(1004)
+ ReportFocusMode = DECMode(1004) //nolint:revive // grouped constants
EnableReportFocus = "\x1b[?1004h"
DisableReportFocus = "\x1b[?1004l"
@@ -577,7 +603,7 @@ const (
// Deprecated: use [SgrExtMouseMode] [SetSgrExtMouseMode],
// [ResetSgrExtMouseMode], and [RequestSgrExtMouseMode] instead.
const (
- MouseSgrExtMode = DECMode(1006)
+ MouseSgrExtMode = DECMode(1006) //nolint:revive // grouped constants
EnableMouseSgrExt = "\x1b[?1006h"
DisableMouseSgrExt = "\x1b[?1006l"
RequestMouseSgrExt = "\x1b[?1006$p"
@@ -693,7 +719,7 @@ const (
// Deprecated: use [SetBracketedPasteMode], [ResetBracketedPasteMode], and
// [RequestBracketedPasteMode] instead.
const (
- EnableBracketedPaste = "\x1b[?2004h"
+ EnableBracketedPaste = "\x1b[?2004h" //nolint:revive // grouped constants
DisableBracketedPaste = "\x1b[?2004l"
RequestBracketedPaste = "\x1b[?2004$p"
)
@@ -710,6 +736,8 @@ const (
RequestSynchronizedOutputMode = "\x1b[?2026$p"
)
+// Synchronized Output Mode. See [SynchronizedOutputMode].
+//
// Deprecated: use [SynchronizedOutputMode], [SetSynchronizedOutputMode], and
// [ResetSynchronizedOutputMode], and [RequestSynchronizedOutputMode] instead.
const (
@@ -720,12 +748,28 @@ const (
RequestSyncdOutput = "\x1b[?2026$p"
)
+// Unicode Core Mode is a mode that determines whether the terminal should use
+// Unicode grapheme clustering to calculate the width of glyphs for each
+// terminal cell.
+//
+// See: https://github.com/contour-terminal/terminal-unicode-core
+const (
+ UnicodeCoreMode = DECMode(2027)
+
+ SetUnicodeCoreMode = "\x1b[?2027h"
+ ResetUnicodeCoreMode = "\x1b[?2027l"
+ RequestUnicodeCoreMode = "\x1b[?2027$p"
+)
+
// Grapheme Clustering Mode is a mode that determines whether the terminal
// should look for grapheme clusters instead of single runes in the rendered
// text. This makes the terminal properly render combining characters such as
// emojis.
//
// See: https://github.com/contour-terminal/terminal-unicode-core
+//
+// Deprecated: use [GraphemeClusteringMode], [SetUnicodeCoreMode],
+// [ResetUnicodeCoreMode], and [RequestUnicodeCoreMode] instead.
const (
GraphemeClusteringMode = DECMode(2027)
@@ -734,14 +778,54 @@ const (
RequestGraphemeClusteringMode = "\x1b[?2027$p"
)
-// Deprecated: use [SetGraphemeClusteringMode], [ResetGraphemeClusteringMode], and
-// [RequestGraphemeClusteringMode] instead.
+// Grapheme Clustering Mode. See [GraphemeClusteringMode].
+//
+// Deprecated: use [SetUnicodeCoreMode], [ResetUnicodeCoreMode], and
+// [RequestUnicodeCoreMode] instead.
const (
EnableGraphemeClustering = "\x1b[?2027h"
DisableGraphemeClustering = "\x1b[?2027l"
RequestGraphemeClustering = "\x1b[?2027$p"
)
+// LightDarkMode is a mode that enables reporting the operating system's color
+// scheme (light or dark) preference. It reports the color scheme as [DSR]
+// and [LightDarkReport] escape sequences encoded as follows:
+//
+// CSI ? 997 ; 1 n for dark mode
+// CSI ? 997 ; 2 n for light mode
+//
+// The color preference can also be requested via the following [DSR] and
+// [RequestLightDarkReport] escape sequences:
+//
+// CSI ? 996 n
+//
+// See: https://contour-terminal.org/vt-extensions/color-palette-update-notifications/
+const (
+ LightDarkMode = DECMode(2031)
+
+ SetLightDarkMode = "\x1b[?2031h"
+ ResetLightDarkMode = "\x1b[?2031l"
+ RequestLightDarkMode = "\x1b[?2031$p"
+)
+
+// InBandResizeMode is a mode that reports terminal resize events as escape
+// sequences. This is useful for systems that do not support [SIGWINCH] like
+// Windows.
+//
+// The terminal then sends the following encoding:
+//
+// CSI 48 ; cellsHeight ; cellsWidth ; pixelHeight ; pixelWidth t
+//
+// See: https://gist.github.com/rockorager/e695fb2924d36b2bcf1fff4a3704bd83
+const (
+ InBandResizeMode = DECMode(2048)
+
+ SetInBandResizeMode = "\x1b[?2048h"
+ ResetInBandResizeMode = "\x1b[?2048l"
+ RequestInBandResizeMode = "\x1b[?2048$p"
+)
+
// Win32Input is a mode that determines whether input is processed by the
// Win32 console and Conpty.
//
@@ -757,7 +841,7 @@ const (
// Deprecated: use [SetWin32InputMode], [ResetWin32InputMode], and
// [RequestWin32InputMode] instead.
const (
- EnableWin32Input = "\x1b[?9001h"
+ EnableWin32Input = "\x1b[?9001h" //nolint:revive // grouped constants
DisableWin32Input = "\x1b[?9001l"
RequestWin32Input = "\x1b[?9001$p"
)
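DECSET and DECRST are thin aliases over SetMode and ResetMode, so the new DEC mode constants above can be passed to them directly. A minimal sketch, assuming the usual github.com/charmbracelet/x/ansi import path for this vendored package; the particular modes chosen here are only illustrative:

package main

import (
	"os"

	"github.com/charmbracelet/x/ansi"
)

func main() {
	// Ask the terminal for in-band resize events and light/dark scheme reports,
	// and put both modes back when the program exits.
	os.Stdout.WriteString(ansi.DECSET(ansi.InBandResizeMode, ansi.LightDarkMode))
	defer os.Stdout.WriteString(ansi.DECRST(ansi.InBandResizeMode, ansi.LightDarkMode))

	// ... application loop would run here ...
}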
diff --git a/vendor/github.com/charmbracelet/x/ansi/modes.go b/vendor/github.com/charmbracelet/x/ansi/modes.go
index 1bec5bc8..6856d35e 100644
--- a/vendor/github.com/charmbracelet/x/ansi/modes.go
+++ b/vendor/github.com/charmbracelet/x/ansi/modes.go
@@ -4,12 +4,6 @@ package ansi
// all modes are [ModeNotRecognized].
type Modes map[Mode]ModeSetting
-// NewModes creates a new Modes map. By default, all modes are
-// [ModeNotRecognized].
-func NewModes() Modes {
- return make(Modes)
-}
-
// Get returns the setting of a terminal mode. If the mode is not set, it
// returns [ModeNotRecognized].
func (m Modes) Get(mode Mode) ModeSetting {
diff --git a/vendor/github.com/charmbracelet/x/ansi/mouse.go b/vendor/github.com/charmbracelet/x/ansi/mouse.go
index 95b0127f..0e4776bb 100644
--- a/vendor/github.com/charmbracelet/x/ansi/mouse.go
+++ b/vendor/github.com/charmbracelet/x/ansi/mouse.go
@@ -134,7 +134,7 @@ func EncodeMouseButton(b MouseButton, motion, shift, alt, ctrl bool) (m byte) {
m |= bitMotion
}
- return
+ return //nolint:nakedret
}
// x10Offset is the offset for X10 mouse events.
diff --git a/vendor/github.com/charmbracelet/x/ansi/parser.go b/vendor/github.com/charmbracelet/x/ansi/parser.go
index 882e1ed7..e770c15f 100644
--- a/vendor/github.com/charmbracelet/x/ansi/parser.go
+++ b/vendor/github.com/charmbracelet/x/ansi/parser.go
@@ -150,7 +150,7 @@ func (p *Parser) StateName() string {
// Parse parses the given dispatcher and byte buffer.
// Deprecated: Loop over the buffer and call [Parser.Advance] instead.
func (p *Parser) Parse(b []byte) {
- for i := 0; i < len(b); i++ {
+ for i := range b {
p.Advance(b[i])
}
}
@@ -245,7 +245,7 @@ func (p *Parser) parseStringCmd() {
if p.dataLen >= 0 {
datalen = p.dataLen
}
- for i := 0; i < datalen; i++ {
+ for i := range datalen {
d := p.data[i]
if d < '0' || d > '9' {
break
diff --git a/vendor/github.com/charmbracelet/x/ansi/parser/const.go b/vendor/github.com/charmbracelet/x/ansi/parser/const.go
index d62dbf37..85c90869 100644
--- a/vendor/github.com/charmbracelet/x/ansi/parser/const.go
+++ b/vendor/github.com/charmbracelet/x/ansi/parser/const.go
@@ -1,3 +1,4 @@
+// Package parser provides ANSI escape sequence parsing functionality.
package parser
// Action is a DEC ANSI parser action.
@@ -19,7 +20,7 @@ const (
IgnoreAction = NoneAction
)
-// nolint: unused
+// ActionNames provides string names for parser actions.
var ActionNames = []string{
"NoneAction",
"ClearAction",
@@ -58,7 +59,7 @@ const (
Utf8State
)
-// nolint: unused
+// StateNames provides string names for parser states.
var StateNames = []string{
"GroundState",
"CsiEntryState",
diff --git a/vendor/github.com/charmbracelet/x/ansi/parser/seq.go b/vendor/github.com/charmbracelet/x/ansi/parser/seq.go
index 29f491d1..de7e15e6 100644
--- a/vendor/github.com/charmbracelet/x/ansi/parser/seq.go
+++ b/vendor/github.com/charmbracelet/x/ansi/parser/seq.go
@@ -78,7 +78,7 @@ func Subparams(params []int, i int) []int {
// Count the number of parameters before the given parameter index.
var count int
var j int
- for j = 0; j < len(params); j++ {
+ for j = range params {
if count == i {
break
}
@@ -116,7 +116,7 @@ func Subparams(params []int, i int) []int {
// sub-parameters.
func Len(params []int) int {
var n int
- for i := 0; i < len(params); i++ {
+ for i := range params {
if !HasMore(params, i) {
n++
}
@@ -128,7 +128,7 @@ func Len(params []int) int {
// function for each parameter.
// The function should return false to stop the iteration.
func Range(params []int, fn func(i int, param int, hasMore bool) bool) {
- for i := 0; i < len(params); i++ {
+ for i := range params {
if !fn(i, Param(params, i), HasMore(params, i)) {
break
}
diff --git a/vendor/github.com/charmbracelet/x/ansi/parser/transition_table.go b/vendor/github.com/charmbracelet/x/ansi/parser/transition_table.go
index 558a5eac..ef46b7b6 100644
--- a/vendor/github.com/charmbracelet/x/ansi/parser/transition_table.go
+++ b/vendor/github.com/charmbracelet/x/ansi/parser/transition_table.go
@@ -30,7 +30,7 @@ func NewTransitionTable(size int) TransitionTable {
// SetDefault sets default transition.
func (t TransitionTable) SetDefault(action Action, state State) {
- for i := 0; i < len(t); i++ {
+ for i := range t {
		t[i] = action<<TransitionActionShift | state
	}
}
-// byte range macro
+// byte range macro.
func r(start, end byte) []byte {
var a []byte
for i := int(start); i <= int(end); i++ {
diff --git a/vendor/github.com/charmbracelet/x/ansi/parser_decode.go b/vendor/github.com/charmbracelet/x/ansi/parser_decode.go
index 3e504739..dfd2dc76 100644
--- a/vendor/github.com/charmbracelet/x/ansi/parser_decode.go
+++ b/vendor/github.com/charmbracelet/x/ansi/parser_decode.go
@@ -359,7 +359,7 @@ func parseOscCmd(p *Parser) {
if p == nil || p.cmd != parser.MissingCommand {
return
}
- for j := 0; j < p.dataLen; j++ {
+ for j := range p.dataLen {
d := p.data[j]
if d < '0' || d > '9' {
break
diff --git a/vendor/github.com/charmbracelet/x/ansi/passthrough.go b/vendor/github.com/charmbracelet/x/ansi/passthrough.go
index 14a74522..7ac7cef1 100644
--- a/vendor/github.com/charmbracelet/x/ansi/passthrough.go
+++ b/vendor/github.com/charmbracelet/x/ansi/passthrough.go
@@ -21,10 +21,7 @@ func ScreenPassthrough(seq string, limit int) string {
b.WriteString("\x1bP")
if limit > 0 {
for i := 0; i < len(seq); i += limit {
- end := i + limit
- if end > len(seq) {
- end = len(seq)
- }
+ end := min(i+limit, len(seq))
b.WriteString(seq[i:end])
if end < len(seq) {
b.WriteString("\x1b\\\x1bP")
@@ -52,7 +49,7 @@ func ScreenPassthrough(seq string, limit int) string {
func TmuxPassthrough(seq string) string {
var b bytes.Buffer
b.WriteString("\x1bPtmux;")
- for i := 0; i < len(seq); i++ {
+ for i := range len(seq) {
if seq[i] == ESC {
b.WriteByte(ESC)
}
diff --git a/vendor/github.com/charmbracelet/x/ansi/screen.go b/vendor/github.com/charmbracelet/x/ansi/screen.go
index c76e4f0d..e2027dff 100644
--- a/vendor/github.com/charmbracelet/x/ansi/screen.go
+++ b/vendor/github.com/charmbracelet/x/ansi/screen.go
@@ -351,7 +351,7 @@ func DECRQPSR(n int) string {
//
// See: https://vt100.net/docs/vt510-rm/DECTABSR.html
func TabStopReport(stops ...int) string {
- var s []string
+ var s []string //nolint:prealloc
for _, v := range stops {
s = append(s, strconv.Itoa(v))
}
@@ -376,7 +376,7 @@ func DECTABSR(stops ...int) string {
//
// See: https://vt100.net/docs/vt510-rm/DECCIR.html
func CursorInformationReport(values ...int) string {
- var s []string
+ var s []string //nolint:prealloc
for _, v := range values {
s = append(s, strconv.Itoa(v))
}
@@ -395,7 +395,7 @@ func DECCIR(values ...int) string {
//
// CSI Pn b
//
-// See: ECMA-48 § 8.3.103
+// See: ECMA-48 § 8.3.103.
func RepeatPreviousCharacter(n int) string {
var s string
if n > 1 {
diff --git a/vendor/github.com/charmbracelet/x/ansi/sgr.go b/vendor/github.com/charmbracelet/x/ansi/sgr.go
index 1a18c98e..5e6d05df 100644
--- a/vendor/github.com/charmbracelet/x/ansi/sgr.go
+++ b/vendor/github.com/charmbracelet/x/ansi/sgr.go
@@ -1,8 +1,6 @@
package ansi
-import "strconv"
-
-// Select Graphic Rendition (SGR) is a command that sets display attributes.
+// SelectGraphicRendition (SGR) is a command that sets display attributes.
//
// Default is 0.
//
@@ -14,20 +12,7 @@ func SelectGraphicRendition(ps ...Attr) string {
return ResetStyle
}
- var s Style
- for _, p := range ps {
- attr, ok := attrStrings[p]
- if ok {
- s = append(s, attr)
- } else {
- if p < 0 {
- p = 0
- }
- s = append(s, strconv.Itoa(p))
- }
- }
-
- return s.String()
+ return NewStyle(ps...).String()
}
// SGR is an alias for [SelectGraphicRendition].
@@ -36,60 +21,59 @@ func SGR(ps ...Attr) string {
}
var attrStrings = map[int]string{
- ResetAttr: "0",
- BoldAttr: "1",
- FaintAttr: "2",
- ItalicAttr: "3",
- UnderlineAttr: "4",
- SlowBlinkAttr: "5",
- RapidBlinkAttr: "6",
- ReverseAttr: "7",
- ConcealAttr: "8",
- StrikethroughAttr: "9",
- NoBoldAttr: "21",
- NormalIntensityAttr: "22",
- NoItalicAttr: "23",
- NoUnderlineAttr: "24",
- NoBlinkAttr: "25",
- NoReverseAttr: "27",
- NoConcealAttr: "28",
- NoStrikethroughAttr: "29",
- BlackForegroundColorAttr: "30",
- RedForegroundColorAttr: "31",
- GreenForegroundColorAttr: "32",
- YellowForegroundColorAttr: "33",
- BlueForegroundColorAttr: "34",
- MagentaForegroundColorAttr: "35",
- CyanForegroundColorAttr: "36",
- WhiteForegroundColorAttr: "37",
- ExtendedForegroundColorAttr: "38",
- DefaultForegroundColorAttr: "39",
- BlackBackgroundColorAttr: "40",
- RedBackgroundColorAttr: "41",
- GreenBackgroundColorAttr: "42",
- YellowBackgroundColorAttr: "43",
- BlueBackgroundColorAttr: "44",
- MagentaBackgroundColorAttr: "45",
- CyanBackgroundColorAttr: "46",
- WhiteBackgroundColorAttr: "47",
- ExtendedBackgroundColorAttr: "48",
- DefaultBackgroundColorAttr: "49",
- ExtendedUnderlineColorAttr: "58",
- DefaultUnderlineColorAttr: "59",
- BrightBlackForegroundColorAttr: "90",
- BrightRedForegroundColorAttr: "91",
- BrightGreenForegroundColorAttr: "92",
- BrightYellowForegroundColorAttr: "93",
- BrightBlueForegroundColorAttr: "94",
- BrightMagentaForegroundColorAttr: "95",
- BrightCyanForegroundColorAttr: "96",
- BrightWhiteForegroundColorAttr: "97",
- BrightBlackBackgroundColorAttr: "100",
- BrightRedBackgroundColorAttr: "101",
- BrightGreenBackgroundColorAttr: "102",
- BrightYellowBackgroundColorAttr: "103",
- BrightBlueBackgroundColorAttr: "104",
- BrightMagentaBackgroundColorAttr: "105",
- BrightCyanBackgroundColorAttr: "106",
- BrightWhiteBackgroundColorAttr: "107",
+ ResetAttr: resetAttr,
+ BoldAttr: boldAttr,
+ FaintAttr: faintAttr,
+ ItalicAttr: italicAttr,
+ UnderlineAttr: underlineAttr,
+ SlowBlinkAttr: slowBlinkAttr,
+ RapidBlinkAttr: rapidBlinkAttr,
+ ReverseAttr: reverseAttr,
+ ConcealAttr: concealAttr,
+ StrikethroughAttr: strikethroughAttr,
+ NormalIntensityAttr: normalIntensityAttr,
+ NoItalicAttr: noItalicAttr,
+ NoUnderlineAttr: noUnderlineAttr,
+ NoBlinkAttr: noBlinkAttr,
+ NoReverseAttr: noReverseAttr,
+ NoConcealAttr: noConcealAttr,
+ NoStrikethroughAttr: noStrikethroughAttr,
+ BlackForegroundColorAttr: blackForegroundColorAttr,
+ RedForegroundColorAttr: redForegroundColorAttr,
+ GreenForegroundColorAttr: greenForegroundColorAttr,
+ YellowForegroundColorAttr: yellowForegroundColorAttr,
+ BlueForegroundColorAttr: blueForegroundColorAttr,
+ MagentaForegroundColorAttr: magentaForegroundColorAttr,
+ CyanForegroundColorAttr: cyanForegroundColorAttr,
+ WhiteForegroundColorAttr: whiteForegroundColorAttr,
+ ExtendedForegroundColorAttr: extendedForegroundColorAttr,
+ DefaultForegroundColorAttr: defaultForegroundColorAttr,
+ BlackBackgroundColorAttr: blackBackgroundColorAttr,
+ RedBackgroundColorAttr: redBackgroundColorAttr,
+ GreenBackgroundColorAttr: greenBackgroundColorAttr,
+ YellowBackgroundColorAttr: yellowBackgroundColorAttr,
+ BlueBackgroundColorAttr: blueBackgroundColorAttr,
+ MagentaBackgroundColorAttr: magentaBackgroundColorAttr,
+ CyanBackgroundColorAttr: cyanBackgroundColorAttr,
+ WhiteBackgroundColorAttr: whiteBackgroundColorAttr,
+ ExtendedBackgroundColorAttr: extendedBackgroundColorAttr,
+ DefaultBackgroundColorAttr: defaultBackgroundColorAttr,
+ ExtendedUnderlineColorAttr: extendedUnderlineColorAttr,
+ DefaultUnderlineColorAttr: defaultUnderlineColorAttr,
+ BrightBlackForegroundColorAttr: brightBlackForegroundColorAttr,
+ BrightRedForegroundColorAttr: brightRedForegroundColorAttr,
+ BrightGreenForegroundColorAttr: brightGreenForegroundColorAttr,
+ BrightYellowForegroundColorAttr: brightYellowForegroundColorAttr,
+ BrightBlueForegroundColorAttr: brightBlueForegroundColorAttr,
+ BrightMagentaForegroundColorAttr: brightMagentaForegroundColorAttr,
+ BrightCyanForegroundColorAttr: brightCyanForegroundColorAttr,
+ BrightWhiteForegroundColorAttr: brightWhiteForegroundColorAttr,
+ BrightBlackBackgroundColorAttr: brightBlackBackgroundColorAttr,
+ BrightRedBackgroundColorAttr: brightRedBackgroundColorAttr,
+ BrightGreenBackgroundColorAttr: brightGreenBackgroundColorAttr,
+ BrightYellowBackgroundColorAttr: brightYellowBackgroundColorAttr,
+ BrightBlueBackgroundColorAttr: brightBlueBackgroundColorAttr,
+ BrightMagentaBackgroundColorAttr: brightMagentaBackgroundColorAttr,
+ BrightCyanBackgroundColorAttr: brightCyanBackgroundColorAttr,
+ BrightWhiteBackgroundColorAttr: brightWhiteBackgroundColorAttr,
}
diff --git a/vendor/github.com/charmbracelet/x/ansi/status.go b/vendor/github.com/charmbracelet/x/ansi/status.go
index 4337e189..3adfb028 100644
--- a/vendor/github.com/charmbracelet/x/ansi/status.go
+++ b/vendor/github.com/charmbracelet/x/ansi/status.go
@@ -11,10 +11,10 @@ type StatusReport interface {
StatusReport() int
}
-// ANSIReport represents an ANSI terminal status report.
+// ANSIStatusReport represents an ANSI terminal status report.
type ANSIStatusReport int //nolint:revive
-// Report returns the status report identifier.
+// StatusReport returns the status report identifier.
func (s ANSIStatusReport) StatusReport() int {
return int(s)
}
@@ -22,7 +22,7 @@ func (s ANSIStatusReport) StatusReport() int {
// DECStatusReport represents a DEC terminal status report.
type DECStatusReport int
-// Status returns the status report identifier.
+// StatusReport returns the status report identifier.
func (s DECStatusReport) StatusReport() int {
return int(s)
}
@@ -89,6 +89,16 @@ const RequestCursorPositionReport = "\x1b[6n"
// See: https://vt100.net/docs/vt510-rm/DECXCPR.html
const RequestExtendedCursorPositionReport = "\x1b[?6n"
+// RequestLightDarkReport is a control sequence that requests the terminal to
+// report its operating system light/dark color preference. Supported terminals
+// should respond with a [LightDarkReport] sequence as follows:
+//
+// CSI ? 997 ; 1 n for dark mode
+// CSI ? 997 ; 2 n for light mode
+//
+// See: https://contour-terminal.org/vt-extensions/color-palette-update-notifications/
+const RequestLightDarkReport = "\x1b[?996n"
+
// CursorPositionReport (CPR) is a control sequence that reports the cursor's
// position.
//
@@ -142,3 +152,17 @@ func ExtendedCursorPositionReport(line, column, page int) string {
func DECXCPR(line, column, page int) string {
return ExtendedCursorPositionReport(line, column, page)
}
+
+// LightDarkReport is a control sequence that reports the terminal's operating
+// system light/dark color preference.
+//
+// CSI ? 997 ; 1 n for dark mode
+// CSI ? 997 ; 2 n for light mode
+//
+// See: https://contour-terminal.org/vt-extensions/color-palette-update-notifications/
+func LightDarkReport(dark bool) string {
+ if dark {
+ return "\x1b[?997;1n"
+ }
+ return "\x1b[?997;2n"
+}
diff --git a/vendor/github.com/charmbracelet/x/ansi/style.go b/vendor/github.com/charmbracelet/x/ansi/style.go
index 46ddcaa9..d8a7efae 100644
--- a/vendor/github.com/charmbracelet/x/ansi/style.go
+++ b/vendor/github.com/charmbracelet/x/ansi/style.go
@@ -17,6 +17,26 @@ type Attr = int
// Style represents an ANSI SGR (Select Graphic Rendition) style.
type Style []string
+// NewStyle returns a new style with the given attributes.
+func NewStyle(attrs ...Attr) Style {
+ if len(attrs) == 0 {
+ return Style{}
+ }
+ s := make(Style, 0, len(attrs))
+ for _, a := range attrs {
+ attr, ok := attrStrings[a]
+ if ok {
+ s = append(s, attr)
+ } else {
+ if a < 0 {
+ a = 0
+ }
+ s = append(s, strconv.Itoa(a))
+ }
+ }
+ return s
+}
+
// String returns the ANSI SGR (Select Graphic Rendition) style sequence for
// the given style.
func (s Style) String() string {
@@ -127,11 +147,6 @@ func (s Style) Strikethrough() Style {
return append(s, strikethroughAttr)
}
-// NoBold appends the no bold style attribute to the style.
-func (s Style) NoBold() Style {
- return append(s, noBoldAttr)
-}
-
// NormalIntensity appends the normal intensity style attribute to the style.
func (s Style) NormalIntensity() Style {
return append(s, normalIntensityAttr)
@@ -236,7 +251,6 @@ const (
ReverseAttr Attr = 7
ConcealAttr Attr = 8
StrikethroughAttr Attr = 9
- NoBoldAttr Attr = 21 // Some terminals treat this as double underline.
NormalIntensityAttr Attr = 22
NoItalicAttr Attr = 23
NoUnderlineAttr Attr = 24
@@ -298,7 +312,6 @@ const (
reverseAttr = "7"
concealAttr = "8"
strikethroughAttr = "9"
- noBoldAttr = "21"
normalIntensityAttr = "22"
noItalicAttr = "23"
noUnderlineAttr = "24"
@@ -581,7 +594,7 @@ func ReadStyleColor(params Params, co *color.Color) (n int) {
B: uint8(b), //nolint:gosec
A: 0xff,
}
- return
+ return //nolint:nakedret
case 3: // CMY direct color
if len(params) < 5 {
@@ -599,7 +612,7 @@ func ReadStyleColor(params Params, co *color.Color) (n int) {
Y: uint8(y), //nolint:gosec
K: 0,
}
- return
+ return //nolint:nakedret
case 4: // CMYK direct color
if len(params) < 6 {
@@ -617,7 +630,7 @@ func ReadStyleColor(params Params, co *color.Color) (n int) {
Y: uint8(y), //nolint:gosec
K: uint8(k), //nolint:gosec
}
- return
+ return //nolint:nakedret
case 5: // indexed color
if len(params) < 3 {
@@ -652,7 +665,7 @@ func ReadStyleColor(params Params, co *color.Color) (n int) {
B: uint8(b), //nolint:gosec
A: uint8(a), //nolint:gosec
}
- return
+ return //nolint:nakedret
default:
return 0
diff --git a/vendor/github.com/charmbracelet/x/ansi/termcap.go b/vendor/github.com/charmbracelet/x/ansi/termcap.go
index 3c5c7da9..b59aa420 100644
--- a/vendor/github.com/charmbracelet/x/ansi/termcap.go
+++ b/vendor/github.com/charmbracelet/x/ansi/termcap.go
@@ -5,7 +5,7 @@ import (
"strings"
)
-// RequestTermcap (XTGETTCAP) requests Termcap/Terminfo strings.
+// XTGETTCAP (RequestTermcap) requests Termcap/Terminfo strings.
//
// DCS + q ST
//
diff --git a/vendor/github.com/charmbracelet/x/ansi/title.go b/vendor/github.com/charmbracelet/x/ansi/title.go
index 8fd8bf98..54ef9423 100644
--- a/vendor/github.com/charmbracelet/x/ansi/title.go
+++ b/vendor/github.com/charmbracelet/x/ansi/title.go
@@ -30,3 +30,19 @@ func SetIconName(s string) string {
func SetWindowTitle(s string) string {
return "\x1b]2;" + s + "\x07"
}
+
+// DECSWT is a sequence for setting the window title.
+//
+// This is an alias for [SetWindowTitle]("1;").
+// See: EK-VT520-RM 5–156 https://vt100.net/dec/ek-vt520-rm.pdf
+func DECSWT(name string) string {
+ return SetWindowTitle("1;" + name)
+}
+
+// DECSIN is a sequence for setting the icon name.
+//
+// This is an alias for [SetWindowTitle]("L;").
+// See: EK-VT520-RM 5–134 https://vt100.net/dec/ek-vt520-rm.pdf
+func DECSIN(name string) string {
+ return SetWindowTitle("L;" + name)
+}
diff --git a/vendor/github.com/charmbracelet/x/ansi/truncate.go b/vendor/github.com/charmbracelet/x/ansi/truncate.go
index 1fa3efef..3f541fa5 100644
--- a/vendor/github.com/charmbracelet/x/ansi/truncate.go
+++ b/vendor/github.com/charmbracelet/x/ansi/truncate.go
@@ -10,8 +10,7 @@ import (
// Cut the string, without adding any prefix or tail strings. This function is
// aware of ANSI escape codes and will not break them, and accounts for
-// wide-characters (such as East-Asian characters and emojis). Note that the
-// [left] parameter is inclusive, while [right] isn't.
+// wide-characters (such as East-Asian characters and emojis).
// This treats the text as a sequence of graphemes.
func Cut(s string, left, right int) string {
return cut(GraphemeWidth, s, left, right)
@@ -19,8 +18,10 @@ func Cut(s string, left, right int) string {
// CutWc the string, without adding any prefix or tail strings. This function is
// aware of ANSI escape codes and will not break them, and accounts for
-// wide-characters (such as East-Asian characters and emojis). Note that the
-// [left] parameter is inclusive, while [right] isn't.
+// wide-characters (such as East-Asian characters and emojis).
+// Note that the [left] parameter is inclusive, while [right] isn't,
+// which is to say it'll return `[left, right)`.
+//
// This treats the text as a sequence of wide characters and runes.
func CutWc(s string, left, right int) string {
return cut(WcWidth, s, left, right)
@@ -41,7 +42,7 @@ func cut(m Method, s string, left, right int) string {
if left == 0 {
return truncate(s, right, "")
}
- return truncateLeft(Truncate(s, right, ""), left, "")
+ return truncateLeft(truncate(s, right, ""), left, "")
}
// Truncate truncates a string to a given length, adding a tail to the end if
@@ -99,6 +100,7 @@ func truncate(m Method, s string, length int, tail string) string {
// increment the index by the length of the cluster
i += len(cluster)
+ curWidth += width
// Are we ignoring? Skip to the next byte
if ignoring {
@@ -107,16 +109,15 @@ func truncate(m Method, s string, length int, tail string) string {
// Is this gonna be too wide?
// If so write the tail and stop collecting.
- if curWidth+width > length && !ignoring {
+ if curWidth > length && !ignoring {
ignoring = true
buf.WriteString(tail)
}
- if curWidth+width > length {
+ if curWidth > length {
continue
}
- curWidth += width
buf.Write(cluster)
// Done collecting, now we're back in the ground state.
@@ -142,6 +143,14 @@ func truncate(m Method, s string, length int, tail string) string {
// collects printable ASCII
curWidth++
fallthrough
+ case parser.ExecuteAction:
+ // execute action will be things like \n, which, if outside the cut,
+ // should be ignored.
+ if ignoring {
+ i++
+ continue
+ }
+ fallthrough
default:
buf.WriteByte(b[i])
i++
@@ -214,14 +223,14 @@ func truncateLeft(m Method, s string, n int, prefix string) string {
buf.WriteString(prefix)
}
- if ignoring {
- continue
- }
-
if curWidth > n {
buf.Write(cluster)
}
+ if ignoring {
+ continue
+ }
+
pstate = parser.GroundState
continue
}
@@ -240,6 +249,14 @@ func truncateLeft(m Method, s string, n int, prefix string) string {
continue
}
+ fallthrough
+ case parser.ExecuteAction:
+ // execute action will be things like \n, which, if outside the cut,
+ // should be ignored.
+ if ignoring {
+ i++
+ continue
+ }
fallthrough
default:
buf.WriteByte(b[i])
diff --git a/vendor/github.com/charmbracelet/x/ansi/util.go b/vendor/github.com/charmbracelet/x/ansi/util.go
index 301ef15f..103f452b 100644
--- a/vendor/github.com/charmbracelet/x/ansi/util.go
+++ b/vendor/github.com/charmbracelet/x/ansi/util.go
@@ -10,7 +10,7 @@ import (
)
// colorToHexString returns a hex string representation of a color.
-func colorToHexString(c color.Color) string {
+func colorToHexString(c color.Color) string { //nolint:unused
if c == nil {
return ""
}
@@ -28,7 +28,7 @@ func colorToHexString(c color.Color) string {
// rgbToHex converts red, green, and blue values to a hexadecimal value.
//
// hex := rgbToHex(0, 0, 255) // 0x0000FF
-func rgbToHex(r, g, b uint32) uint32 {
+func rgbToHex(r, g, b uint32) uint32 { //nolint:unused
return r<<16 + g<<8 + b
}
@@ -90,17 +90,3 @@ func XParseColor(s string) color.Color {
}
return nil
}
-
-type ordered interface {
- ~int | ~int8 | ~int16 | ~int32 | ~int64 |
- ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
- ~float32 | ~float64 |
- ~string
-}
-
-func max[T ordered](a, b T) T { //nolint:predeclared
- if a > b {
- return a
- }
- return b
-}
diff --git a/vendor/github.com/charmbracelet/x/ansi/width.go b/vendor/github.com/charmbracelet/x/ansi/width.go
index d0487d35..cc085816 100644
--- a/vendor/github.com/charmbracelet/x/ansi/width.go
+++ b/vendor/github.com/charmbracelet/x/ansi/width.go
@@ -19,7 +19,7 @@ func Strip(s string) string {
// This implements a subset of the Parser to only collect runes and
// printable characters.
- for i := 0; i < len(s); i++ {
+ for i := range len(s) {
if pstate == parser.Utf8State {
// During this state, collect rw bytes to form a valid rune in the
// buffer. After getting all the rune bytes into the buffer,
diff --git a/vendor/github.com/charmbracelet/x/ansi/wrap.go b/vendor/github.com/charmbracelet/x/ansi/wrap.go
index 6b995800..253e1233 100644
--- a/vendor/github.com/charmbracelet/x/ansi/wrap.go
+++ b/vendor/github.com/charmbracelet/x/ansi/wrap.go
@@ -10,7 +10,7 @@ import (
"github.com/rivo/uniseg"
)
-// nbsp is a non-breaking space
+// nbsp is a non-breaking space.
const nbsp = 0xA0
// Hardwrap wraps a string or a block of text to a given line length, breaking
@@ -55,7 +55,7 @@ func hardwrap(m Method, s string, limit int, preserveSpace bool) string {
i := 0
for i < len(b) {
state, action := parser.Table.Transition(pstate, b[i])
- if state == parser.Utf8State {
+ if state == parser.Utf8State { //nolint:nestif
var width int
cluster, _, width, _ = uniseg.FirstGraphemeCluster(b[i:], -1)
if m == WcWidth {
@@ -190,7 +190,7 @@ func wordwrap(m Method, s string, limit int, breakpoints string) string {
i := 0
for i < len(b) {
state, action := parser.Table.Transition(pstate, b[i])
- if state == parser.Utf8State {
+ if state == parser.Utf8State { //nolint:nestif
var width int
cluster, _, width, _ = uniseg.FirstGraphemeCluster(b[i:], -1)
if m == WcWidth {
@@ -303,20 +303,22 @@ func wrap(m Method, s string, limit int, breakpoints string) string {
}
var (
- cluster []byte
- buf bytes.Buffer
- word bytes.Buffer
- space bytes.Buffer
- curWidth int // written width of the line
- wordLen int // word buffer len without ANSI escape codes
- pstate = parser.GroundState // initial state
- b = []byte(s)
+ cluster []byte
+ buf bytes.Buffer
+ word bytes.Buffer
+ space bytes.Buffer
+ spaceWidth int // width of the space buffer
+ curWidth int // written width of the line
+ wordLen int // word buffer len without ANSI escape codes
+ pstate = parser.GroundState // initial state
+ b = []byte(s)
)
addSpace := func() {
- curWidth += space.Len()
+ curWidth += spaceWidth
buf.Write(space.Bytes())
space.Reset()
+ spaceWidth = 0
}
addWord := func() {
@@ -335,12 +337,13 @@ func wrap(m Method, s string, limit int, breakpoints string) string {
buf.WriteByte('\n')
curWidth = 0
space.Reset()
+ spaceWidth = 0
}
i := 0
for i < len(b) {
state, action := parser.Table.Transition(pstate, b[i])
- if state == parser.Utf8State {
+ if state == parser.Utf8State { //nolint:nestif
var width int
cluster, _, width, _ = uniseg.FirstGraphemeCluster(b[i:], -1)
if m == WcWidth {
@@ -353,6 +356,7 @@ func wrap(m Method, s string, limit int, breakpoints string) string {
case r != utf8.RuneError && unicode.IsSpace(r) && r != nbsp: // nbsp is a non-breaking space
addWord()
space.WriteRune(r)
+ spaceWidth += width
case bytes.ContainsAny(cluster, breakpoints):
addSpace()
if curWidth+wordLen+width > limit {
@@ -372,7 +376,7 @@ func wrap(m Method, s string, limit int, breakpoints string) string {
word.Write(cluster)
wordLen += width
- if curWidth+wordLen+space.Len() > limit {
+ if curWidth+wordLen+spaceWidth > limit {
addNewline()
}
}
@@ -386,13 +390,14 @@ func wrap(m Method, s string, limit int, breakpoints string) string {
switch r := rune(b[i]); {
case r == '\n':
if wordLen == 0 {
- if curWidth+space.Len() > limit {
+ if curWidth+spaceWidth > limit {
curWidth = 0
} else {
// preserve whitespaces
buf.Write(space.Bytes())
}
space.Reset()
+ spaceWidth = 0
}
addWord()
@@ -400,6 +405,7 @@ func wrap(m Method, s string, limit int, breakpoints string) string {
case unicode.IsSpace(r):
addWord()
space.WriteRune(r)
+ spaceWidth++
case r == '-':
fallthrough
case runeContainsAny(r, breakpoints):
@@ -426,7 +432,7 @@ func wrap(m Method, s string, limit int, breakpoints string) string {
addWord()
}
- if curWidth+wordLen+space.Len() > limit {
+ if curWidth+wordLen+spaceWidth > limit {
addNewline()
}
}
@@ -443,13 +449,14 @@ func wrap(m Method, s string, limit int, breakpoints string) string {
}
if wordLen == 0 {
- if curWidth+space.Len() > limit {
+ if curWidth+spaceWidth > limit {
curWidth = 0
} else {
// preserve whitespaces
buf.Write(space.Bytes())
}
space.Reset()
+ spaceWidth = 0
}
addWord()
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go
index 5a939100..1f165141 100644
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go
+++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go
@@ -18,6 +18,9 @@ func (Curve) Identity() *Point {
func (Curve) IsOnCurve(P *Point) bool {
x2, y2, t, t2, z2 := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}
rhs, lhs := &fp.Elt{}, &fp.Elt{}
+ // Check z != 0
+ eq0 := !fp.IsZero(&P.z)
+
fp.Mul(t, &P.ta, &P.tb) // t = ta*tb
fp.Sqr(x2, &P.x) // x^2
fp.Sqr(y2, &P.y) // y^2
@@ -27,13 +30,14 @@ func (Curve) IsOnCurve(P *Point) bool {
fp.Mul(rhs, t2, &paramD) // dt^2
fp.Add(rhs, rhs, z2) // z^2 + dt^2
fp.Sub(lhs, lhs, rhs) // x^2 + y^2 - (z^2 + dt^2)
- eq0 := fp.IsZero(lhs)
+ eq1 := fp.IsZero(lhs)
fp.Mul(lhs, &P.x, &P.y) // xy
fp.Mul(rhs, t, &P.z) // tz
fp.Sub(lhs, lhs, rhs) // xy - tz
- eq1 := fp.IsZero(lhs)
- return eq0 && eq1
+ eq2 := fp.IsZero(lhs)
+
+ return eq0 && eq1 && eq2
}
// Generator returns the generator point.
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/point.go b/vendor/github.com/cloudflare/circl/sign/ed25519/point.go
index 374a6950..d1c3b146 100644
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/point.go
+++ b/vendor/github.com/cloudflare/circl/sign/ed25519/point.go
@@ -164,7 +164,7 @@ func (P *pointR1) isEqual(Q *pointR1) bool {
fp.Mul(r, r, &P.z)
fp.Sub(l, l, r)
b = b && fp.IsZero(l)
- return b
+ return b && !fp.IsZero(&P.z) && !fp.IsZero(&Q.z)
}
func (P *pointR3) neg() {
diff --git a/vendor/github.com/containerd/errdefs/LICENSE b/vendor/github.com/containerd/errdefs/LICENSE
new file mode 100644
index 00000000..584149b6
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright The containerd Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containerd/errdefs/README.md b/vendor/github.com/containerd/errdefs/README.md
new file mode 100644
index 00000000..bd418c63
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/README.md
@@ -0,0 +1,13 @@
+# errdefs
+
+A Go package for defining and checking common containerd errors.
+
+## Project details
+
+**errdefs** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.
diff --git a/vendor/github.com/containerd/errdefs/errors.go b/vendor/github.com/containerd/errdefs/errors.go
new file mode 100644
index 00000000..f654d196
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/errors.go
@@ -0,0 +1,443 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package errdefs defines the common errors used throughout containerd
+// packages.
+//
+// Use with fmt.Errorf to add context to an error.
+//
+// To detect an error class, use the IsXXX functions to tell whether an error
+// is of a certain type.
+package errdefs
+
+import (
+ "context"
+ "errors"
+)
+
+// Definitions of common error types used throughout containerd. All containerd
+// errors returned by most packages will map into one of these errors classes.
+// Packages should return errors of these types when they want to instruct a
+// client to take a particular action.
+//
+// These errors map closely to grpc errors.
+var (
+ ErrUnknown = errUnknown{}
+ ErrInvalidArgument = errInvalidArgument{}
+ ErrNotFound = errNotFound{}
+ ErrAlreadyExists = errAlreadyExists{}
+ ErrPermissionDenied = errPermissionDenied{}
+ ErrResourceExhausted = errResourceExhausted{}
+ ErrFailedPrecondition = errFailedPrecondition{}
+ ErrConflict = errConflict{}
+ ErrNotModified = errNotModified{}
+ ErrAborted = errAborted{}
+ ErrOutOfRange = errOutOfRange{}
+ ErrNotImplemented = errNotImplemented{}
+ ErrInternal = errInternal{}
+ ErrUnavailable = errUnavailable{}
+ ErrDataLoss = errDataLoss{}
+ ErrUnauthenticated = errUnauthorized{}
+)
+
+// cancelled maps to Moby's "ErrCancelled"
+type cancelled interface {
+ Cancelled()
+}
+
+// IsCanceled returns true if the error is due to `context.Canceled`.
+func IsCanceled(err error) bool {
+ return errors.Is(err, context.Canceled) || isInterface[cancelled](err)
+}
+
+type errUnknown struct{}
+
+func (errUnknown) Error() string { return "unknown" }
+
+func (errUnknown) Unknown() {}
+
+func (e errUnknown) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// unknown maps to Moby's "ErrUnknown"
+type unknown interface {
+ Unknown()
+}
+
+// IsUnknown returns true if the error is due to an unknown error,
+// unhandled condition or unexpected response.
+func IsUnknown(err error) bool {
+ return errors.Is(err, errUnknown{}) || isInterface[unknown](err)
+}
+
+type errInvalidArgument struct{}
+
+func (errInvalidArgument) Error() string { return "invalid argument" }
+
+func (errInvalidArgument) InvalidParameter() {}
+
+func (e errInvalidArgument) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// invalidParameter maps to Moby's "ErrInvalidParameter"
+type invalidParameter interface {
+ InvalidParameter()
+}
+
+// IsInvalidArgument returns true if the error is due to an invalid argument
+func IsInvalidArgument(err error) bool {
+ return errors.Is(err, ErrInvalidArgument) || isInterface[invalidParameter](err)
+}
+
+// deadlineExceed maps to Moby's "ErrDeadline"
+type deadlineExceeded interface {
+ DeadlineExceeded()
+}
+
+// IsDeadlineExceeded returns true if the error is due to
+// `context.DeadlineExceeded`.
+func IsDeadlineExceeded(err error) bool {
+ return errors.Is(err, context.DeadlineExceeded) || isInterface[deadlineExceeded](err)
+}
+
+type errNotFound struct{}
+
+func (errNotFound) Error() string { return "not found" }
+
+func (errNotFound) NotFound() {}
+
+func (e errNotFound) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// notFound maps to Moby's "ErrNotFound"
+type notFound interface {
+ NotFound()
+}
+
+// IsNotFound returns true if the error is due to a missing object
+func IsNotFound(err error) bool {
+ return errors.Is(err, ErrNotFound) || isInterface[notFound](err)
+}
+
+type errAlreadyExists struct{}
+
+func (errAlreadyExists) Error() string { return "already exists" }
+
+func (errAlreadyExists) AlreadyExists() {}
+
+func (e errAlreadyExists) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+type alreadyExists interface {
+ AlreadyExists()
+}
+
+// IsAlreadyExists returns true if the error is due to an already existing
+// metadata item
+func IsAlreadyExists(err error) bool {
+ return errors.Is(err, ErrAlreadyExists) || isInterface[alreadyExists](err)
+}
+
+type errPermissionDenied struct{}
+
+func (errPermissionDenied) Error() string { return "permission denied" }
+
+func (errPermissionDenied) Forbidden() {}
+
+func (e errPermissionDenied) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// forbidden maps to Moby's "ErrForbidden"
+type forbidden interface {
+ Forbidden()
+}
+
+// IsPermissionDenied returns true if the error is due to permission denied
+// or forbidden (403) response
+func IsPermissionDenied(err error) bool {
+ return errors.Is(err, ErrPermissionDenied) || isInterface[forbidden](err)
+}
+
+type errResourceExhausted struct{}
+
+func (errResourceExhausted) Error() string { return "resource exhausted" }
+
+func (errResourceExhausted) ResourceExhausted() {}
+
+func (e errResourceExhausted) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+type resourceExhausted interface {
+ ResourceExhausted()
+}
+
+// IsResourceExhausted returns true if the error is due to
+// a lack of resources or too many attempts.
+func IsResourceExhausted(err error) bool {
+ return errors.Is(err, errResourceExhausted{}) || isInterface[resourceExhausted](err)
+}
+
+type errFailedPrecondition struct{}
+
+func (e errFailedPrecondition) Error() string { return "failed precondition" }
+
+func (errFailedPrecondition) FailedPrecondition() {}
+
+func (e errFailedPrecondition) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+type failedPrecondition interface {
+ FailedPrecondition()
+}
+
+// IsFailedPrecondition returns true if an operation could not proceed due to
+// the lack of a particular condition
+func IsFailedPrecondition(err error) bool {
+ return errors.Is(err, errFailedPrecondition{}) || isInterface[failedPrecondition](err)
+}
+
+type errConflict struct{}
+
+func (errConflict) Error() string { return "conflict" }
+
+func (errConflict) Conflict() {}
+
+func (e errConflict) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// conflict maps to Moby's "ErrConflict"
+type conflict interface {
+ Conflict()
+}
+
+// IsConflict returns true if an operation could not proceed due to
+// a conflict.
+func IsConflict(err error) bool {
+ return errors.Is(err, errConflict{}) || isInterface[conflict](err)
+}
+
+type errNotModified struct{}
+
+func (errNotModified) Error() string { return "not modified" }
+
+func (errNotModified) NotModified() {}
+
+func (e errNotModified) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// notModified maps to Moby's "ErrNotModified"
+type notModified interface {
+ NotModified()
+}
+
+// IsNotModified returns true if an operation could not proceed due
+// to an object not modified from a previous state.
+func IsNotModified(err error) bool {
+ return errors.Is(err, errNotModified{}) || isInterface[notModified](err)
+}
+
+type errAborted struct{}
+
+func (errAborted) Error() string { return "aborted" }
+
+func (errAborted) Aborted() {}
+
+func (e errAborted) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+type aborted interface {
+ Aborted()
+}
+
+// IsAborted returns true if an operation was aborted.
+func IsAborted(err error) bool {
+ return errors.Is(err, errAborted{}) || isInterface[aborted](err)
+}
+
+type errOutOfRange struct{}
+
+func (errOutOfRange) Error() string { return "out of range" }
+
+func (errOutOfRange) OutOfRange() {}
+
+func (e errOutOfRange) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+type outOfRange interface {
+ OutOfRange()
+}
+
+// IsOutOfRange returns true if an operation could not proceed due
+// to data being out of the expected range.
+func IsOutOfRange(err error) bool {
+ return errors.Is(err, errOutOfRange{}) || isInterface[outOfRange](err)
+}
+
+type errNotImplemented struct{}
+
+func (errNotImplemented) Error() string { return "not implemented" }
+
+func (errNotImplemented) NotImplemented() {}
+
+func (e errNotImplemented) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// notImplemented maps to Moby's "ErrNotImplemented"
+type notImplemented interface {
+ NotImplemented()
+}
+
+// IsNotImplemented returns true if the error is due to not being implemented
+func IsNotImplemented(err error) bool {
+ return errors.Is(err, errNotImplemented{}) || isInterface[notImplemented](err)
+}
+
+type errInternal struct{}
+
+func (errInternal) Error() string { return "internal" }
+
+func (errInternal) System() {}
+
+func (e errInternal) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// system maps to Moby's "ErrSystem"
+type system interface {
+ System()
+}
+
+// IsInternal returns true if the error returns to an internal or system error
+func IsInternal(err error) bool {
+ return errors.Is(err, errInternal{}) || isInterface[system](err)
+}
+
+type errUnavailable struct{}
+
+func (errUnavailable) Error() string { return "unavailable" }
+
+func (errUnavailable) Unavailable() {}
+
+func (e errUnavailable) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// unavailable maps to Moby's "ErrUnavailable"
+type unavailable interface {
+ Unavailable()
+}
+
+// IsUnavailable returns true if the error is due to a resource being unavailable
+func IsUnavailable(err error) bool {
+ return errors.Is(err, errUnavailable{}) || isInterface[unavailable](err)
+}
+
+type errDataLoss struct{}
+
+func (errDataLoss) Error() string { return "data loss" }
+
+func (errDataLoss) DataLoss() {}
+
+func (e errDataLoss) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// dataLoss maps to Moby's "ErrDataLoss"
+type dataLoss interface {
+ DataLoss()
+}
+
+// IsDataLoss returns true if data during an operation was lost or corrupted
+func IsDataLoss(err error) bool {
+ return errors.Is(err, errDataLoss{}) || isInterface[dataLoss](err)
+}
+
+type errUnauthorized struct{}
+
+func (errUnauthorized) Error() string { return "unauthorized" }
+
+func (errUnauthorized) Unauthorized() {}
+
+func (e errUnauthorized) WithMessage(msg string) error {
+ return customMessage{e, msg}
+}
+
+// unauthorized maps to Moby's "ErrUnauthorized"
+type unauthorized interface {
+ Unauthorized()
+}
+
+// IsUnauthorized returns true if the error indicates that the user was
+// unauthenticated or unauthorized.
+func IsUnauthorized(err error) bool {
+ return errors.Is(err, errUnauthorized{}) || isInterface[unauthorized](err)
+}
+
+func isInterface[T any](err error) bool {
+ for {
+ switch x := err.(type) {
+ case T:
+ return true
+ case customMessage:
+ err = x.err
+ case interface{ Unwrap() error }:
+ err = x.Unwrap()
+ if err == nil {
+ return false
+ }
+ case interface{ Unwrap() []error }:
+ for _, err := range x.Unwrap() {
+ if isInterface[T](err) {
+ return true
+ }
+ }
+ return false
+ default:
+ return false
+ }
+ }
+}
+
+// customMessage is used to provide a defined error with a custom message.
+// The message is not wrapped but can be compared by the `Is(error) bool` interface.
+type customMessage struct {
+ err error
+ msg string
+}
+
+func (c customMessage) Is(err error) bool {
+ return c.err == err
+}
+
+func (c customMessage) As(target any) bool {
+ return errors.As(c.err, target)
+}
+
+func (c customMessage) Error() string {
+ return c.msg
+}
diff --git a/vendor/github.com/containerd/errdefs/pkg/LICENSE b/vendor/github.com/containerd/errdefs/pkg/LICENSE
new file mode 100644
index 00000000..584149b6
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/pkg/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright The containerd Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containerd/errdefs/pkg/errhttp/http.go b/vendor/github.com/containerd/errdefs/pkg/errhttp/http.go
new file mode 100644
index 00000000..d7cd2b8c
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/pkg/errhttp/http.go
@@ -0,0 +1,96 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package errhttp provides utility functions for translating errors to
+// and from a HTTP context.
+//
+// The functions ToHTTP and ToNative can be used to map server-side and
+// client-side errors to the correct types.
+package errhttp
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/containerd/errdefs"
+ "github.com/containerd/errdefs/pkg/internal/cause"
+)
+
+// ToHTTP returns the best status code for the given error
+func ToHTTP(err error) int {
+ switch {
+ case errdefs.IsNotFound(err):
+ return http.StatusNotFound
+ case errdefs.IsInvalidArgument(err):
+ return http.StatusBadRequest
+ case errdefs.IsConflict(err):
+ return http.StatusConflict
+ case errdefs.IsNotModified(err):
+ return http.StatusNotModified
+ case errdefs.IsFailedPrecondition(err):
+ return http.StatusPreconditionFailed
+ case errdefs.IsUnauthorized(err):
+ return http.StatusUnauthorized
+ case errdefs.IsPermissionDenied(err):
+ return http.StatusForbidden
+ case errdefs.IsResourceExhausted(err):
+ return http.StatusTooManyRequests
+ case errdefs.IsInternal(err):
+ return http.StatusInternalServerError
+ case errdefs.IsNotImplemented(err):
+ return http.StatusNotImplemented
+ case errdefs.IsUnavailable(err):
+ return http.StatusServiceUnavailable
+ case errdefs.IsUnknown(err):
+ var unexpected cause.ErrUnexpectedStatus
+ if errors.As(err, &unexpected) && unexpected.Status >= 200 && unexpected.Status < 600 {
+ return unexpected.Status
+ }
+ return http.StatusInternalServerError
+ default:
+ return http.StatusInternalServerError
+ }
+}
+
+// ToNative returns the error best matching the HTTP status code
+func ToNative(statusCode int) error {
+ switch statusCode {
+ case http.StatusNotFound:
+ return errdefs.ErrNotFound
+ case http.StatusBadRequest:
+ return errdefs.ErrInvalidArgument
+ case http.StatusConflict:
+ return errdefs.ErrConflict
+ case http.StatusPreconditionFailed:
+ return errdefs.ErrFailedPrecondition
+ case http.StatusUnauthorized:
+ return errdefs.ErrUnauthenticated
+ case http.StatusForbidden:
+ return errdefs.ErrPermissionDenied
+ case http.StatusNotModified:
+ return errdefs.ErrNotModified
+ case http.StatusTooManyRequests:
+ return errdefs.ErrResourceExhausted
+ case http.StatusInternalServerError:
+ return errdefs.ErrInternal
+ case http.StatusNotImplemented:
+ return errdefs.ErrNotImplemented
+ case http.StatusServiceUnavailable:
+ return errdefs.ErrUnavailable
+ default:
+ return cause.ErrUnexpectedStatus{Status: statusCode}
+ }
+}
diff --git a/vendor/github.com/containerd/errdefs/pkg/internal/cause/cause.go b/vendor/github.com/containerd/errdefs/pkg/internal/cause/cause.go
new file mode 100644
index 00000000..d88756bb
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/pkg/internal/cause/cause.go
@@ -0,0 +1,33 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package cause is used to define root causes for errors
+// common to errors packages like grpc and http.
+package cause
+
+import "fmt"
+
+type ErrUnexpectedStatus struct {
+ Status int
+}
+
+const UnexpectedStatusPrefix = "unexpected status "
+
+func (e ErrUnexpectedStatus) Error() string {
+ return fmt.Sprintf("%s%d", UnexpectedStatusPrefix, e.Status)
+}
+
+func (ErrUnexpectedStatus) Unknown() {}
diff --git a/vendor/github.com/containerd/errdefs/resolve.go b/vendor/github.com/containerd/errdefs/resolve.go
new file mode 100644
index 00000000..c02d4a73
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/resolve.go
@@ -0,0 +1,147 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package errdefs
+
+import "context"
+
+// Resolve returns the first error found in the error chain which matches an
+// error defined in this package or context error. A raw, unwrapped error is
+// returned or ErrUnknown if no matching error is found.
+//
+// This is useful for determining a response code based on the outermost wrapped
+// error rather than the original cause. For example, a not found error deep
+// in the code may be wrapped as an invalid argument. When determining status
+// code from Is* functions, the depth or ordering of the error is not
+// considered.
+//
+// The search order is depth first, a wrapped error returned from any part of
+// the chain from `Unwrap() error` will be returned before any joined errors
+// as returned by `Unwrap() []error`.
+func Resolve(err error) error {
+ if err == nil {
+ return nil
+ }
+ err = firstError(err)
+ if err == nil {
+ err = ErrUnknown
+ }
+ return err
+}
+
+func firstError(err error) error {
+ for {
+ switch err {
+ case ErrUnknown,
+ ErrInvalidArgument,
+ ErrNotFound,
+ ErrAlreadyExists,
+ ErrPermissionDenied,
+ ErrResourceExhausted,
+ ErrFailedPrecondition,
+ ErrConflict,
+ ErrNotModified,
+ ErrAborted,
+ ErrOutOfRange,
+ ErrNotImplemented,
+ ErrInternal,
+ ErrUnavailable,
+ ErrDataLoss,
+ ErrUnauthenticated,
+ context.DeadlineExceeded,
+ context.Canceled:
+ return err
+ }
+ switch e := err.(type) {
+ case customMessage:
+ err = e.err
+ case unknown:
+ return ErrUnknown
+ case invalidParameter:
+ return ErrInvalidArgument
+ case notFound:
+ return ErrNotFound
+ case alreadyExists:
+ return ErrAlreadyExists
+ case forbidden:
+ return ErrPermissionDenied
+ case resourceExhausted:
+ return ErrResourceExhausted
+ case failedPrecondition:
+ return ErrFailedPrecondition
+ case conflict:
+ return ErrConflict
+ case notModified:
+ return ErrNotModified
+ case aborted:
+ return ErrAborted
+ case errOutOfRange:
+ return ErrOutOfRange
+ case notImplemented:
+ return ErrNotImplemented
+ case system:
+ return ErrInternal
+ case unavailable:
+ return ErrUnavailable
+ case dataLoss:
+ return ErrDataLoss
+ case unauthorized:
+ return ErrUnauthenticated
+ case deadlineExceeded:
+ return context.DeadlineExceeded
+ case cancelled:
+ return context.Canceled
+ case interface{ Unwrap() error }:
+ err = e.Unwrap()
+ if err == nil {
+ return nil
+ }
+ case interface{ Unwrap() []error }:
+ for _, ue := range e.Unwrap() {
+ if fe := firstError(ue); fe != nil {
+ return fe
+ }
+ }
+ return nil
+ case interface{ Is(error) bool }:
+ for _, target := range []error{ErrUnknown,
+ ErrInvalidArgument,
+ ErrNotFound,
+ ErrAlreadyExists,
+ ErrPermissionDenied,
+ ErrResourceExhausted,
+ ErrFailedPrecondition,
+ ErrConflict,
+ ErrNotModified,
+ ErrAborted,
+ ErrOutOfRange,
+ ErrNotImplemented,
+ ErrInternal,
+ ErrUnavailable,
+ ErrDataLoss,
+ ErrUnauthenticated,
+ context.DeadlineExceeded,
+ context.Canceled} {
+ if e.Is(target) {
+ return target
+ }
+ }
+ return nil
+ default:
+ return nil
+ }
+ }
+}
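A minimal usage sketch of Resolve, assuming only the import path github.com/containerd/errdefs and the exported sentinels vendored above; the expected values in the comments are illustrative, not output captured from this build.

package main

import (
	"errors"
	"fmt"

	"github.com/containerd/errdefs"
)

func main() {
	// A recognised cause wrapped several layers deep resolves to its sentinel.
	err := fmt.Errorf("lookup image: %w", fmt.Errorf("remote: %w", errdefs.ErrNotFound))
	fmt.Println(errors.Is(errdefs.Resolve(err), errdefs.ErrNotFound)) // true

	// An error with no recognised cause anywhere in its chain resolves to ErrUnknown.
	fmt.Println(errors.Is(errdefs.Resolve(errors.New("boom")), errdefs.ErrUnknown)) // true
}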
diff --git a/vendor/github.com/containerd/platforms/.gitattributes b/vendor/github.com/containerd/platforms/.gitattributes
new file mode 100644
index 00000000..a0717e4b
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/.gitattributes
@@ -0,0 +1 @@
+*.go text eol=lf
\ No newline at end of file
diff --git a/vendor/github.com/containerd/platforms/.golangci.yml b/vendor/github.com/containerd/platforms/.golangci.yml
new file mode 100644
index 00000000..a695775d
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/.golangci.yml
@@ -0,0 +1,30 @@
+linters:
+ enable:
+ - exportloopref # Checks for pointers to enclosing loop variables
+ - gofmt
+ - goimports
+ - gosec
+ - ineffassign
+ - misspell
+ - nolintlint
+ - revive
+ - staticcheck
+ - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17
+ - unconvert
+ - unused
+ - vet
+ - dupword # Checks for duplicate words in the source code
+ disable:
+ - errcheck
+
+run:
+ timeout: 5m
+ skip-dirs:
+ - api
+ - cluster
+ - design
+ - docs
+ - docs/man
+ - releases
+ - reports
+ - test # e2e scripts
diff --git a/vendor/github.com/containerd/platforms/LICENSE b/vendor/github.com/containerd/platforms/LICENSE
new file mode 100644
index 00000000..584149b6
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright The containerd Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containerd/platforms/README.md b/vendor/github.com/containerd/platforms/README.md
new file mode 100644
index 00000000..2059de77
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/README.md
@@ -0,0 +1,32 @@
+# platforms
+
+A Go package for formatting, normalizing and matching container platforms.
+
+This package is based on the Open Containers Image Spec definition of a [platform](https://github.com/opencontainers/image-spec/blob/main/specs-go/v1/descriptor.go#L52).
+
+## Platform Specifier
+
+While the OCI platform specifications provide a tool for components to
+specify structured information, user input typically doesn't need the full
+context and much can be inferred. To solve this problem, this package introduces
+"specifiers". A specifier has the format
+`<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
+operating system or the architecture or both.
+
+An example of a common specifier is `linux/amd64`. If the host has a default
+runtime that matches this, the user can simply provide the component that
+matters. For example, if an image provides `amd64` and `arm64` support, the
+operating system, `linux`, can be inferred, so they only have to provide
+`arm64` or `amd64`. Similar behavior is implemented for operating systems,
+where the architecture may be known but a runtime may support images from
+different operating systems.
+
+## Project details
+
+**platforms** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.
\ No newline at end of file
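A minimal sketch of the inference described in the README, assuming the package is imported as github.com/containerd/platforms; the printed values depend on the host and are only examples.

package main

import (
	"fmt"

	"github.com/containerd/platforms"
)

func main() {
	// A bare architecture specifier: the operating system is inferred from the host.
	p, err := platforms.Parse("arm64")
	if err != nil {
		panic(err)
	}
	fmt.Println(platforms.Format(p)) // e.g. "linux/arm64" on a Linux host

	// A bare operating system specifier: the architecture is inferred from the host.
	p, err = platforms.Parse("linux")
	if err != nil {
		panic(err)
	}
	fmt.Println(platforms.Format(p)) // e.g. "linux/amd64" on an amd64 host
}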
diff --git a/vendor/github.com/containerd/platforms/compare.go b/vendor/github.com/containerd/platforms/compare.go
new file mode 100644
index 00000000..3913ef66
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/compare.go
@@ -0,0 +1,203 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "strconv"
+ "strings"
+
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// MatchComparer is able to match and compare platforms to
+// filter and sort platforms.
+type MatchComparer interface {
+ Matcher
+
+ Less(specs.Platform, specs.Platform) bool
+}
+
+// platformVector returns an (ordered) vector of appropriate specs.Platform
+// objects to try matching for the given platform object (see platforms.Only).
+func platformVector(platform specs.Platform) []specs.Platform {
+ vector := []specs.Platform{platform}
+
+ switch platform.Architecture {
+ case "amd64":
+ if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 {
+ for amd64Version--; amd64Version >= 1; amd64Version-- {
+ vector = append(vector, specs.Platform{
+ Architecture: platform.Architecture,
+ OS: platform.OS,
+ OSVersion: platform.OSVersion,
+ OSFeatures: platform.OSFeatures,
+ Variant: "v" + strconv.Itoa(amd64Version),
+ })
+ }
+ }
+ vector = append(vector, specs.Platform{
+ Architecture: "386",
+ OS: platform.OS,
+ OSVersion: platform.OSVersion,
+ OSFeatures: platform.OSFeatures,
+ })
+ case "arm":
+ if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 {
+ for armVersion--; armVersion >= 5; armVersion-- {
+ vector = append(vector, specs.Platform{
+ Architecture: platform.Architecture,
+ OS: platform.OS,
+ OSVersion: platform.OSVersion,
+ OSFeatures: platform.OSFeatures,
+ Variant: "v" + strconv.Itoa(armVersion),
+ })
+ }
+ }
+ case "arm64":
+ variant := platform.Variant
+ if variant == "" {
+ variant = "v8"
+ }
+ vector = append(vector, platformVector(specs.Platform{
+ Architecture: "arm",
+ OS: platform.OS,
+ OSVersion: platform.OSVersion,
+ OSFeatures: platform.OSFeatures,
+ Variant: variant,
+ })...)
+ }
+
+ return vector
+}
+
+// Only returns a match comparer for a single platform
+// using default resolution logic for the platform.
+//
+// For arm/v8, will also match arm/v7, arm/v6 and arm/v5
+// For arm/v7, will also match arm/v6 and arm/v5
+// For arm/v6, will also match arm/v5
+// For amd64, will also match 386
+func Only(platform specs.Platform) MatchComparer {
+ return Ordered(platformVector(Normalize(platform))...)
+}
+
+// OnlyStrict returns a match comparer for a single platform.
+//
+// Unlike Only, OnlyStrict does not match sub platforms.
+// So, "arm/vN" will not match "arm/vM" where M < N,
+// and "amd64" will not also match "386".
+//
+// OnlyStrict matches non-canonical forms.
+// So, "arm64" matches "arm/64/v8".
+func OnlyStrict(platform specs.Platform) MatchComparer {
+ return Ordered(Normalize(platform))
+}
+
+// Ordered returns a platform MatchComparer which matches any of the platforms
+// but orders them in order they are provided.
+func Ordered(platforms ...specs.Platform) MatchComparer {
+ matchers := make([]Matcher, len(platforms))
+ for i := range platforms {
+ matchers[i] = NewMatcher(platforms[i])
+ }
+ return orderedPlatformComparer{
+ matchers: matchers,
+ }
+}
+
+// Any returns a platform MatchComparer which matches any of the platforms
+// with no preference for ordering.
+func Any(platforms ...specs.Platform) MatchComparer {
+ matchers := make([]Matcher, len(platforms))
+ for i := range platforms {
+ matchers[i] = NewMatcher(platforms[i])
+ }
+ return anyPlatformComparer{
+ matchers: matchers,
+ }
+}
+
+// All is a platform MatchComparer which matches all platforms
+// with preference for ordering.
+var All MatchComparer = allPlatformComparer{}
+
+type orderedPlatformComparer struct {
+ matchers []Matcher
+}
+
+func (c orderedPlatformComparer) Match(platform specs.Platform) bool {
+ for _, m := range c.matchers {
+ if m.Match(platform) {
+ return true
+ }
+ }
+ return false
+}
+
+func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool {
+ for _, m := range c.matchers {
+ p1m := m.Match(p1)
+ p2m := m.Match(p2)
+ if p1m && !p2m {
+ return true
+ }
+ if p1m || p2m {
+ return false
+ }
+ }
+ return false
+}
+
+type anyPlatformComparer struct {
+ matchers []Matcher
+}
+
+func (c anyPlatformComparer) Match(platform specs.Platform) bool {
+ for _, m := range c.matchers {
+ if m.Match(platform) {
+ return true
+ }
+ }
+ return false
+}
+
+func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool {
+ var p1m, p2m bool
+ for _, m := range c.matchers {
+ if !p1m && m.Match(p1) {
+ p1m = true
+ }
+ if !p2m && m.Match(p2) {
+ p2m = true
+ }
+ if p1m && p2m {
+ return false
+ }
+ }
+ // If one matches and the other does not, sort the match first
+ return p1m && !p2m
+}
+
+type allPlatformComparer struct{}
+
+func (allPlatformComparer) Match(specs.Platform) bool {
+ return true
+}
+
+func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool {
+ return false
+}
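A small sketch of the fallback matching that platformVector gives Only, contrasted with OnlyStrict; the function names are the exported ones defined above, and the expected results follow from the vector logic rather than from a recorded run.

package main

import (
	"fmt"

	"github.com/containerd/platforms"
)

func main() {
	// Only falls back to older ARM variants: v7 also matches v6 and v5.
	only := platforms.Only(platforms.MustParse("linux/arm/v7"))
	fmt.Println(only.Match(platforms.MustParse("linux/arm/v6"))) // true
	fmt.Println(only.Match(platforms.MustParse("linux/arm64")))  // false

	// OnlyStrict does not match sub-platforms.
	strict := platforms.OnlyStrict(platforms.MustParse("linux/arm/v7"))
	fmt.Println(strict.Match(platforms.MustParse("linux/arm/v6"))) // false
}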
diff --git a/vendor/github.com/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/platforms/cpuinfo.go
new file mode 100644
index 00000000..91f50e8c
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/cpuinfo.go
@@ -0,0 +1,43 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "runtime"
+ "sync"
+
+ "github.com/containerd/log"
+)
+
+// cpuVariantValue holds the ARM instruction set architecture variant, e.g. v7 or v8.
+// Don't use this value directly; call cpuVariant() instead.
+var cpuVariantValue string
+
+var cpuVariantOnce sync.Once
+
+func cpuVariant() string {
+ cpuVariantOnce.Do(func() {
+ if isArmArch(runtime.GOARCH) {
+ var err error
+ cpuVariantValue, err = getCPUVariant()
+ if err != nil {
+ log.L.Errorf("Error getCPUVariant for OS %s: %v", runtime.GOOS, err)
+ }
+ }
+ })
+ return cpuVariantValue
+}
diff --git a/vendor/github.com/containerd/platforms/cpuinfo_linux.go b/vendor/github.com/containerd/platforms/cpuinfo_linux.go
new file mode 100644
index 00000000..98c7001f
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/cpuinfo_linux.go
@@ -0,0 +1,160 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+// getMachineArch retrieves the machine architecture through a system call
+func getMachineArch() (string, error) {
+ var uname unix.Utsname
+ err := unix.Uname(&uname)
+ if err != nil {
+ return "", err
+ }
+
+ arch := string(uname.Machine[:bytes.IndexByte(uname.Machine[:], 0)])
+
+ return arch, nil
+}
+
+// For Linux, the kernel has already detected the ABI, ISA and Features.
+// So we don't need to access the ARM registers to detect platform information
+// by ourselves. We can just parse this information from /proc/cpuinfo.
+func getCPUInfo(pattern string) (info string, err error) {
+
+ cpuinfo, err := os.Open("/proc/cpuinfo")
+ if err != nil {
+ return "", err
+ }
+ defer cpuinfo.Close()
+
+ // Parse /proc/cpuinfo line by line. For an SMP SoC, parsing
+ // the first core is enough.
+ scanner := bufio.NewScanner(cpuinfo)
+ for scanner.Scan() {
+ newline := scanner.Text()
+ list := strings.Split(newline, ":")
+
+ if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
+ return strings.TrimSpace(list[1]), nil
+ }
+ }
+
+ // Check whether the scanner encountered errors
+ err = scanner.Err()
+ if err != nil {
+ return "", err
+ }
+
+ return "", fmt.Errorf("getCPUInfo for pattern %s: %w", pattern, errNotFound)
+}
+
+// getCPUVariantFromArch derives the CPU variant from the machine architecture string
+func getCPUVariantFromArch(arch string) (string, error) {
+
+ var variant string
+
+ arch = strings.ToLower(arch)
+
+ if arch == "aarch64" {
+ variant = "8"
+ } else if len(arch) >= 5 && arch[0:4] == "armv" {
+ // Valid arch format is in form of armvXx
+ switch arch[3:5] {
+ case "v8":
+ variant = "8"
+ case "v7":
+ variant = "7"
+ case "v6":
+ variant = "6"
+ case "v5":
+ variant = "5"
+ case "v4":
+ variant = "4"
+ case "v3":
+ variant = "3"
+ default:
+ variant = "unknown"
+ }
+ } else {
+ return "", fmt.Errorf("getCPUVariantFromArch invalid arch: %s, %w", arch, errInvalidArgument)
+ }
+ return variant, nil
+}
+
+// getCPUVariant returns the CPU variant for ARM.
+// We first try reading the "Cpu architecture" field from /proc/cpuinfo.
+// If we can't find it, we fall back to a system call.
+// This covers running ARM in an emulated environment on an x86 host, where this
+// field is not present in /proc/cpuinfo.
+func getCPUVariant() (string, error) {
+ variant, err := getCPUInfo("Cpu architecture")
+ if err != nil {
+ if errors.Is(err, errNotFound) {
+ // Let's try getting CPU variant from machine architecture
+ arch, err := getMachineArch()
+ if err != nil {
+ return "", fmt.Errorf("failure getting machine architecture: %v", err)
+ }
+
+ variant, err = getCPUVariantFromArch(arch)
+ if err != nil {
+ return "", fmt.Errorf("failure getting CPU variant from machine architecture: %v", err)
+ }
+ } else {
+ return "", fmt.Errorf("failure getting CPU variant: %v", err)
+ }
+ }
+
+ // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7")
+ // https://www.raspberrypi.org/forums/viewtopic.php?t=12614
+ if runtime.GOARCH == "arm" && variant == "7" {
+ model, err := getCPUInfo("model name")
+ if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") {
+ variant = "6"
+ }
+ }
+
+ switch strings.ToLower(variant) {
+ case "8", "aarch64":
+ variant = "v8"
+ case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
+ variant = "v7"
+ case "6", "6tej":
+ variant = "v6"
+ case "5", "5t", "5te", "5tej":
+ variant = "v5"
+ case "4", "4t":
+ variant = "v4"
+ case "3":
+ variant = "v3"
+ default:
+ variant = "unknown"
+ }
+
+ return variant, nil
+}
diff --git a/vendor/github.com/containerd/platforms/cpuinfo_other.go b/vendor/github.com/containerd/platforms/cpuinfo_other.go
new file mode 100644
index 00000000..97a1fe8a
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/cpuinfo_other.go
@@ -0,0 +1,55 @@
+//go:build !linux
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func getCPUVariant() (string, error) {
+
+ var variant string
+
+ if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
+ // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use
+ // runtime.GOARCH to determine the variants
+ switch runtime.GOARCH {
+ case "arm64":
+ variant = "v8"
+ case "arm":
+ variant = "v7"
+ default:
+ variant = "unknown"
+ }
+ } else if runtime.GOOS == "freebsd" {
+ // FreeBSD supports ARMv6 and ARMv7, as well as ARMv4 and ARMv5 (though deprecated);
+ // detecting those variants is currently unimplemented.
+ switch runtime.GOARCH {
+ case "arm64":
+ variant = "v8"
+ default:
+ variant = "unknown"
+ }
+ } else {
+ return "", fmt.Errorf("getCPUVariant for OS %s: %v", runtime.GOOS, errNotImplemented)
+ }
+
+ return variant, nil
+}
diff --git a/vendor/github.com/containerd/platforms/database.go b/vendor/github.com/containerd/platforms/database.go
new file mode 100644
index 00000000..2e26fd3b
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/database.go
@@ -0,0 +1,109 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "runtime"
+ "strings"
+)
+
+// These functions are generated from https://golang.org/src/go/build/syslist.go.
+//
+// We use switch statements because they are slightly faster than map lookups
+// and use a little less memory.
+
+// isKnownOS returns true if we know about the operating system.
+//
+// The OS value should be normalized before calling this function.
+func isKnownOS(os string) bool {
+ switch os {
+ case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
+ return true
+ }
+ return false
+}
+
+// isArmArch returns true if the architecture is ARM.
+//
+// The arch value should be normalized before being passed to this function.
+func isArmArch(arch string) bool {
+ switch arch {
+ case "arm", "arm64":
+ return true
+ }
+ return false
+}
+
+// isKnownArch returns true if we know about the architecture.
+//
+// The arch value should be normalized before being passed to this function.
+func isKnownArch(arch string) bool {
+ switch arch {
+ case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm":
+ return true
+ }
+ return false
+}
+
+func normalizeOS(os string) string {
+ if os == "" {
+ return runtime.GOOS
+ }
+ os = strings.ToLower(os)
+
+ switch os {
+ case "macos":
+ os = "darwin"
+ }
+ return os
+}
+
+// normalizeArch normalizes the architecture.
+func normalizeArch(arch, variant string) (string, string) {
+ arch, variant = strings.ToLower(arch), strings.ToLower(variant)
+ switch arch {
+ case "i386":
+ arch = "386"
+ variant = ""
+ case "x86_64", "x86-64", "amd64":
+ arch = "amd64"
+ if variant == "v1" {
+ variant = ""
+ }
+ case "aarch64", "arm64":
+ arch = "arm64"
+ switch variant {
+ case "8", "v8":
+ variant = ""
+ }
+ case "armhf":
+ arch = "arm"
+ variant = "v7"
+ case "armel":
+ arch = "arm"
+ variant = "v6"
+ case "arm":
+ switch variant {
+ case "", "7":
+ variant = "v7"
+ case "5", "6", "8":
+ variant = "v" + variant
+ }
+ }
+
+ return arch, variant
+}
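A short sketch of the normalization rules above, using the exported Normalize and Format helpers and the Platform alias defined elsewhere in this package; the commented outputs are what the rules imply.

package main

import (
	"fmt"

	"github.com/containerd/platforms"
)

func main() {
	// Non-canonical names are rewritten to the canonical Go/OCI form.
	p := platforms.Normalize(platforms.Platform{OS: "macOS", Architecture: "aarch64", Variant: "v8"})
	fmt.Println(platforms.Format(p)) // "darwin/arm64" (the default v8 variant is dropped)

	p = platforms.Normalize(platforms.Platform{OS: "linux", Architecture: "x86_64"})
	fmt.Println(platforms.Format(p)) // "linux/amd64"

	p = platforms.Normalize(platforms.Platform{OS: "linux", Architecture: "armhf"})
	fmt.Println(platforms.Format(p)) // "linux/arm/v7"
}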
diff --git a/vendor/github.com/containerd/platforms/defaults.go b/vendor/github.com/containerd/platforms/defaults.go
new file mode 100644
index 00000000..9d898d60
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/defaults.go
@@ -0,0 +1,29 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+// DefaultString returns the default string specifier for the platform,
+// with [PR#6](https://github.com/containerd/platforms/pull/6) the result
+// may now also include the OSVersion from the provided platform specification.
+func DefaultString() string {
+ return FormatAll(DefaultSpec())
+}
+
+// DefaultStrict returns strict form of Default.
+func DefaultStrict() MatchComparer {
+ return OnlyStrict(DefaultSpec())
+}
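A tiny usage sketch for the defaults; the exact output is host-dependent (and, per the note above, DefaultString may include an OS version on Windows).

package main

import (
	"fmt"

	"github.com/containerd/platforms"
)

func main() {
	// The host's default platform, as a specifier string.
	fmt.Println(platforms.DefaultString()) // e.g. "linux/amd64"

	// The default matcher trivially matches the default spec.
	fmt.Println(platforms.Default().Match(platforms.DefaultSpec())) // true
}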
diff --git a/vendor/github.com/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/platforms/defaults_darwin.go
new file mode 100644
index 00000000..72355ca8
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/defaults_darwin.go
@@ -0,0 +1,44 @@
+//go:build darwin
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "runtime"
+
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// DefaultSpec returns the current platform's default platform specification.
+func DefaultSpec() specs.Platform {
+ return specs.Platform{
+ OS: runtime.GOOS,
+ Architecture: runtime.GOARCH,
+ // The Variant field will be empty if arch != ARM.
+ Variant: cpuVariant(),
+ }
+}
+
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+ return Ordered(DefaultSpec(), specs.Platform{
+ // darwin runtime also supports Linux binary via runu/LKL
+ OS: "linux",
+ Architecture: runtime.GOARCH,
+ })
+}
diff --git a/vendor/github.com/containerd/platforms/defaults_freebsd.go b/vendor/github.com/containerd/platforms/defaults_freebsd.go
new file mode 100644
index 00000000..d3fe89e0
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/defaults_freebsd.go
@@ -0,0 +1,43 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "runtime"
+
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// DefaultSpec returns the current platform's default platform specification.
+func DefaultSpec() specs.Platform {
+ return specs.Platform{
+ OS: runtime.GOOS,
+ Architecture: runtime.GOARCH,
+ // The Variant field will be empty if arch != ARM.
+ Variant: cpuVariant(),
+ }
+}
+
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+ return Ordered(DefaultSpec(), specs.Platform{
+ OS: "linux",
+ Architecture: runtime.GOARCH,
+ // The Variant field will be empty if arch != ARM.
+ Variant: cpuVariant(),
+ })
+}
diff --git a/vendor/github.com/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/platforms/defaults_unix.go
new file mode 100644
index 00000000..44acc47e
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/defaults_unix.go
@@ -0,0 +1,40 @@
+//go:build !windows && !darwin && !freebsd
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "runtime"
+
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// DefaultSpec returns the current platform's default platform specification.
+func DefaultSpec() specs.Platform {
+ return specs.Platform{
+ OS: runtime.GOOS,
+ Architecture: runtime.GOARCH,
+ // The Variant field will be empty if arch != ARM.
+ Variant: cpuVariant(),
+ }
+}
+
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+ return Only(DefaultSpec())
+}
diff --git a/vendor/github.com/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/platforms/defaults_windows.go
new file mode 100644
index 00000000..427ed72e
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/defaults_windows.go
@@ -0,0 +1,118 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "fmt"
+ "runtime"
+ "strconv"
+ "strings"
+
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/sys/windows"
+)
+
+// DefaultSpec returns the current platform's default platform specification.
+func DefaultSpec() specs.Platform {
+ major, minor, build := windows.RtlGetNtVersionNumbers()
+ return specs.Platform{
+ OS: runtime.GOOS,
+ Architecture: runtime.GOARCH,
+ OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build),
+ // The Variant field will be empty if arch != ARM.
+ Variant: cpuVariant(),
+ }
+}
+
+type windowsmatcher struct {
+ specs.Platform
+ osVersionPrefix string
+ defaultMatcher Matcher
+}
+
+// Match matches platforms with the same Windows major, minor,
+// and build version.
+func (m windowsmatcher) Match(p specs.Platform) bool {
+ match := m.defaultMatcher.Match(p)
+
+ if match && m.OS == "windows" {
+ // HPC containers do not have OS version filled
+ if m.OSVersion == "" || p.OSVersion == "" {
+ return true
+ }
+
+ hostOsVersion := getOSVersion(m.osVersionPrefix)
+ ctrOsVersion := getOSVersion(p.OSVersion)
+ return checkHostAndContainerCompat(hostOsVersion, ctrOsVersion)
+ }
+
+ return match
+}
+
+func getOSVersion(osVersionPrefix string) osVersion {
+ parts := strings.Split(osVersionPrefix, ".")
+ if len(parts) < 3 {
+ return osVersion{}
+ }
+
+ majorVersion, _ := strconv.Atoi(parts[0])
+ minorVersion, _ := strconv.Atoi(parts[1])
+ buildNumber, _ := strconv.Atoi(parts[2])
+
+ return osVersion{
+ MajorVersion: uint8(majorVersion),
+ MinorVersion: uint8(minorVersion),
+ Build: uint16(buildNumber),
+ }
+}
+
+// Less sorts matched platforms in front of other platforms.
+// For matched platforms, it puts platforms with larger revision
+// number in front.
+func (m windowsmatcher) Less(p1, p2 specs.Platform) bool {
+ m1, m2 := m.Match(p1), m.Match(p2)
+ if m1 && m2 {
+ r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion)
+ return r1 > r2
+ }
+ return m1 && !m2
+}
+
+func revision(v string) int {
+ parts := strings.Split(v, ".")
+ if len(parts) < 4 {
+ return 0
+ }
+ r, err := strconv.Atoi(parts[3])
+ if err != nil {
+ return 0
+ }
+ return r
+}
+
+func prefix(v string) string {
+ parts := strings.Split(v, ".")
+ if len(parts) < 4 {
+ return v
+ }
+ return strings.Join(parts[0:3], ".")
+}
+
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+ return Only(DefaultSpec())
+}
diff --git a/vendor/github.com/containerd/platforms/errors.go b/vendor/github.com/containerd/platforms/errors.go
new file mode 100644
index 00000000..5ad721e7
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/errors.go
@@ -0,0 +1,30 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import "errors"
+
+// These errors mirror the errors defined in [github.com/containerd/containerd/errdefs],
+// however, they are not exported as they are not expected to be used as sentinel
+// errors by consumers of this package.
+//
+//nolint:unused // not all errors are used on all platforms.
+var (
+ errNotFound = errors.New("not found")
+ errInvalidArgument = errors.New("invalid argument")
+ errNotImplemented = errors.New("not implemented")
+)
diff --git a/vendor/github.com/containerd/platforms/platform_compat_windows.go b/vendor/github.com/containerd/platforms/platform_compat_windows.go
new file mode 100644
index 00000000..89e66f0c
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/platform_compat_windows.go
@@ -0,0 +1,78 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+// osVersion is a wrapper for Windows version information
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
+type osVersion struct {
+ Version uint32
+ MajorVersion uint8
+ MinorVersion uint8
+ Build uint16
+}
+
+// Windows Client and Server build numbers.
+//
+// See:
+// https://learn.microsoft.com/en-us/windows/release-health/release-information
+// https://learn.microsoft.com/en-us/windows/release-health/windows-server-release-info
+// https://learn.microsoft.com/en-us/windows/release-health/windows11-release-information
+const (
+ // rs5 (version 1809, codename "Redstone 5") corresponds to Windows Server
+ // 2019 (ltsc2019), and Windows 10 (October 2018 Update).
+ rs5 = 17763
+
+ // v21H2Server corresponds to Windows Server 2022 (ltsc2022).
+ v21H2Server = 20348
+
+ // v22H2Win11 corresponds to Windows 11 (2022 Update).
+ v22H2Win11 = 22621
+)
+
+// List of stable ABI compliant ltsc releases
+// Note: List must be sorted in ascending order
+var compatLTSCReleases = []uint16{
+ v21H2Server,
+}
+
+// CheckHostAndContainerCompat checks if given host and container
+// OS versions are compatible.
+// It includes support for stable ABI compliant versions as well.
+// Every release after WS 2022 will support the previous ltsc
+// container image. Stable ABI is in preview mode for windows 11 client.
+// Refer: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility?tabs=windows-server-2022%2Cwindows-10#windows-server-host-os-compatibility
+func checkHostAndContainerCompat(host, ctr osVersion) bool {
+ // check major minor versions of host and guest
+ if host.MajorVersion != ctr.MajorVersion ||
+ host.MinorVersion != ctr.MinorVersion {
+ return false
+ }
+
+ // If host is < WS 2022, exact version match is required
+ if host.Build < v21H2Server {
+ return host.Build == ctr.Build
+ }
+
+ var supportedLtscRelease uint16
+ for i := len(compatLTSCReleases) - 1; i >= 0; i-- {
+ if host.Build >= compatLTSCReleases[i] {
+ supportedLtscRelease = compatLTSCReleases[i]
+ break
+ }
+ }
+ return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build
+}
diff --git a/vendor/github.com/containerd/platforms/platforms.go b/vendor/github.com/containerd/platforms/platforms.go
new file mode 100644
index 00000000..1bbbdb91
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/platforms.go
@@ -0,0 +1,308 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package platforms provides a toolkit for normalizing, matching and
+// specifying container platforms.
+//
+// Centered around OCI platform specifications, we define a string-based
+// specifier syntax that can be used for user input. With a specifier, users
+// only need to specify the parts of the platform that are relevant to their
+// context, providing an operating system or architecture or both.
+//
+// How do I use this package?
+//
+// The vast majority of use cases should simply use the match function with
+// user input. The first step is to parse a specifier into a matcher:
+//
+// m, err := Parse("linux")
+// if err != nil { ... }
+//
+// Once you have a matcher, use it to match against the platform declared by a
+// component, typically from an image or runtime. Since extracting an image's
+// platform is a little more involved, we'll use an example against the
+// platform default:
+//
+// if ok := m.Match(DefaultSpec()); !ok { /* doesn't match */ }
+//
+// This can be composed in loops for resolving runtimes or used as a filter for
+// fetch and select images.
+//
+// More details of the specifier syntax and platform spec follow.
+//
+// # Declaring Platform Support
+//
+// Components that have strict platform requirements should use the OCI
+// platform specification to declare their support. Typically, this will be
+// images and runtimes, which should declare specifically which platforms they
+// support. This looks roughly as follows:
+//
+// type Platform struct {
+// Architecture string
+// OS string
+// Variant string
+// }
+//
+// Most images and runtimes should at least set Architecture and OS, according
+// to their GOARCH and GOOS values, respectively (follow the OCI image
+// specification when in doubt). ARM should set the variant according to the
+// rules outlined below.
+//
+// # Platform Specifiers
+//
+// While the OCI platform specifications provide a tool for components to
+// specify structured information, user input typically doesn't need the full
+// context and much can be inferred. To solve this problem, we introduced
+// "specifiers". A specifier has the format
+// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
+// operating system or the architecture or both.
+//
+// An example of a common specifier is `linux/amd64`. If the host has a default
+// runtime that matches this, the user can simply provide the component that
+// matters. For example, if an image provides amd64 and arm64 support, the
+// operating system, `linux` can be inferred, so they only have to provide
+// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
+// where the architecture may be known but a runtime may support images from
+// different operating systems.
+//
+// # Normalization
+//
+// Because not all users are familiar with the way the Go runtime represents
+// platforms, several normalizations have been provided to make this package
+// easier to use.
+//
+// The following are performed for architectures:
+//
+// Value Normalized
+// aarch64 arm64
+// armhf arm
+// armel arm/v6
+// i386 386
+// x86_64 amd64
+// x86-64 amd64
+//
+// We also normalize the operating system `macos` to `darwin`.
+//
+// # ARM Support
+//
+// To qualify ARM architecture, the Variant field is used to qualify the arm
+// version. The most common arm version, v7, is represented without the variant
+// unless it is explicitly provided. This is treated as equivalent to armhf. A
+// previous architecture, armel, will be normalized to arm/v6.
+//
+// Similarly, the most common arm64 version v8, and most common amd64 version v1
+// are represented without the variant.
+//
+// While these normalizations are provided, their support on arm platforms has
+// not yet been fully implemented and tested.
+package platforms
+
+import (
+ "fmt"
+ "path"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var (
+ specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
+ osAndVersionRe = regexp.MustCompile(`^([A-Za-z0-9_-]+)(?:\(([A-Za-z0-9_.-]*)\))?$`)
+)
+
+const osAndVersionFormat = "%s(%s)"
+
+// Platform is a type alias for convenience, so there is no need to import image-spec package everywhere.
+type Platform = specs.Platform
+
+// Matcher matches platforms specifications, provided by an image or runtime.
+type Matcher interface {
+ Match(platform specs.Platform) bool
+}
+
+// NewMatcher returns a simple matcher based on the provided platform
+// specification. The returned matcher only looks for equality based on os,
+// architecture and variant.
+//
+// One may implement their own matcher if this doesn't provide the required
+// functionality.
+//
+// Applications should opt to use `Match` over directly parsing specifiers.
+func NewMatcher(platform specs.Platform) Matcher {
+ return newDefaultMatcher(platform)
+}
+
+type matcher struct {
+ specs.Platform
+}
+
+func (m *matcher) Match(platform specs.Platform) bool {
+ normalized := Normalize(platform)
+ return m.OS == normalized.OS &&
+ m.Architecture == normalized.Architecture &&
+ m.Variant == normalized.Variant
+}
+
+func (m *matcher) String() string {
+ return FormatAll(m.Platform)
+}
+
+// ParseAll parses a list of platform specifiers into a list of platform.
+func ParseAll(specifiers []string) ([]specs.Platform, error) {
+ platforms := make([]specs.Platform, len(specifiers))
+ for i, s := range specifiers {
+ p, err := Parse(s)
+ if err != nil {
+ return nil, fmt.Errorf("invalid platform %s: %w", s, err)
+ }
+ platforms[i] = p
+ }
+ return platforms, nil
+}
+
+// Parse parses the platform specifier syntax into a platform declaration.
+//
+// Platform specifiers are in the format `<os>[(<OSVersion>)]|<arch>|<os>[(<OSVersion>)]/<arch>[/<variant>]`.
+// The minimum required information for a platform specifier is the operating
+// system or architecture. The OSVersion can be part of the OS like `windows(10.0.17763)`.
+// When an OSVersion is specified, then specs.Platform.OSVersion is populated with that value,
+// and an empty string otherwise.
+// If there is only a single string (no slashes), the
+// value will be matched against the known set of operating systems, then fall
+// back to the known set of architectures. The missing component will be
+// inferred based on the local environment.
+func Parse(specifier string) (specs.Platform, error) {
+ if strings.Contains(specifier, "*") {
+ // TODO(stevvooe): need to work out exact wildcard handling
+ return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errInvalidArgument)
+ }
+
+ // Limit to 4 elements to prevent unbounded split
+ parts := strings.SplitN(specifier, "/", 4)
+
+ var p specs.Platform
+ for i, part := range parts {
+ if i == 0 {
+ // First element is [()]
+ osVer := osAndVersionRe.FindStringSubmatch(part)
+ if osVer == nil {
+ return specs.Platform{}, fmt.Errorf("%q is an invalid OS component of %q: OSAndVersion specifier component must match %q: %w", part, specifier, osAndVersionRe.String(), errInvalidArgument)
+ }
+
+ p.OS = normalizeOS(osVer[1])
+ p.OSVersion = osVer[2]
+ } else {
+ if !specifierRe.MatchString(part) {
+ return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errInvalidArgument)
+ }
+ }
+ }
+
+ switch len(parts) {
+ case 1:
+ // in this case, we will test that the value might be an OS (with or
+ // without the optional OSVersion specified) and look it up.
+ // If it is not known, we'll treat it as an architecture. Since
+ // we have very little information about the platform here, we are
+ // going to be a little more strict if we don't know about the argument
+ // value.
+ if isKnownOS(p.OS) {
+ // picks a default architecture
+ p.Architecture = runtime.GOARCH
+ if p.Architecture == "arm" && cpuVariant() != "v7" {
+ p.Variant = cpuVariant()
+ }
+
+ return p, nil
+ }
+
+ p.Architecture, p.Variant = normalizeArch(parts[0], "")
+ if p.Architecture == "arm" && p.Variant == "v7" {
+ p.Variant = ""
+ }
+ if isKnownArch(p.Architecture) {
+ p.OS = runtime.GOOS
+ return p, nil
+ }
+
+ return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errInvalidArgument)
+ case 2:
+ // In this case, we treat as a regular OS[(OSVersion)]/arch pair. We don't care
+ // about whether or not we know of the platform.
+ p.Architecture, p.Variant = normalizeArch(parts[1], "")
+ if p.Architecture == "arm" && p.Variant == "v7" {
+ p.Variant = ""
+ }
+
+ return p, nil
+ case 3:
+ // we have a fully specified variant, this is rare
+ p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
+ if p.Architecture == "arm64" && p.Variant == "" {
+ p.Variant = "v8"
+ }
+
+ return p, nil
+ }
+
+ return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errInvalidArgument)
+}
+
+// MustParse is like Parse but panics if the specifier cannot be parsed.
+// Simplifies initialization of global variables.
+func MustParse(specifier string) specs.Platform {
+ p, err := Parse(specifier)
+ if err != nil {
+ panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error())
+ }
+ return p
+}
+
+// Format returns a string specifier from the provided platform specification.
+func Format(platform specs.Platform) string {
+ if platform.OS == "" {
+ return "unknown"
+ }
+
+ return path.Join(platform.OS, platform.Architecture, platform.Variant)
+}
+
+// FormatAll returns a string specifier that also includes the OSVersion from the
+// provided platform specification.
+func FormatAll(platform specs.Platform) string {
+ if platform.OS == "" {
+ return "unknown"
+ }
+
+ if platform.OSVersion != "" {
+ OSAndVersion := fmt.Sprintf(osAndVersionFormat, platform.OS, platform.OSVersion)
+ return path.Join(OSAndVersion, platform.Architecture, platform.Variant)
+ }
+ return path.Join(platform.OS, platform.Architecture, platform.Variant)
+}
+
+// Normalize validates and translates the platform to the canonical value.
+//
+// For example, if "Aarch64" is encountered, we change it to "arm64" or if
+// "x86_64" is encountered, it becomes "amd64".
+func Normalize(platform specs.Platform) specs.Platform {
+ platform.OS = normalizeOS(platform.OS)
+ platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
+
+ return platform
+}
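A brief sketch of the specifier round-trip described in the Parse and FormatAll comments above; the Windows build number used here is only an illustrative value.

package main

import (
	"fmt"

	"github.com/containerd/platforms"
)

func main() {
	// The OS version can be carried inside the OS component of a specifier.
	p, err := platforms.Parse("windows(10.0.17763)/amd64")
	if err != nil {
		panic(err)
	}

	fmt.Println(platforms.Format(p))    // "windows/amd64" (OSVersion omitted)
	fmt.Println(platforms.FormatAll(p)) // "windows(10.0.17763)/amd64"
}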
diff --git a/vendor/github.com/containerd/platforms/platforms_other.go b/vendor/github.com/containerd/platforms/platforms_other.go
new file mode 100644
index 00000000..03f4dcd9
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/platforms_other.go
@@ -0,0 +1,30 @@
+//go:build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// newDefaultMatcher returns the default Matcher for containerd
+func newDefaultMatcher(platform specs.Platform) Matcher {
+ return &matcher{
+ Platform: Normalize(platform),
+ }
+}
diff --git a/vendor/github.com/containerd/platforms/platforms_windows.go b/vendor/github.com/containerd/platforms/platforms_windows.go
new file mode 100644
index 00000000..950e2a2d
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/platforms_windows.go
@@ -0,0 +1,34 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// newDefaultMatcher returns a Windows matcher that will match on osVersionPrefix if
+// the platform is Windows; otherwise it falls back to the default matcher.
+func newDefaultMatcher(platform specs.Platform) Matcher {
+ prefix := prefix(platform.OSVersion)
+ return windowsmatcher{
+ Platform: platform,
+ osVersionPrefix: prefix,
+ defaultMatcher: &matcher{
+ Platform: Normalize(platform),
+ },
+ }
+}
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
index 62d91b77..5673f5c0 100644
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
@@ -1,3 +1,4 @@
+// Package md2man converts markdown into roff (man pages).
package md2man
import (
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
index 96a80c99..4f1070fc 100644
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
@@ -47,13 +47,13 @@ const (
tableStart = "\n.TS\nallbox;\n"
tableEnd = ".TE\n"
tableCellStart = "T{\n"
- tableCellEnd = "\nT}\n"
+ tableCellEnd = "\nT}"
tablePreprocessor = `'\" t`
)
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
// from markdown
-func NewRoffRenderer() *roffRenderer { // nolint: golint
+func NewRoffRenderer() *roffRenderer {
return &roffRenderer{}
}
@@ -316,9 +316,8 @@ func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, ente
} else if nodeLiteralSize(node) > 30 {
end = tableCellEnd
}
- if node.Next == nil && end != tableCellEnd {
- // Last cell: need to carriage return if we are at the end of the
- // header row and content isn't wrapped in a "tablecell"
+ if node.Next == nil {
+ // Last cell: need to carriage return if we are at the end of the header row.
end += crTag
}
out(w, end)
@@ -356,7 +355,7 @@ func countColumns(node *blackfriday.Node) int {
}
func out(w io.Writer, output string) {
- io.WriteString(w, output) // nolint: errcheck
+ io.WriteString(w, output) //nolint:errcheck
}
func escapeSpecialChars(w io.Writer, text []byte) {
@@ -395,7 +394,7 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) {
i++
}
if i > org {
- w.Write(text[org:i]) // nolint: errcheck
+ w.Write(text[org:i]) //nolint:errcheck
}
// escape a character
@@ -403,7 +402,7 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) {
break
}
- w.Write([]byte{'\\', text[i]}) // nolint: errcheck
+ w.Write([]byte{'\\', text[i]}) //nolint:errcheck
}
}
diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS
index ad1abd49..c5a480b5 100644
--- a/vendor/github.com/docker/cli/AUTHORS
+++ b/vendor/github.com/docker/cli/AUTHORS
@@ -48,6 +48,7 @@ Alfred Landrum
Ali Rostami
Alicia Lauerman
Allen Sun
+Allie Sadler
Alvin Deng
Amen Belayneh
Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com>
@@ -81,6 +82,7 @@ Antonis Kalipetis
Anusha Ragunathan
Ao Li
Arash Deshmeh
+Archimedes Trajano
Arko Dasgupta
Arnaud Porterie
Arnaud Rebillout
@@ -88,6 +90,7 @@ Arthur Peka
Ashly Mathew
Ashwini Oruganti
Aslam Ahemad
+Austin Vazquez
Azat Khuyiyakhmetov
Bardia Keyoumarsi
Barnaby Gray
@@ -132,6 +135,7 @@ Cao Weiwei
Carlo Mion
Carlos Alexandro Becker
Carlos de Paula
+Carston Schilds
Casey Korver
Ce Gao
Cedric Davies
@@ -189,6 +193,7 @@ Daisuke Ito
dalanlan
Damien Nadé
Dan Cotora
+Dan Wallis
Danial Gharib
Daniel Artine
Daniel Cassidy
@@ -237,6 +242,7 @@ Deshi Xiao
Dharmit Shah
Dhawal Yogesh Bhanushali
Dieter Reuter
+Dilep Dev <34891655+DilepDev@users.noreply.github.com>
Dima Stopel
Dimitry Andric
Ding Fei
@@ -308,6 +314,8 @@ George MacRorie
George Margaritis
George Xie
Gianluca Borello
+Giau. Tran Minh
+Giedrius Jonikas
Gildas Cuisinier
Gio d'Amelio
Gleb Stsenov
@@ -344,6 +352,7 @@ Hugo Gabriel Eyherabide
huqun
Huu Nguyen
Hyzhou Zhy
+Iain MacDonald
Iain Samuel McLean Elder
Ian Campbell
Ian Philpot
@@ -393,6 +402,7 @@ Jesse Adametz
Jessica Frazelle
Jezeniel Zapanta
Jian Zhang
+Jianyong Wu
Jie Luo
Jilles Oldenbeuving
Jim Chen
@@ -446,6 +456,7 @@ Julian
Julien Barbier
Julien Kassar
Julien Maitrehenry
+Julio Cesar Garcia
Justas Brazauskas
Justin Chadwell
Justin Cormack
@@ -490,19 +501,22 @@ Kunal Kushwaha
Kyle Mitofsky
Lachlan Cooper
Lai Jiangshan
+Lajos Papp
Lars Kellogg-Stedman
Laura Brehm
Laura Frank
Laurent Erignoux
+Laurent Goderre
Lee Gaines
Lei Jitang
Lennie
+lentil32
Leo Gallucci
Leonid Skorospelov
Lewis Daly
Li Fu Bang
Li Yi
-Li Yi
+Li Zeghong
Liang-Chi Hsieh
Lihua Tang
Lily Guo
@@ -515,6 +529,7 @@ lixiaobing10051267
Lloyd Dewolf
Lorenzo Fontana
Louis Opter
+Lovekesh Kumar
Luca Favatella
Luca Marturana
Lucas Chan
@@ -559,6 +574,7 @@ Matt Robenolt
Matteo Orefice
Matthew Heon
Matthieu Hauglustaine
+Matthieu MOREL
Mauro Porras P
Max Shytikov
Max-Julian Pogner
@@ -566,6 +582,7 @@ Maxime Petazzoni
Maximillian Fan Xavier
Mei ChunTao
Melroy van den Berg
+Mert Şişmanoğlu
Metal <2466052+tedhexaflow@users.noreply.github.com>
Micah Zoltu
Michael A. Smith
@@ -598,7 +615,9 @@ Mindaugas Rukas
Miroslav Gula
Misty Stanley-Jones
Mohammad Banikazemi
+Mohammad Hossein
Mohammed Aaqib Ansari
+Mohammed Aminu Futa
Mohini Anne Dsouza
Moorthy RS
Morgan Bauer
@@ -633,9 +652,11 @@ Nicolas De Loof
Nikhil Chawla
Nikolas Garofil
Nikolay Milovanov
+NinaLua
Nir Soffer
Nishant Totla
NIWA Hideyuki
+Noah Silas
Noah Treuhaft
O.S. Tezer
Oded Arbel
@@ -653,10 +674,12 @@ Patrick Böänziger
Patrick Daigle <114765035+pdaig@users.noreply.github.com>
Patrick Hemmer
Patrick Lang
+Patrick St. laurent
Paul
Paul Kehrer
Paul Lietar
Paul Mulders
+Paul Rogalski
Paul Seyfert
Paul Weaver
Pavel Pospisil
@@ -678,7 +701,6 @@ Philip Alexander Etling
Philipp Gillé
Philipp Schmied
Phong Tran
-pidster
Pieter E Smit
pixelistik
Pratik Karki
@@ -738,6 +760,7 @@ Samuel Cochran
Samuel Karp
Sandro Jäckel
Santhosh Manohar
+Sarah Sanders
Sargun Dhillon
Saswat Bhattacharya
Saurabh Kumar
@@ -770,6 +793,7 @@ Spencer Brown
Spring Lee
squeegels
Srini Brahmaroutu
+Stavros Panakakis
Stefan S.
Stefan Scherer
Stefan Weil
@@ -780,6 +804,7 @@ Steve Durrheimer
Steve Richards
Steven Burgess
Stoica-Marcu Floris-Andrei
+Stuart Williams
Subhajit Ghosh
Sun Jianbo
Sune Keller
@@ -867,6 +892,7 @@ Wang Yumu <37442693@qq.com>
Wataru Ishida
Wayne Song
Wen Cheng Ma
+Wenlong Zhang
Wenzhi Liang
Wes Morgan
Wewang Xiaorenfine
@@ -908,3 +934,4 @@ Zhuo Zhi
Átila Camurça Alves
Александр Менщиков <__Singleton__@hackerdom.ru>
徐俊杰
+林博仁 Buo-ren Lin
diff --git a/vendor/github.com/docker/cli/cli-plugins/hooks/printer.go b/vendor/github.com/docker/cli/cli-plugins/hooks/printer.go
deleted file mode 100644
index f6d4b28e..00000000
--- a/vendor/github.com/docker/cli/cli-plugins/hooks/printer.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package hooks
-
-import (
- "fmt"
- "io"
-
- "github.com/morikuni/aec"
-)
-
-func PrintNextSteps(out io.Writer, messages []string) {
- if len(messages) == 0 {
- return
- }
- _, _ = fmt.Fprintln(out, aec.Bold.Apply("\nWhat's next:"))
- for _, n := range messages {
- _, _ = fmt.Fprintln(out, " ", n)
- }
-}
diff --git a/vendor/github.com/docker/cli/cli-plugins/hooks/template.go b/vendor/github.com/docker/cli/cli-plugins/hooks/template.go
deleted file mode 100644
index e6bd69f3..00000000
--- a/vendor/github.com/docker/cli/cli-plugins/hooks/template.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package hooks
-
-import (
- "bytes"
- "errors"
- "fmt"
- "strconv"
- "strings"
- "text/template"
-
- "github.com/spf13/cobra"
-)
-
-type HookType int
-
-const (
- NextSteps = iota
-)
-
-// HookMessage represents a plugin hook response. Plugins
-// declaring support for CLI hooks need to print a json
-// representation of this type when their hook subcommand
-// is invoked.
-type HookMessage struct {
- Type HookType
- Template string
-}
-
-// TemplateReplaceSubcommandName returns a hook template string
-// that will be replaced by the CLI subcommand being executed
-//
-// Example:
-//
-// "you ran the subcommand: " + TemplateReplaceSubcommandName()
-//
-// when being executed after the command:
-// `docker run --name "my-container" alpine`
-// will result in the message:
-// `you ran the subcommand: run`
-func TemplateReplaceSubcommandName() string {
- return hookTemplateCommandName
-}
-
-// TemplateReplaceFlagValue returns a hook template string
-// that will be replaced by the flags value.
-//
-// Example:
-//
-// "you ran a container named: " + TemplateReplaceFlagValue("name")
-//
-// when being executed after the command:
-// `docker run --name "my-container" alpine`
-// will result in the message:
-// `you ran a container named: my-container`
-func TemplateReplaceFlagValue(flag string) string {
- return fmt.Sprintf(hookTemplateFlagValue, flag)
-}
-
-// TemplateReplaceArg takes an index i and returns a hook
-// template string that the CLI will replace the template with
-// the ith argument, after processing the passed flags.
-//
-// Example:
-//
-// "run this image with `docker run " + TemplateReplaceArg(0) + "`"
-//
-// when being executed after the command:
-// `docker pull alpine`
-// will result in the message:
-// "Run this image with `docker run alpine`"
-func TemplateReplaceArg(i int) string {
- return fmt.Sprintf(hookTemplateArg, strconv.Itoa(i))
-}
-
-func ParseTemplate(hookTemplate string, cmd *cobra.Command) ([]string, error) {
- tmpl := template.New("").Funcs(commandFunctions)
- tmpl, err := tmpl.Parse(hookTemplate)
- if err != nil {
- return nil, err
- }
- b := bytes.Buffer{}
- err = tmpl.Execute(&b, cmd)
- if err != nil {
- return nil, err
- }
- return strings.Split(b.String(), "\n"), nil
-}
-
-var ErrHookTemplateParse = errors.New("failed to parse hook template")
-
-const (
- hookTemplateCommandName = "{{.Name}}"
- hookTemplateFlagValue = `{{flag . "%s"}}`
- hookTemplateArg = "{{arg . %s}}"
-)
-
-var commandFunctions = template.FuncMap{
- "flag": getFlagValue,
- "arg": getArgValue,
-}
-
-func getFlagValue(cmd *cobra.Command, flag string) (string, error) {
- cmdFlag := cmd.Flag(flag)
- if cmdFlag == nil {
- return "", ErrHookTemplateParse
- }
- return cmdFlag.Value.String(), nil
-}
-
-func getArgValue(cmd *cobra.Command, i int) (string, error) {
- flags := cmd.Flags()
- if flags == nil {
- return "", ErrHookTemplateParse
- }
- return flags.Arg(i), nil
-}
diff --git a/vendor/github.com/docker/cli/cli-plugins/manager/candidate.go b/vendor/github.com/docker/cli/cli-plugins/manager/candidate.go
deleted file mode 100644
index e65ac1a5..00000000
--- a/vendor/github.com/docker/cli/cli-plugins/manager/candidate.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package manager
-
-import "os/exec"
-
-// Candidate represents a possible plugin candidate, for mocking purposes
-type Candidate interface {
- Path() string
- Metadata() ([]byte, error)
-}
-
-type candidate struct {
- path string
-}
-
-func (c *candidate) Path() string {
- return c.path
-}
-
-func (c *candidate) Metadata() ([]byte, error) {
- return exec.Command(c.path, MetadataSubcommandName).Output() // #nosec G204 -- ignore "Subprocess launched with a potential tainted input or cmd arguments"
-}
diff --git a/vendor/github.com/docker/cli/cli-plugins/manager/cobra.go b/vendor/github.com/docker/cli/cli-plugins/manager/cobra.go
deleted file mode 100644
index 4bfa06fa..00000000
--- a/vendor/github.com/docker/cli/cli-plugins/manager/cobra.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package manager
-
-import (
- "fmt"
- "net/url"
- "os"
- "strings"
- "sync"
-
- "github.com/docker/cli/cli/command"
- "github.com/spf13/cobra"
- "go.opentelemetry.io/otel/attribute"
-)
-
-const (
- // CommandAnnotationPlugin is added to every stub command added by
- // AddPluginCommandStubs with the value "true" and so can be
- // used to distinguish plugin stubs from regular commands.
- CommandAnnotationPlugin = "com.docker.cli.plugin"
-
- // CommandAnnotationPluginVendor is added to every stub command
- // added by AddPluginCommandStubs and contains the vendor of
- // that plugin.
- CommandAnnotationPluginVendor = "com.docker.cli.plugin.vendor"
-
- // CommandAnnotationPluginVersion is added to every stub command
- // added by AddPluginCommandStubs and contains the version of
- // that plugin.
- CommandAnnotationPluginVersion = "com.docker.cli.plugin.version"
-
- // CommandAnnotationPluginInvalid is added to any stub command
- // added by AddPluginCommandStubs for an invalid command (that
- // is, one which failed it's candidate test) and contains the
- // reason for the failure.
- CommandAnnotationPluginInvalid = "com.docker.cli.plugin-invalid"
-
- // CommandAnnotationPluginCommandPath is added to overwrite the
- // command path for a plugin invocation.
- CommandAnnotationPluginCommandPath = "com.docker.cli.plugin.command_path"
-)
-
-var pluginCommandStubsOnce sync.Once
-
-// AddPluginCommandStubs adds a stub cobra.Commands for each valid and invalid
-// plugin. The command stubs will have several annotations added, see
-// `CommandAnnotationPlugin*`.
-func AddPluginCommandStubs(dockerCli command.Cli, rootCmd *cobra.Command) (err error) {
- pluginCommandStubsOnce.Do(func() {
- var plugins []Plugin
- plugins, err = ListPlugins(dockerCli, rootCmd)
- if err != nil {
- return
- }
- for _, p := range plugins {
- vendor := p.Vendor
- if vendor == "" {
- vendor = "unknown"
- }
- annotations := map[string]string{
- CommandAnnotationPlugin: "true",
- CommandAnnotationPluginVendor: vendor,
- CommandAnnotationPluginVersion: p.Version,
- }
- if p.Err != nil {
- annotations[CommandAnnotationPluginInvalid] = p.Err.Error()
- }
- rootCmd.AddCommand(&cobra.Command{
- Use: p.Name,
- Short: p.ShortDescription,
- Run: func(_ *cobra.Command, _ []string) {},
- Annotations: annotations,
- DisableFlagParsing: true,
- RunE: func(cmd *cobra.Command, args []string) error {
- flags := rootCmd.PersistentFlags()
- flags.SetOutput(nil)
- perr := flags.Parse(args)
- if perr != nil {
- return err
- }
- if flags.Changed("help") {
- cmd.HelpFunc()(rootCmd, args)
- return nil
- }
- return fmt.Errorf("docker: unknown command: docker %s\n\nRun 'docker --help' for more information", cmd.Name())
- },
- ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
- // Delegate completion to plugin
- cargs := []string{p.Path, cobra.ShellCompRequestCmd, p.Name}
- cargs = append(cargs, args...)
- cargs = append(cargs, toComplete)
- os.Args = cargs
- runCommand, runErr := PluginRunCommand(dockerCli, p.Name, cmd)
- if runErr != nil {
- return nil, cobra.ShellCompDirectiveError
- }
- runErr = runCommand.Run()
- if runErr == nil {
- os.Exit(0) // plugin already rendered complete data
- }
- return nil, cobra.ShellCompDirectiveError
- },
- })
- }
- })
- return err
-}
-
-const (
- dockerCliAttributePrefix = attribute.Key("docker.cli")
-
- cobraCommandPath = attribute.Key("cobra.command_path")
-)
-
-func getPluginResourceAttributes(cmd *cobra.Command, plugin Plugin) attribute.Set {
- commandPath := cmd.Annotations[CommandAnnotationPluginCommandPath]
- if commandPath == "" {
- commandPath = fmt.Sprintf("%s %s", cmd.CommandPath(), plugin.Name)
- }
-
- attrSet := attribute.NewSet(
- cobraCommandPath.String(commandPath),
- )
-
- kvs := make([]attribute.KeyValue, 0, attrSet.Len())
- for iter := attrSet.Iter(); iter.Next(); {
- attr := iter.Attribute()
- kvs = append(kvs, attribute.KeyValue{
- Key: dockerCliAttributePrefix + "." + attr.Key,
- Value: attr.Value,
- })
- }
- return attribute.NewSet(kvs...)
-}
-
-func appendPluginResourceAttributesEnvvar(env []string, cmd *cobra.Command, plugin Plugin) []string {
- if attrs := getPluginResourceAttributes(cmd, plugin); attrs.Len() > 0 {
- // values in environment variables need to be in baggage format
- // otel/baggage package can be used after update to v1.22, currently it encodes incorrectly
- attrsSlice := make([]string, attrs.Len())
- for iter := attrs.Iter(); iter.Next(); {
- i, v := iter.IndexedAttribute()
- attrsSlice[i] = string(v.Key) + "=" + url.PathEscape(v.Value.AsString())
- }
- env = append(env, ResourceAttributesEnvvar+"="+strings.Join(attrsSlice, ","))
- }
- return env
-}
diff --git a/vendor/github.com/docker/cli/cli-plugins/manager/error.go b/vendor/github.com/docker/cli/cli-plugins/manager/error.go
deleted file mode 100644
index cb0bbb5a..00000000
--- a/vendor/github.com/docker/cli/cli-plugins/manager/error.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
-
-package manager
-
-import (
- "github.com/pkg/errors"
-)
-
-// pluginError is set as Plugin.Err by NewPlugin if the plugin
-// candidate fails one of the candidate tests. This exists primarily
-// to implement encoding.TextMarshaller such that rendering a plugin as JSON
-// (e.g. for `docker info -f '{{json .CLIPlugins}}'`) renders the Err
-// field as a useful string and not just `{}`. See
-// https://github.com/golang/go/issues/10748 for some discussion
-// around why the builtin error type doesn't implement this.
-type pluginError struct {
- cause error
-}
-
-// Error satisfies the core error interface for pluginError.
-func (e *pluginError) Error() string {
- return e.cause.Error()
-}
-
-// Cause satisfies the errors.causer interface for pluginError.
-func (e *pluginError) Cause() error {
- return e.cause
-}
-
-// Unwrap provides compatibility for Go 1.13 error chains.
-func (e *pluginError) Unwrap() error {
- return e.cause
-}
-
-// MarshalText marshalls the pluginError into a textual form.
-func (e *pluginError) MarshalText() (text []byte, err error) {
- return []byte(e.cause.Error()), nil
-}
-
-// wrapAsPluginError wraps an error in a pluginError with an
-// additional message, analogous to errors.Wrapf.
-func wrapAsPluginError(err error, msg string) error {
- if err == nil {
- return nil
- }
- return &pluginError{cause: errors.Wrap(err, msg)}
-}
-
-// NewPluginError creates a new pluginError, analogous to
-// errors.Errorf.
-func NewPluginError(msg string, args ...any) error {
- return &pluginError{cause: errors.Errorf(msg, args...)}
-}
diff --git a/vendor/github.com/docker/cli/cli-plugins/manager/hooks.go b/vendor/github.com/docker/cli/cli-plugins/manager/hooks.go
deleted file mode 100644
index 5125c56d..00000000
--- a/vendor/github.com/docker/cli/cli-plugins/manager/hooks.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package manager
-
-import (
- "context"
- "encoding/json"
- "strings"
-
- "github.com/docker/cli/cli-plugins/hooks"
- "github.com/docker/cli/cli/command"
- "github.com/sirupsen/logrus"
- "github.com/spf13/cobra"
- "github.com/spf13/pflag"
-)
-
-// HookPluginData is the type representing the information
-// that plugins declaring support for hooks get passed when
-// being invoked following a CLI command execution.
-type HookPluginData struct {
- // RootCmd is a string representing the matching hook configuration
- // which is currently being invoked. If a hook for `docker context` is
- // configured and the user executes `docker context ls`, the plugin will
- // be invoked with `context`.
- RootCmd string
- Flags map[string]string
- CommandError string
-}
-
-// RunCLICommandHooks is the entrypoint into the hooks execution flow after
-// a main CLI command was executed. It calls the hook subcommand for all
-// present CLI plugins that declare support for hooks in their metadata and
-// parses/prints their responses.
-func RunCLICommandHooks(ctx context.Context, dockerCli command.Cli, rootCmd, subCommand *cobra.Command, cmdErrorMessage string) {
- commandName := strings.TrimPrefix(subCommand.CommandPath(), rootCmd.Name()+" ")
- flags := getCommandFlags(subCommand)
-
- runHooks(ctx, dockerCli, rootCmd, subCommand, commandName, flags, cmdErrorMessage)
-}
-
-// RunPluginHooks is the entrypoint for the hooks execution flow
-// after a plugin command was just executed by the CLI.
-func RunPluginHooks(ctx context.Context, dockerCli command.Cli, rootCmd, subCommand *cobra.Command, args []string) {
- commandName := strings.Join(args, " ")
- flags := getNaiveFlags(args)
-
- runHooks(ctx, dockerCli, rootCmd, subCommand, commandName, flags, "")
-}
-
-func runHooks(ctx context.Context, dockerCli command.Cli, rootCmd, subCommand *cobra.Command, invokedCommand string, flags map[string]string, cmdErrorMessage string) {
- nextSteps := invokeAndCollectHooks(ctx, dockerCli, rootCmd, subCommand, invokedCommand, flags, cmdErrorMessage)
-
- hooks.PrintNextSteps(dockerCli.Err(), nextSteps)
-}
-
-func invokeAndCollectHooks(ctx context.Context, dockerCli command.Cli, rootCmd, subCmd *cobra.Command, subCmdStr string, flags map[string]string, cmdErrorMessage string) []string {
- // check if the context was cancelled before invoking hooks
- select {
- case <-ctx.Done():
- return nil
- default:
- }
-
- pluginsCfg := dockerCli.ConfigFile().Plugins
- if pluginsCfg == nil {
- return nil
- }
-
- nextSteps := make([]string, 0, len(pluginsCfg))
- for pluginName, cfg := range pluginsCfg {
- match, ok := pluginMatch(cfg, subCmdStr)
- if !ok {
- continue
- }
-
- p, err := GetPlugin(pluginName, dockerCli, rootCmd)
- if err != nil {
- continue
- }
-
- hookReturn, err := p.RunHook(ctx, HookPluginData{
- RootCmd: match,
- Flags: flags,
- CommandError: cmdErrorMessage,
- })
- if err != nil {
- // skip misbehaving plugins, but don't halt execution
- continue
- }
-
- var hookMessageData hooks.HookMessage
- err = json.Unmarshal(hookReturn, &hookMessageData)
- if err != nil {
- continue
- }
-
- // currently the only hook type
- if hookMessageData.Type != hooks.NextSteps {
- continue
- }
-
- processedHook, err := hooks.ParseTemplate(hookMessageData.Template, subCmd)
- if err != nil {
- continue
- }
-
- var appended bool
- nextSteps, appended = appendNextSteps(nextSteps, processedHook)
- if !appended {
- logrus.Debugf("Plugin %s responded with an empty hook message %q. Ignoring.", pluginName, string(hookReturn))
- }
- }
- return nextSteps
-}
-
-// appendNextSteps appends the processed hook output to the nextSteps slice.
-// If the processed hook output is empty, it is not appended.
-// Empty lines are not stripped if there's at least one non-empty line.
-func appendNextSteps(nextSteps []string, processed []string) ([]string, bool) {
- empty := true
- for _, l := range processed {
- if strings.TrimSpace(l) != "" {
- empty = false
- break
- }
- }
-
- if empty {
- return nextSteps, false
- }
-
- return append(nextSteps, processed...), true
-}
-
-// pluginMatch takes a plugin configuration and a string representing the
-// command being executed (such as 'image ls' – the root 'docker' is omitted)
-// and, if the configuration includes a hook for the invoked command, returns
-// the configured hook string.
-func pluginMatch(pluginCfg map[string]string, subCmd string) (string, bool) {
- configuredPluginHooks, ok := pluginCfg["hooks"]
- if !ok || configuredPluginHooks == "" {
- return "", false
- }
-
- commands := strings.Split(configuredPluginHooks, ",")
- for _, hookCmd := range commands {
- if hookMatch(hookCmd, subCmd) {
- return hookCmd, true
- }
- }
-
- return "", false
-}
-
-func hookMatch(hookCmd, subCmd string) bool {
- hookCmdTokens := strings.Split(hookCmd, " ")
- subCmdTokens := strings.Split(subCmd, " ")
-
- if len(hookCmdTokens) > len(subCmdTokens) {
- return false
- }
-
- for i, v := range hookCmdTokens {
- if v != subCmdTokens[i] {
- return false
- }
- }
-
- return true
-}
-
-func getCommandFlags(cmd *cobra.Command) map[string]string {
- flags := make(map[string]string)
- cmd.Flags().Visit(func(f *pflag.Flag) {
- var fValue string
- if f.Value.Type() == "bool" {
- fValue = f.Value.String()
- }
- flags[f.Name] = fValue
- })
- return flags
-}
-
-// getNaiveFlags string-matches argv and parses them into a map.
-// This is used when calling hooks after a plugin command, since
-// in this case we can't rely on the cobra command tree to parse
-// flags in this case. In this case, no values are ever passed,
-// since we don't have enough information to process them.
-func getNaiveFlags(args []string) map[string]string {
- flags := make(map[string]string)
- for _, arg := range args {
- if strings.HasPrefix(arg, "--") {
- flags[arg[2:]] = ""
- continue
- }
- if strings.HasPrefix(arg, "-") {
- flags[arg[1:]] = ""
- }
- }
- return flags
-}
diff --git a/vendor/github.com/docker/cli/cli-plugins/manager/manager.go b/vendor/github.com/docker/cli/cli-plugins/manager/manager.go
deleted file mode 100644
index 9f795bc4..00000000
--- a/vendor/github.com/docker/cli/cli-plugins/manager/manager.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package manager
-
-import (
- "context"
- "os"
- "os/exec"
- "path/filepath"
- "sort"
- "strings"
- "sync"
-
- "github.com/docker/cli/cli/command"
- "github.com/docker/cli/cli/config"
- "github.com/docker/cli/cli/config/configfile"
- "github.com/fvbommel/sortorder"
- "github.com/spf13/cobra"
- "golang.org/x/sync/errgroup"
-)
-
-const (
- // ReexecEnvvar is the name of an ennvar which is set to the command
- // used to originally invoke the docker CLI when executing a
- // plugin. Assuming $PATH and $CWD remain unchanged this should allow
- // the plugin to re-execute the original CLI.
- ReexecEnvvar = "DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"
-
- // ResourceAttributesEnvvar is the name of the envvar that includes additional
- // resource attributes for OTEL.
- ResourceAttributesEnvvar = "OTEL_RESOURCE_ATTRIBUTES"
-)
-
-// errPluginNotFound is the error returned when a plugin could not be found.
-type errPluginNotFound string
-
-func (errPluginNotFound) NotFound() {}
-
-func (e errPluginNotFound) Error() string {
- return "Error: No such CLI plugin: " + string(e)
-}
-
-type notFound interface{ NotFound() }
-
-// IsNotFound is true if the given error is due to a plugin not being found.
-func IsNotFound(err error) bool {
- if e, ok := err.(*pluginError); ok {
- err = e.Cause()
- }
- _, ok := err.(notFound)
- return ok
-}
-
-// getPluginDirs returns the platform-specific locations to search for plugins
-// in order of preference.
-//
-// Plugin-discovery is performed in the following order of preference:
-//
-// 1. The "cli-plugins" directory inside the CLIs [config.Path] (usually "~/.docker/cli-plugins").
-// 2. Additional plugin directories as configured through [ConfigFile.CLIPluginsExtraDirs].
-// 3. Platform-specific defaultSystemPluginDirs.
-//
-// [ConfigFile.CLIPluginsExtraDirs]: https://pkg.go.dev/github.com/docker/cli@v26.1.4+incompatible/cli/config/configfile#ConfigFile.CLIPluginsExtraDirs
-func getPluginDirs(cfg *configfile.ConfigFile) ([]string, error) {
- var pluginDirs []string
-
- if cfg != nil {
- pluginDirs = append(pluginDirs, cfg.CLIPluginsExtraDirs...)
- }
- pluginDir, err := config.Path("cli-plugins")
- if err != nil {
- return nil, err
- }
-
- pluginDirs = append(pluginDirs, pluginDir)
- pluginDirs = append(pluginDirs, defaultSystemPluginDirs...)
- return pluginDirs, nil
-}
-
-func addPluginCandidatesFromDir(res map[string][]string, d string) {
- dentries, err := os.ReadDir(d)
- // Silently ignore any directories which we cannot list (e.g. due to
- // permissions or anything else) or which is not a directory
- if err != nil {
- return
- }
- for _, dentry := range dentries {
- switch dentry.Type() & os.ModeType {
- case 0, os.ModeSymlink:
- // Regular file or symlink, keep going
- default:
- // Something else, ignore.
- continue
- }
- name := dentry.Name()
- if !strings.HasPrefix(name, NamePrefix) {
- continue
- }
- name = strings.TrimPrefix(name, NamePrefix)
- var err error
- if name, err = trimExeSuffix(name); err != nil {
- continue
- }
- res[name] = append(res[name], filepath.Join(d, dentry.Name()))
- }
-}
-
-// listPluginCandidates returns a map from plugin name to the list of (unvalidated) Candidates. The list is in descending order of priority.
-func listPluginCandidates(dirs []string) map[string][]string {
- result := make(map[string][]string)
- for _, d := range dirs {
- addPluginCandidatesFromDir(result, d)
- }
- return result
-}
-
-// GetPlugin returns a plugin on the system by its name
-func GetPlugin(name string, dockerCli command.Cli, rootcmd *cobra.Command) (*Plugin, error) {
- pluginDirs, err := getPluginDirs(dockerCli.ConfigFile())
- if err != nil {
- return nil, err
- }
-
- candidates := listPluginCandidates(pluginDirs)
- if paths, ok := candidates[name]; ok {
- if len(paths) == 0 {
- return nil, errPluginNotFound(name)
- }
- c := &candidate{paths[0]}
- p, err := newPlugin(c, rootcmd.Commands())
- if err != nil {
- return nil, err
- }
- if !IsNotFound(p.Err) {
- p.ShadowedPaths = paths[1:]
- }
- return &p, nil
- }
-
- return nil, errPluginNotFound(name)
-}
-
-// ListPlugins produces a list of the plugins available on the system
-func ListPlugins(dockerCli command.Cli, rootcmd *cobra.Command) ([]Plugin, error) {
- pluginDirs, err := getPluginDirs(dockerCli.ConfigFile())
- if err != nil {
- return nil, err
- }
-
- candidates := listPluginCandidates(pluginDirs)
-
- var plugins []Plugin
- var mu sync.Mutex
- eg, _ := errgroup.WithContext(context.TODO())
- cmds := rootcmd.Commands()
- for _, paths := range candidates {
- func(paths []string) {
- eg.Go(func() error {
- if len(paths) == 0 {
- return nil
- }
- c := &candidate{paths[0]}
- p, err := newPlugin(c, cmds)
- if err != nil {
- return err
- }
- if !IsNotFound(p.Err) {
- p.ShadowedPaths = paths[1:]
- mu.Lock()
- defer mu.Unlock()
- plugins = append(plugins, p)
- }
- return nil
- })
- }(paths)
- }
- if err := eg.Wait(); err != nil {
- return nil, err
- }
-
- sort.Slice(plugins, func(i, j int) bool {
- return sortorder.NaturalLess(plugins[i].Name, plugins[j].Name)
- })
-
- return plugins, nil
-}
-
-// PluginRunCommand returns an "os/exec".Cmd which when .Run() will execute the named plugin.
-// The rootcmd argument is referenced to determine the set of builtin commands in order to detect conficts.
-// The error returned satisfies the IsNotFound() predicate if no plugin was found or if the first candidate plugin was invalid somehow.
-func PluginRunCommand(dockerCli command.Cli, name string, rootcmd *cobra.Command) (*exec.Cmd, error) {
- // This uses the full original args, not the args which may
- // have been provided by cobra to our caller. This is because
- // they lack e.g. global options which we must propagate here.
- args := os.Args[1:]
- if !pluginNameRe.MatchString(name) {
- // We treat this as "not found" so that callers will
- // fallback to their "invalid" command path.
- return nil, errPluginNotFound(name)
- }
- exename := addExeSuffix(NamePrefix + name)
- pluginDirs, err := getPluginDirs(dockerCli.ConfigFile())
- if err != nil {
- return nil, err
- }
-
- for _, d := range pluginDirs {
- path := filepath.Join(d, exename)
-
- // We stat here rather than letting the exec tell us
- // ENOENT because the latter does not distinguish a
- // file not existing from its dynamic loader or one of
- // its libraries not existing.
- if _, err := os.Stat(path); os.IsNotExist(err) {
- continue
- }
-
- c := &candidate{path: path}
- plugin, err := newPlugin(c, rootcmd.Commands())
- if err != nil {
- return nil, err
- }
- if plugin.Err != nil {
- // TODO: why are we not returning plugin.Err?
- return nil, errPluginNotFound(name)
- }
- cmd := exec.Command(plugin.Path, args...) // #nosec G204 -- ignore "Subprocess launched with a potential tainted input or cmd arguments"
-
- // Using dockerCli.{In,Out,Err}() here results in a hang until something is input.
- // See: - https://github.com/golang/go/issues/10338
- // - https://github.com/golang/go/commit/d000e8742a173aa0659584aa01b7ba2834ba28ab
- // os.Stdin is a *os.File which avoids this behaviour. We don't need the functionality
- // of the wrappers here anyway.
- cmd.Stdin = os.Stdin
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
-
- cmd.Env = append(cmd.Environ(), ReexecEnvvar+"="+os.Args[0])
- cmd.Env = appendPluginResourceAttributesEnvvar(cmd.Env, rootcmd, plugin)
-
- return cmd, nil
- }
- return nil, errPluginNotFound(name)
-}
-
-// IsPluginCommand checks if the given cmd is a plugin-stub.
-func IsPluginCommand(cmd *cobra.Command) bool {
- return cmd.Annotations[CommandAnnotationPlugin] == "true"
-}
diff --git a/vendor/github.com/docker/cli/cli-plugins/manager/manager_unix.go b/vendor/github.com/docker/cli/cli-plugins/manager/manager_unix.go
deleted file mode 100644
index f546dc38..00000000
--- a/vendor/github.com/docker/cli/cli-plugins/manager/manager_unix.go
+++ /dev/null
@@ -1,20 +0,0 @@
-//go:build !windows
-
-package manager
-
-// defaultSystemPluginDirs are the platform-specific locations to search
-// for plugins in order of preference.
-//
-// Plugin-discovery is performed in the following order of preference:
-//
-// 1. The "cli-plugins" directory inside the CLIs config-directory (usually "~/.docker/cli-plugins").
-// 2. Additional plugin directories as configured through [ConfigFile.CLIPluginsExtraDirs].
-// 3. Platform-specific defaultSystemPluginDirs (as defined below).
-//
-// [ConfigFile.CLIPluginsExtraDirs]: https://pkg.go.dev/github.com/docker/cli@v26.1.4+incompatible/cli/config/configfile#ConfigFile.CLIPluginsExtraDirs
-var defaultSystemPluginDirs = []string{
- "/usr/local/lib/docker/cli-plugins",
- "/usr/local/libexec/docker/cli-plugins",
- "/usr/lib/docker/cli-plugins",
- "/usr/libexec/docker/cli-plugins",
-}
diff --git a/vendor/github.com/docker/cli/cli-plugins/manager/manager_windows.go b/vendor/github.com/docker/cli/cli-plugins/manager/manager_windows.go
deleted file mode 100644
index e8b5598e..00000000
--- a/vendor/github.com/docker/cli/cli-plugins/manager/manager_windows.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package manager
-
-import (
- "os"
- "path/filepath"
-)
-
-// defaultSystemPluginDirs are the platform-specific locations to search
-// for plugins in order of preference.
-//
-// Plugin-discovery is performed in the following order of preference:
-//
-// 1. The "cli-plugins" directory inside the CLIs config-directory (usually "~/.docker/cli-plugins").
-// 2. Additional plugin directories as configured through [ConfigFile.CLIPluginsExtraDirs].
-// 3. Platform-specific defaultSystemPluginDirs (as defined below).
-//
-// [ConfigFile.CLIPluginsExtraDirs]: https://pkg.go.dev/github.com/docker/cli@v26.1.4+incompatible/cli/config/configfile#ConfigFile.CLIPluginsExtraDirs
-var defaultSystemPluginDirs = []string{
- filepath.Join(os.Getenv("ProgramData"), "Docker", "cli-plugins"),
- filepath.Join(os.Getenv("ProgramFiles"), "Docker", "cli-plugins"),
-}
diff --git a/vendor/github.com/docker/cli/cli-plugins/manager/plugin.go b/vendor/github.com/docker/cli/cli-plugins/manager/plugin.go
deleted file mode 100644
index 5576ef43..00000000
--- a/vendor/github.com/docker/cli/cli-plugins/manager/plugin.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package manager
-
-import (
- "context"
- "encoding/json"
- "os"
- "os/exec"
- "path/filepath"
- "regexp"
- "strings"
-
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
-)
-
-var pluginNameRe = regexp.MustCompile("^[a-z][a-z0-9]*$")
-
-// Plugin represents a potential plugin with all it's metadata.
-type Plugin struct {
- Metadata
-
- Name string `json:",omitempty"`
- Path string `json:",omitempty"`
-
- // Err is non-nil if the plugin failed one of the candidate tests.
- Err error `json:",omitempty"`
-
- // ShadowedPaths contains the paths of any other plugins which this plugin takes precedence over.
- ShadowedPaths []string `json:",omitempty"`
-}
-
-// newPlugin determines if the given candidate is valid and returns a
-// Plugin. If the candidate fails one of the tests then `Plugin.Err`
-// is set, and is always a `pluginError`, but the `Plugin` is still
-// returned with no error. An error is only returned due to a
-// non-recoverable error.
-func newPlugin(c Candidate, cmds []*cobra.Command) (Plugin, error) {
- path := c.Path()
- if path == "" {
- return Plugin{}, errors.New("plugin candidate path cannot be empty")
- }
-
- // The candidate listing process should have skipped anything
- // which would fail here, so there are all real errors.
- fullname := filepath.Base(path)
- if fullname == "." {
- return Plugin{}, errors.Errorf("unable to determine basename of plugin candidate %q", path)
- }
- var err error
- if fullname, err = trimExeSuffix(fullname); err != nil {
- return Plugin{}, errors.Wrapf(err, "plugin candidate %q", path)
- }
- if !strings.HasPrefix(fullname, NamePrefix) {
- return Plugin{}, errors.Errorf("plugin candidate %q: does not have %q prefix", path, NamePrefix)
- }
-
- p := Plugin{
- Name: strings.TrimPrefix(fullname, NamePrefix),
- Path: path,
- }
-
- // Now apply the candidate tests, so these update p.Err.
- if !pluginNameRe.MatchString(p.Name) {
- p.Err = NewPluginError("plugin candidate %q did not match %q", p.Name, pluginNameRe.String())
- return p, nil
- }
-
- for _, cmd := range cmds {
- // Ignore conflicts with commands which are
- // just plugin stubs (i.e. from a previous
- // call to AddPluginCommandStubs).
- if IsPluginCommand(cmd) {
- continue
- }
- if cmd.Name() == p.Name {
- p.Err = NewPluginError("plugin %q duplicates builtin command", p.Name)
- return p, nil
- }
- if cmd.HasAlias(p.Name) {
- p.Err = NewPluginError("plugin %q duplicates an alias of builtin command %q", p.Name, cmd.Name())
- return p, nil
- }
- }
-
- // We are supposed to check for relevant execute permissions here. Instead we rely on an attempt to execute.
- meta, err := c.Metadata()
- if err != nil {
- p.Err = wrapAsPluginError(err, "failed to fetch metadata")
- return p, nil
- }
-
- if err := json.Unmarshal(meta, &p.Metadata); err != nil {
- p.Err = wrapAsPluginError(err, "invalid metadata")
- return p, nil
- }
- if p.Metadata.SchemaVersion != "0.1.0" {
- p.Err = NewPluginError("plugin SchemaVersion %q is not valid, must be 0.1.0", p.Metadata.SchemaVersion)
- return p, nil
- }
- if p.Metadata.Vendor == "" {
- p.Err = NewPluginError("plugin metadata does not define a vendor")
- return p, nil
- }
- return p, nil
-}
-
-// RunHook executes the plugin's hooks command
-// and returns its unprocessed output.
-func (p *Plugin) RunHook(ctx context.Context, hookData HookPluginData) ([]byte, error) {
- hDataBytes, err := json.Marshal(hookData)
- if err != nil {
- return nil, wrapAsPluginError(err, "failed to marshall hook data")
- }
-
- pCmd := exec.CommandContext(ctx, p.Path, p.Name, HookSubcommandName, string(hDataBytes)) // #nosec G204 -- ignore "Subprocess launched with a potential tainted input or cmd arguments"
- pCmd.Env = os.Environ()
- pCmd.Env = append(pCmd.Env, ReexecEnvvar+"="+os.Args[0])
- hookCmdOutput, err := pCmd.Output()
- if err != nil {
- return nil, wrapAsPluginError(err, "failed to execute plugin hook subcommand")
- }
-
- return hookCmdOutput, nil
-}
diff --git a/vendor/github.com/docker/cli/cli-plugins/manager/suffix_unix.go b/vendor/github.com/docker/cli/cli-plugins/manager/suffix_unix.go
deleted file mode 100644
index 050e5020..00000000
--- a/vendor/github.com/docker/cli/cli-plugins/manager/suffix_unix.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build !windows
-
-package manager
-
-func trimExeSuffix(s string) (string, error) {
- return s, nil
-}
-
-func addExeSuffix(s string) string {
- return s
-}
diff --git a/vendor/github.com/docker/cli/cli-plugins/manager/suffix_windows.go b/vendor/github.com/docker/cli/cli-plugins/manager/suffix_windows.go
deleted file mode 100644
index 53b507c8..00000000
--- a/vendor/github.com/docker/cli/cli-plugins/manager/suffix_windows.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package manager
-
-import (
- "path/filepath"
- "strings"
-
- "github.com/pkg/errors"
-)
-
-// This is made slightly more complex due to needing to be case insensitive.
-func trimExeSuffix(s string) (string, error) {
- ext := filepath.Ext(s)
- if ext == "" {
- return "", errors.Errorf("path %q lacks required file extension", s)
- }
-
- exe := ".exe"
- if !strings.EqualFold(ext, exe) {
- return "", errors.Errorf("path %q lacks required %q suffix", s, exe)
- }
- return strings.TrimSuffix(s, ext), nil
-}
-
-func addExeSuffix(s string) string {
- return s + ".exe"
-}
diff --git a/vendor/github.com/docker/cli/cli-plugins/metadata/annotations.go b/vendor/github.com/docker/cli/cli-plugins/metadata/annotations.go
new file mode 100644
index 00000000..1c7c6d18
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli-plugins/metadata/annotations.go
@@ -0,0 +1,28 @@
+package metadata
+
+const (
+ // CommandAnnotationPlugin is added to every stub command added by
+ // AddPluginCommandStubs with the value "true" and so can be
+ // used to distinguish plugin stubs from regular commands.
+ CommandAnnotationPlugin = "com.docker.cli.plugin"
+
+ // CommandAnnotationPluginVendor is added to every stub command
+ // added by AddPluginCommandStubs and contains the vendor of
+ // that plugin.
+ CommandAnnotationPluginVendor = "com.docker.cli.plugin.vendor"
+
+ // CommandAnnotationPluginVersion is added to every stub command
+ // added by AddPluginCommandStubs and contains the version of
+ // that plugin.
+ CommandAnnotationPluginVersion = "com.docker.cli.plugin.version"
+
+ // CommandAnnotationPluginInvalid is added to any stub command
+ // added by AddPluginCommandStubs for an invalid command (that
+ // is, one which failed its candidate test) and contains the
+ // reason for the failure.
+ CommandAnnotationPluginInvalid = "com.docker.cli.plugin-invalid"
+
+ // CommandAnnotationPluginCommandPath is added to overwrite the
+ // command path for a plugin invocation.
+ CommandAnnotationPluginCommandPath = "com.docker.cli.plugin.command_path"
+)
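The new metadata package only declares annotation keys; callers read them straight off a command's Annotations map. A small hypothetical sketch of the pattern cli/cobra.go switches to further down in this diff (isPluginStub is a made-up name mirroring cobra.go's unexported isPlugin):

    // Sketch: detecting a plugin stub via the relocated annotation constants.
    package main

    import (
        "fmt"

        "github.com/docker/cli/cli-plugins/metadata"
        "github.com/spf13/cobra"
    )

    // isPluginStub reports whether cmd was registered as a CLI plugin stub.
    func isPluginStub(cmd *cobra.Command) bool {
        return cmd.Annotations[metadata.CommandAnnotationPlugin] == "true"
    }

    func main() {
        cmd := &cobra.Command{
            Use: "buildx",
            Annotations: map[string]string{
                metadata.CommandAnnotationPlugin:       "true",
                metadata.CommandAnnotationPluginVendor: "Docker Inc.",
            },
        }
        fmt.Println(isPluginStub(cmd)) // true
    }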
diff --git a/vendor/github.com/docker/cli/cli-plugins/manager/metadata.go b/vendor/github.com/docker/cli/cli-plugins/metadata/metadata.go
similarity index 76%
rename from vendor/github.com/docker/cli/cli-plugins/manager/metadata.go
rename to vendor/github.com/docker/cli/cli-plugins/metadata/metadata.go
index f7aac06f..9d408c00 100644
--- a/vendor/github.com/docker/cli/cli-plugins/manager/metadata.go
+++ b/vendor/github.com/docker/cli/cli-plugins/metadata/metadata.go
@@ -1,4 +1,4 @@
-package manager
+package metadata
const (
// NamePrefix is the prefix required on all plugin binary names
@@ -13,6 +13,12 @@ const (
// which must be implemented by plugins declaring support
// for hooks in their metadata.
HookSubcommandName = "docker-cli-plugin-hooks"
+
+ // ReexecEnvvar is the name of an envvar which is set to the command
+ // used to originally invoke the docker CLI when executing a
+ // plugin. Assuming $PATH and $CWD remain unchanged this should allow
+ // the plugin to re-execute the original CLI.
+ ReexecEnvvar = "DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"
)
// Metadata provided by the plugin.
diff --git a/vendor/github.com/docker/cli/cli/cobra.go b/vendor/github.com/docker/cli/cli/cobra.go
index aa7dbef4..7a14b6f4 100644
--- a/vendor/github.com/docker/cli/cli/cobra.go
+++ b/vendor/github.com/docker/cli/cli/cobra.go
@@ -3,15 +3,12 @@ package cli
import (
"fmt"
"os"
- "path/filepath"
"sort"
"strings"
- pluginmanager "github.com/docker/cli/cli-plugins/manager"
+ "github.com/docker/cli/cli-plugins/metadata"
"github.com/docker/cli/cli/command"
cliflags "github.com/docker/cli/cli/flags"
- "github.com/docker/docker/pkg/homedir"
- "github.com/docker/docker/registry"
"github.com/fvbommel/sortorder"
"github.com/moby/term"
"github.com/morikuni/aec"
@@ -62,13 +59,6 @@ func setupCommonRootCommand(rootCmd *cobra.Command) (*cliflags.ClientOptions, *c
"docs.code-delimiter": `"`, // https://github.com/docker/cli-docs-tool/blob/77abede22166eaea4af7335096bdcedd043f5b19/annotation/annotation.go#L20-L22
}
- // Configure registry.CertsDir() when running in rootless-mode
- if os.Getenv("ROOTLESSKIT_STATE_DIR") != "" {
- if configHome, err := homedir.GetConfigHome(); err == nil {
- registry.SetCertsDir(filepath.Join(configHome, "docker/certs.d"))
- }
- }
-
return opts, helpCommand
}
@@ -252,7 +242,7 @@ func hasAdditionalHelp(cmd *cobra.Command) bool {
}
func isPlugin(cmd *cobra.Command) bool {
- return pluginmanager.IsPluginCommand(cmd)
+ return cmd.Annotations[metadata.CommandAnnotationPlugin] == "true"
}
func hasAliases(cmd *cobra.Command) bool {
@@ -356,9 +346,9 @@ func decoratedName(cmd *cobra.Command) string {
}
func vendorAndVersion(cmd *cobra.Command) string {
- if vendor, ok := cmd.Annotations[pluginmanager.CommandAnnotationPluginVendor]; ok && isPlugin(cmd) {
+ if vendor, ok := cmd.Annotations[metadata.CommandAnnotationPluginVendor]; ok && isPlugin(cmd) {
version := ""
- if v, ok := cmd.Annotations[pluginmanager.CommandAnnotationPluginVersion]; ok && v != "" {
+ if v, ok := cmd.Annotations[metadata.CommandAnnotationPluginVersion]; ok && v != "" {
version = ", " + v
}
return fmt.Sprintf("(%s%s)", vendor, version)
@@ -417,7 +407,7 @@ func invalidPlugins(cmd *cobra.Command) []*cobra.Command {
}
func invalidPluginReason(cmd *cobra.Command) string {
- return cmd.Annotations[pluginmanager.CommandAnnotationPluginInvalid]
+ return cmd.Annotations[metadata.CommandAnnotationPluginInvalid]
}
const usageTemplate = `Usage:
diff --git a/vendor/github.com/docker/cli/cli/command/cli.go b/vendor/github.com/docker/cli/cli/command/cli.go
index 227720fa..1e042ec0 100644
--- a/vendor/github.com/docker/cli/cli/command/cli.go
+++ b/vendor/github.com/docker/cli/cli/command/cli.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package command
@@ -8,7 +8,6 @@ import (
"fmt"
"io"
"os"
- "path/filepath"
"runtime"
"strconv"
"sync"
@@ -21,21 +20,15 @@ import (
"github.com/docker/cli/cli/context/store"
"github.com/docker/cli/cli/debug"
cliflags "github.com/docker/cli/cli/flags"
- manifeststore "github.com/docker/cli/cli/manifest/store"
- registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/cli/cli/streams"
- "github.com/docker/cli/cli/trust"
"github.com/docker/cli/cli/version"
dopts "github.com/docker/cli/opts"
"github.com/docker/docker/api"
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
- "github.com/docker/go-connections/tlsconfig"
"github.com/pkg/errors"
"github.com/spf13/cobra"
- notaryclient "github.com/theupdateframework/notary/client"
)
const defaultInitTimeout = 2 * time.Second
@@ -53,13 +46,10 @@ type Cli interface {
Streams
SetIn(in *streams.In)
Apply(ops ...CLIOption) error
- ConfigFile() *configfile.ConfigFile
+ config.Provider
ServerInfo() ServerInfo
- NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error)
DefaultVersion() string
CurrentVersion() string
- ManifestStore() manifeststore.Store
- RegistryClient(bool) registryclient.RegistryClient
ContentTrustEnabled() bool
BuildKitEnabled() (bool, error)
ContextStore() store.Store
@@ -69,7 +59,9 @@ type Cli interface {
}
// DockerCli is an instance the docker command line client.
-// Instances of the client can be returned from NewDockerCli.
+// Instances of the client should be created using the [NewDockerCli]
+// constructor to make sure they are properly initialized with defaults
+// set.
type DockerCli struct {
configFile *configfile.ConfigFile
options *cliflags.ClientOptions
@@ -84,7 +76,7 @@ type DockerCli struct {
init sync.Once
initErr error
dockerEndpoint docker.Endpoint
- contextStoreConfig store.Config
+ contextStoreConfig *store.Config
initTimeout time.Duration
res telemetryResource
@@ -96,7 +88,7 @@ type DockerCli struct {
enableGlobalMeter, enableGlobalTracer bool
}
-// DefaultVersion returns api.defaultVersion.
+// DefaultVersion returns [api.DefaultVersion].
func (*DockerCli) DefaultVersion() string {
return api.DefaultVersion
}
@@ -188,7 +180,7 @@ func (cli *DockerCli) BuildKitEnabled() (bool, error) {
}
si := cli.ServerInfo()
- if si.BuildkitVersion == types.BuilderBuildKit {
+ if si.BuildkitVersion == build.BuilderBuildKit {
// The daemon advertised BuildKit as the preferred builder; this may
// be either a Linux daemon or a Windows daemon with experimental
// BuildKit support enabled.
@@ -202,16 +194,16 @@ func (cli *DockerCli) BuildKitEnabled() (bool, error) {
// HooksEnabled returns whether plugin hooks are enabled.
func (cli *DockerCli) HooksEnabled() bool {
- // legacy support DOCKER_CLI_HINTS env var
- if v := os.Getenv("DOCKER_CLI_HINTS"); v != "" {
+ // use DOCKER_CLI_HOOKS env var value if set and not empty
+ if v := os.Getenv("DOCKER_CLI_HOOKS"); v != "" {
enabled, err := strconv.ParseBool(v)
if err != nil {
return false
}
return enabled
}
- // use DOCKER_CLI_HOOKS env var value if set and not empty
- if v := os.Getenv("DOCKER_CLI_HOOKS"); v != "" {
+ // legacy support DOCKER_CLI_HINTS env var
+ if v := os.Getenv("DOCKER_CLI_HINTS"); v != "" {
enabled, err := strconv.ParseBool(v)
if err != nil {
return false
@@ -230,30 +222,6 @@ func (cli *DockerCli) HooksEnabled() bool {
return false
}
-// ManifestStore returns a store for local manifests
-func (*DockerCli) ManifestStore() manifeststore.Store {
- // TODO: support override default location from config file
- return manifeststore.NewStore(filepath.Join(config.Dir(), "manifests"))
-}
-
-// RegistryClient returns a client for communicating with a Docker distribution
-// registry
-func (cli *DockerCli) RegistryClient(allowInsecure bool) registryclient.RegistryClient {
- resolver := func(ctx context.Context, index *registry.IndexInfo) registry.AuthConfig {
- return ResolveAuthConfig(cli.ConfigFile(), index)
- }
- return registryclient.NewRegistryClient(resolver, UserAgent(), allowInsecure)
-}
-
-// WithInitializeClient is passed to DockerCli.Initialize by callers who wish to set a particular API Client for use by the CLI.
-func WithInitializeClient(makeClient func(dockerCli *DockerCli) (client.APIClient, error)) CLIOption {
- return func(dockerCli *DockerCli) error {
- var err error
- dockerCli.client, err = makeClient(dockerCli)
- return err
- }
-}
-
// Initialize the dockerCli runs initialization that must happen after command
// line flags are parsed.
func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...CLIOption) error {
@@ -275,13 +243,33 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...CLIOption)
return errors.New("conflicting options: cannot specify both --host and --context")
}
+ if cli.contextStoreConfig == nil {
+ // This path can be hit when calling Initialize on a DockerCli that's
+ // not constructed through [NewDockerCli]. Using the default context
+ // store without a config set will result in Endpoints from contexts
+ // not being type-mapped correctly, and used as a generic "map[string]any",
+ // instead of a [docker.EndpointMeta].
+ //
+ // When looking up the API endpoint (using [EndpointFromContext]), no
+ // endpoint will be found, and a default, empty endpoint will be used
+ // instead, which in turn causes newAPIClientFromEndpoint to
+ // be initialized with the default config instead of the settings for
+ // the current context (which may mean connecting to the wrong
+ // endpoint and/or a missing TLS config).
+ //
+ // [EndpointFromContext]: https://github.com/docker/cli/blob/33494921b80fd0b5a06acc3a34fa288de4bb2e6b/cli/context/docker/load.go#L139-L149
+ if err := WithDefaultContextStoreConfig()(cli); err != nil {
+ return err
+ }
+ }
+
cli.options = opts
cli.configFile = config.LoadDefaultConfigFile(cli.err)
cli.currentContext = resolveContextName(cli.options, cli.configFile)
cli.contextStore = &ContextStoreWithDefault{
- Store: store.New(config.ContextStoreDir(), cli.contextStoreConfig),
+ Store: store.New(config.ContextStoreDir(), *cli.contextStoreConfig),
Resolver: func() (*DefaultContext, error) {
- return ResolveDefaultContext(cli.options, cli.contextStoreConfig)
+ return ResolveDefaultContext(cli.options, *cli.contextStoreConfig)
},
}
@@ -292,6 +280,7 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...CLIOption)
if cli.enableGlobalTracer {
cli.createGlobalTracerProvider(cli.baseCtx)
}
+ filterResourceAttributesEnvvar()
return nil
}
@@ -345,7 +334,10 @@ func resolveDockerEndpoint(s store.Reader, contextName string) (docker.Endpoint,
// Resolve the Docker endpoint for the default context (based on config, env vars and CLI flags)
func resolveDefaultDockerEndpoint(opts *cliflags.ClientOptions) (docker.Endpoint, error) {
- host, err := getServerHost(opts.Hosts, opts.TLSOptions)
+ // defaultToTLS determines whether we should use a TLS host as default
+ // if nothing was configured by the user.
+ defaultToTLS := opts.TLSOptions != nil
+ host, err := getServerHost(opts.Hosts, defaultToTLS)
if err != nil {
return docker.Endpoint{}, err
}
@@ -403,11 +395,6 @@ func (cli *DockerCli) initializeFromClient() {
cli.client.NegotiateAPIVersionPing(ping)
}
-// NotaryClient provides a Notary Repository to interact with signed metadata for an image
-func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) {
- return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...)
-}
-
// ContextStore returns the ContextStore
func (cli *DockerCli) ContextStore() store.Store {
return cli.contextStore
@@ -523,7 +510,7 @@ func (cli *DockerCli) Apply(ops ...CLIOption) error {
type ServerInfo struct {
HasExperimental bool
OSType string
- BuildkitVersion types.BuilderVersion
+ BuildkitVersion build.BuilderVersion
// SwarmStatus provides information about the current swarm status of the
// engine, obtained from the "Swarm" header in the API response.
@@ -553,18 +540,15 @@ func NewDockerCli(ops ...CLIOption) (*DockerCli, error) {
return cli, nil
}
-func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) {
- var host string
+func getServerHost(hosts []string, defaultToTLS bool) (string, error) {
switch len(hosts) {
case 0:
- host = os.Getenv(client.EnvOverrideHost)
+ return dopts.ParseHost(defaultToTLS, os.Getenv(client.EnvOverrideHost))
case 1:
- host = hosts[0]
+ return dopts.ParseHost(defaultToTLS, hosts[0])
default:
return "", errors.New("Specify only one -H")
}
-
- return dopts.ParseHost(tlsOptions != nil, host)
}
// UserAgent returns the user agent string used for making API requests
diff --git a/vendor/github.com/docker/cli/cli/command/cli_options.go b/vendor/github.com/docker/cli/cli/command/cli_options.go
index ef133d6a..dd3c9473 100644
--- a/vendor/github.com/docker/cli/cli/command/cli_options.go
+++ b/vendor/github.com/docker/cli/cli/command/cli_options.go
@@ -11,7 +11,6 @@ import (
"github.com/docker/cli/cli/streams"
"github.com/docker/docker/client"
- "github.com/docker/docker/errdefs"
"github.com/moby/term"
"github.com/pkg/errors"
)
@@ -101,7 +100,8 @@ func WithContentTrust(enabled bool) CLIOption {
// WithDefaultContextStoreConfig configures the cli to use the default context store configuration.
func WithDefaultContextStoreConfig() CLIOption {
return func(cli *DockerCli) error {
- cli.contextStoreConfig = DefaultContextStoreConfig()
+ cfg := DefaultContextStoreConfig()
+ cli.contextStoreConfig = &cfg
return nil
}
}
@@ -114,6 +114,18 @@ func WithAPIClient(c client.APIClient) CLIOption {
}
}
+// WithInitializeClient is passed to [DockerCli.Initialize] to initialize
+// an API Client for use by the CLI.
+func WithInitializeClient(makeClient func(*DockerCli) (client.APIClient, error)) CLIOption {
+ return func(cli *DockerCli) error {
+ c, err := makeClient(cli)
+ if err != nil {
+ return err
+ }
+ return WithAPIClient(c)(cli)
+ }
+}
+
// envOverrideHTTPHeaders is the name of the environment-variable that can be
// used to set custom HTTP headers to be sent by the client. This environment
// variable is the equivalent to the HttpHeaders field in the configuration
@@ -177,7 +189,7 @@ func withCustomHeadersFromEnv() client.Opt {
csvReader := csv.NewReader(strings.NewReader(value))
fields, err := csvReader.Read()
if err != nil {
- return errdefs.InvalidParameter(errors.Errorf(
+ return invalidParameter(errors.Errorf(
"failed to parse custom headers from %s environment variable: value must be formatted as comma-separated key=value pairs",
envOverrideHTTPHeaders,
))
@@ -194,7 +206,7 @@ func withCustomHeadersFromEnv() client.Opt {
k = strings.TrimSpace(k)
if k == "" {
- return errdefs.InvalidParameter(errors.Errorf(
+ return invalidParameter(errors.Errorf(
`failed to set custom headers from %s environment variable: value contains a key=value pair with an empty key: '%s'`,
envOverrideHTTPHeaders, kv,
))
@@ -205,7 +217,7 @@ func withCustomHeadersFromEnv() client.Opt {
// from an environment variable with the same name). In the meantime,
// produce an error to prevent users from depending on this.
if !hasValue {
- return errdefs.InvalidParameter(errors.Errorf(
+ return invalidParameter(errors.Errorf(
`failed to set custom headers from %s environment variable: missing "=" in key=value pair: '%s'`,
envOverrideHTTPHeaders, kv,
))
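WithInitializeClient defers construction of the API client until Initialize runs, instead of requiring a ready-made client via WithAPIClient. A usage sketch, assuming the usual docker/cli and docker client entry points (command.NewDockerCli, cliflags.NewClientOptions, client.NewClientWithOpts); error handling is kept minimal.

```go
package main

import (
	"github.com/docker/cli/cli/command"
	cliflags "github.com/docker/cli/cli/flags"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := command.NewDockerCli()
	if err != nil {
		panic(err)
	}
	// Build the API client lazily during Initialize; makeClient receives the
	// partially-initialized DockerCli and can inspect its config if needed.
	err = cli.Initialize(cliflags.NewClientOptions(), command.WithInitializeClient(func(*command.DockerCli) (client.APIClient, error) {
		return client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	}))
	if err != nil {
		panic(err)
	}
	defer cli.Client().Close()
}
```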
diff --git a/vendor/github.com/docker/cli/cli/command/context.go b/vendor/github.com/docker/cli/cli/command/context.go
index 404a6a13..64e88e44 100644
--- a/vendor/github.com/docker/cli/cli/command/context.go
+++ b/vendor/github.com/docker/cli/cli/command/context.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package command
diff --git a/vendor/github.com/docker/cli/cli/command/defaultcontextstore.go b/vendor/github.com/docker/cli/cli/command/defaultcontextstore.go
index c5b310e9..9b49b3af 100644
--- a/vendor/github.com/docker/cli/cli/command/defaultcontextstore.go
+++ b/vendor/github.com/docker/cli/cli/command/defaultcontextstore.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package command
@@ -7,7 +7,6 @@ import (
"github.com/docker/cli/cli/context/docker"
"github.com/docker/cli/cli/context/store"
cliflags "github.com/docker/cli/cli/flags"
- "github.com/docker/docker/errdefs"
"github.com/pkg/errors"
)
@@ -117,7 +116,7 @@ func (s *ContextStoreWithDefault) List() ([]store.Metadata, error) {
// CreateOrUpdate is not allowed for the default context and fails
func (s *ContextStoreWithDefault) CreateOrUpdate(meta store.Metadata) error {
if meta.Name == DefaultContextName {
- return errdefs.InvalidParameter(errors.New("default context cannot be created nor updated"))
+ return invalidParameter(errors.New("default context cannot be created nor updated"))
}
return s.Store.CreateOrUpdate(meta)
}
@@ -125,7 +124,7 @@ func (s *ContextStoreWithDefault) CreateOrUpdate(meta store.Metadata) error {
// Remove is not allowed for the default context and fails
func (s *ContextStoreWithDefault) Remove(name string) error {
if name == DefaultContextName {
- return errdefs.InvalidParameter(errors.New("default context cannot be removed"))
+ return invalidParameter(errors.New("default context cannot be removed"))
}
return s.Store.Remove(name)
}
@@ -145,7 +144,7 @@ func (s *ContextStoreWithDefault) GetMetadata(name string) (store.Metadata, erro
// ResetTLSMaterial is not implemented for default context and fails
func (s *ContextStoreWithDefault) ResetTLSMaterial(name string, data *store.ContextTLSData) error {
if name == DefaultContextName {
- return errdefs.InvalidParameter(errors.New("default context cannot be edited"))
+ return invalidParameter(errors.New("default context cannot be edited"))
}
return s.Store.ResetTLSMaterial(name, data)
}
@@ -153,7 +152,7 @@ func (s *ContextStoreWithDefault) ResetTLSMaterial(name string, data *store.Cont
// ResetEndpointTLSMaterial is not implemented for default context and fails
func (s *ContextStoreWithDefault) ResetEndpointTLSMaterial(contextName string, endpointName string, data *store.EndpointTLSData) error {
if contextName == DefaultContextName {
- return errdefs.InvalidParameter(errors.New("default context cannot be edited"))
+ return invalidParameter(errors.New("default context cannot be edited"))
}
return s.Store.ResetEndpointTLSMaterial(contextName, endpointName, data)
}
@@ -186,7 +185,7 @@ func (s *ContextStoreWithDefault) GetTLSData(contextName, endpointName, fileName
return nil, err
}
if defaultContext.TLS.Endpoints[endpointName].Files[fileName] == nil {
- return nil, errdefs.NotFound(errors.Errorf("TLS data for %s/%s/%s does not exist", DefaultContextName, endpointName, fileName))
+ return nil, notFound(errors.Errorf("TLS data for %s/%s/%s does not exist", DefaultContextName, endpointName, fileName))
}
return defaultContext.TLS.Endpoints[endpointName].Files[fileName], nil
}
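Replacing errdefs.InvalidParameter with a local helper works because the moby errdefs package classifies errors by marker methods rather than concrete types. A self-contained sketch of that mechanism; isInvalidParameter here is only a stand-in for errdefs.IsInvalidParameter.

```go
package main

import (
	"errors"
	"fmt"
)

// invalidParameterErr mirrors the helper type added in utils.go: it wraps an
// error and exposes the InvalidParameter() marker method.
type invalidParameterErr struct{ error }

func (invalidParameterErr) InvalidParameter() {}

// isInvalidParameter is a stand-in for errdefs.IsInvalidParameter: it only
// checks whether the error (or anything it wraps) carries the marker method.
func isInvalidParameter(err error) bool {
	var target interface{ InvalidParameter() }
	return errors.As(err, &target)
}

func main() {
	err := invalidParameterErr{errors.New("default context cannot be removed")}
	fmt.Println(isInvalidParameter(err))                        // true
	fmt.Println(isInvalidParameter(errors.New("other error"))) // false
}
```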
diff --git a/vendor/github.com/docker/cli/cli/command/formatter/buildcache.go b/vendor/github.com/docker/cli/cli/command/formatter/buildcache.go
index 71f80c0c..ade5de73 100644
--- a/vendor/github.com/docker/cli/cli/command/formatter/buildcache.go
+++ b/vendor/github.com/docker/cli/cli/command/formatter/buildcache.go
@@ -6,8 +6,7 @@ import (
"strings"
"time"
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/pkg/stringid"
+ "github.com/docker/docker/api/types/build"
"github.com/docker/go-units"
)
@@ -52,7 +51,7 @@ shared: {{.Shared}}
return Format(source)
}
-func buildCacheSort(buildCache []*types.BuildCache) {
+func buildCacheSort(buildCache []*build.CacheRecord) {
sort.Slice(buildCache, func(i, j int) bool {
lui, luj := buildCache[i].LastUsedAt, buildCache[j].LastUsedAt
switch {
@@ -71,7 +70,7 @@ func buildCacheSort(buildCache []*types.BuildCache) {
}
// BuildCacheWrite renders the context for a list of containers
-func BuildCacheWrite(ctx Context, buildCaches []*types.BuildCache) error {
+func BuildCacheWrite(ctx Context, buildCaches []*build.CacheRecord) error {
render := func(format func(subContext SubContext) error) error {
buildCacheSort(buildCaches)
for _, bc := range buildCaches {
@@ -88,7 +87,7 @@ func BuildCacheWrite(ctx Context, buildCaches []*types.BuildCache) error {
type buildCacheContext struct {
HeaderContext
trunc bool
- v *types.BuildCache
+ v *build.CacheRecord
}
func newBuildCacheContext() *buildCacheContext {
@@ -115,7 +114,7 @@ func (c *buildCacheContext) MarshalJSON() ([]byte, error) {
func (c *buildCacheContext) ID() string {
id := c.v.ID
if c.trunc {
- id = stringid.TruncateID(c.v.ID)
+ id = TruncateID(c.v.ID)
}
if c.v.InUse {
return id + "*"
@@ -131,7 +130,7 @@ func (c *buildCacheContext) Parent() string {
parent = c.v.Parent //nolint:staticcheck // Ignore SA1019: Field was deprecated in API v1.42, but kept for backward compatibility
}
if c.trunc {
- return stringid.TruncateID(parent)
+ return TruncateID(parent)
}
return parent
}
diff --git a/vendor/github.com/docker/cli/cli/command/formatter/container.go b/vendor/github.com/docker/cli/cli/command/formatter/container.go
index ba62efb2..0a5c587a 100644
--- a/vendor/github.com/docker/cli/cli/command/formatter/container.go
+++ b/vendor/github.com/docker/cli/cli/command/formatter/container.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package formatter
@@ -11,10 +11,11 @@ import (
"strings"
"time"
+ "github.com/containerd/platforms"
"github.com/distribution/reference"
"github.com/docker/docker/api/types/container"
- "github.com/docker/docker/pkg/stringid"
"github.com/docker/go-units"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
const (
@@ -26,8 +27,18 @@ const (
mountsHeader = "MOUNTS"
localVolumes = "LOCAL VOLUMES"
networksHeader = "NETWORKS"
+ platformHeader = "PLATFORM"
)
+// Platform wraps a [ocispec.Platform] to implement the stringer interface.
+type Platform struct {
+ ocispec.Platform
+}
+
+func (p Platform) String() string {
+ return platforms.FormatAll(p.Platform)
+}
+
// NewContainerFormat returns a Format for rendering using a Context
func NewContainerFormat(source string, quiet bool, size bool) Format {
switch source {
@@ -68,16 +79,14 @@ ports: {{- pad .Ports 1 0}}
// ContainerWrite renders the context for a list of containers
func ContainerWrite(ctx Context, containers []container.Summary) error {
- render := func(format func(subContext SubContext) error) error {
+ return ctx.Write(NewContainerContext(), func(format func(subContext SubContext) error) error {
for _, ctr := range containers {
- err := format(&ContainerContext{trunc: ctx.Trunc, c: ctr})
- if err != nil {
+ if err := format(&ContainerContext{trunc: ctx.Trunc, c: ctr}); err != nil {
return err
}
}
return nil
- }
- return ctx.Write(NewContainerContext(), render)
+ })
}
// ContainerContext is a struct used for rendering a list of containers in a Go template.
@@ -111,6 +120,7 @@ func NewContainerContext() *ContainerContext {
"Mounts": mountsHeader,
"LocalVolumes": localVolumes,
"Networks": networksHeader,
+ "Platform": platformHeader,
}
return &containerCtx
}
@@ -124,7 +134,7 @@ func (c *ContainerContext) MarshalJSON() ([]byte, error) {
// option being set, the full or truncated ID is returned.
func (c *ContainerContext) ID() string {
if c.trunc {
- return stringid.TruncateID(c.c.ID)
+ return TruncateID(c.c.ID)
}
return c.c.ID
}
@@ -161,7 +171,7 @@ func (c *ContainerContext) Image() string {
return ""
}
if c.trunc {
- if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) {
+ if trunc := TruncateID(c.c.ImageID); trunc == TruncateID(c.c.Image) {
return trunc
}
// truncate digest if no-trunc option was not selected
@@ -210,6 +220,16 @@ func (c *ContainerContext) RunningFor() string {
return units.HumanDuration(time.Now().UTC().Sub(createdAt)) + " ago"
}
+// Platform returns a human-readable representation of the container's
+// platform if it is available.
+func (c *ContainerContext) Platform() *Platform {
+ p := c.c.ImageManifestDescriptor
+ if p == nil || p.Platform == nil {
+ return nil
+ }
+ return &Platform{*p.Platform}
+}
+
// Ports returns a comma-separated string representing open ports of the container
// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp"
// it's used by command 'docker ps'
@@ -218,7 +238,8 @@ func (c *ContainerContext) Ports() string {
return DisplayablePorts(c.c.Ports)
}
-// State returns the container's current state (e.g. "running" or "paused")
+// State returns the container's current state (e.g. "running" or "paused").
+// Refer to [container.ContainerState] for possible states.
func (c *ContainerContext) State() string {
return c.c.State
}
@@ -255,6 +276,7 @@ func (c *ContainerContext) Labels() string {
for k, v := range c.c.Labels {
joinLabels = append(joinLabels, k+"="+v)
}
+ sort.Strings(joinLabels)
return strings.Join(joinLabels, ",")
}
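The new PLATFORM column relies on containerd's platform formatter. A small usage sketch of the wrapper type added above, assuming the containerd/platforms and OCI image-spec packages already referenced in the imports.

```go
package main

import (
	"fmt"

	"github.com/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// Platform mirrors the wrapper added above: it formats an OCI platform as
// os/arch[/variant] for display in the PLATFORM column.
type Platform struct {
	ocispec.Platform
}

func (p Platform) String() string {
	return platforms.FormatAll(p.Platform)
}

func main() {
	p := Platform{ocispec.Platform{OS: "linux", Architecture: "arm64", Variant: "v8"}}
	fmt.Println(p) // linux/arm64/v8
}
```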
diff --git a/vendor/github.com/docker/cli/cli/command/formatter/context.go b/vendor/github.com/docker/cli/cli/command/formatter/context.go
index 293c8415..985a6ff3 100644
--- a/vendor/github.com/docker/cli/cli/command/formatter/context.go
+++ b/vendor/github.com/docker/cli/cli/command/formatter/context.go
@@ -1,7 +1,5 @@
package formatter
-import "encoding/json"
-
const (
// ClientContextTableFormat is the default client context format.
ClientContextTableFormat = "table {{.Name}}{{if .Current}} *{{end}}\t{{.Description}}\t{{.DockerEndpoint}}\t{{.Error}}"
@@ -30,13 +28,6 @@ type ClientContext struct {
DockerEndpoint string
Current bool
Error string
-
- // ContextType is a temporary field for compatibility with
- // Visual Studio, which depends on this from the "cloud integration"
- // wrapper.
- //
- // Deprecated: this type is only for backward-compatibility. Do not use.
- ContextType string `json:"ContextType,omitempty"`
}
// ClientContextWrite writes formatted contexts using the Context
@@ -69,13 +60,6 @@ func newClientContextContext() *clientContextContext {
}
func (c *clientContextContext) MarshalJSON() ([]byte, error) {
- if c.c.ContextType != "" {
- // We only have ContextType set for plain "json" or "{{json .}}" formatting,
- // so we should be able to just use the default json.Marshal with no
- // special handling.
- return json.Marshal(c.c)
- }
- // FIXME(thaJeztah): why do we need a special marshal function here?
return MarshalJSON(c)
}
diff --git a/vendor/github.com/docker/cli/cli/command/formatter/custom.go b/vendor/github.com/docker/cli/cli/command/formatter/custom.go
index 6910a261..c2b9cb2c 100644
--- a/vendor/github.com/docker/cli/cli/command/formatter/custom.go
+++ b/vendor/github.com/docker/cli/cli/command/formatter/custom.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package formatter
diff --git a/vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go b/vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go
index 1199d571..b663c59b 100644
--- a/vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go
+++ b/vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go
@@ -4,15 +4,14 @@ import (
"bytes"
"fmt"
"strconv"
- "strings"
"text/template"
"github.com/distribution/reference"
- "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/volume"
- units "github.com/docker/go-units"
+ "github.com/docker/go-units"
)
const (
@@ -39,12 +38,12 @@ type DiskUsageContext struct {
Images []*image.Summary
Containers []*container.Summary
Volumes []*volume.Volume
- BuildCache []*types.BuildCache
+ BuildCache []*build.CacheRecord
BuilderSize int64
}
func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) {
- ctx.buffer = bytes.NewBufferString("")
+ ctx.buffer = &bytes.Buffer{}
ctx.header = ""
ctx.Format = Format(format)
ctx.preFormat()
@@ -88,7 +87,7 @@ func (ctx *DiskUsageContext) Write() (err error) {
if ctx.Verbose {
return ctx.verboseWrite()
}
- ctx.buffer = bytes.NewBufferString("")
+ ctx.buffer = &bytes.Buffer{}
ctx.preFormat()
tmpl, err := ctx.parseFormat()
@@ -330,9 +329,15 @@ func (c *diskUsageContainersContext) TotalCount() string {
}
func (*diskUsageContainersContext) isActive(ctr container.Summary) bool {
- return strings.Contains(ctr.State, "running") ||
- strings.Contains(ctr.State, "paused") ||
- strings.Contains(ctr.State, "restarting")
+ switch ctr.State {
+ case container.StateRunning, container.StatePaused, container.StateRestarting:
+ return true
+ case container.StateCreated, container.StateRemoving, container.StateExited, container.StateDead:
+ return false
+ default:
+ // Unknown state (should never happen).
+ return false
+ }
}
func (c *diskUsageContainersContext) Active() string {
@@ -436,7 +441,7 @@ func (c *diskUsageVolumesContext) Reclaimable() string {
type diskUsageBuilderContext struct {
HeaderContext
builderSize int64
- buildCache []*types.BuildCache
+ buildCache []*build.CacheRecord
}
func (c *diskUsageBuilderContext) MarshalJSON() ([]byte, error) {
diff --git a/vendor/github.com/docker/cli/cli/command/formatter/displayutils.go b/vendor/github.com/docker/cli/cli/command/formatter/displayutils.go
index 7847bb30..b062c339 100644
--- a/vendor/github.com/docker/cli/cli/command/formatter/displayutils.go
+++ b/vendor/github.com/docker/cli/cli/command/formatter/displayutils.go
@@ -1,6 +1,11 @@
+// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
+//go:build go1.23
+
package formatter
import (
+ "fmt"
+ "strings"
"unicode/utf8"
"golang.org/x/text/width"
@@ -15,11 +20,32 @@ func charWidth(r rune) int {
switch width.LookupRune(r).Kind() {
case width.EastAsianWide, width.EastAsianFullwidth:
return 2
+ case width.Neutral, width.EastAsianAmbiguous, width.EastAsianNarrow, width.EastAsianHalfwidth:
+ return 1
default:
return 1
}
}
+const shortLen = 12
+
+// TruncateID returns a shorthand version of a string identifier for presentation,
+// after trimming digest algorithm prefix (if any).
+//
+// This function is a copy of [stringid.TruncateID] for presentation / formatting
+// purposes.
+//
+// [stringid.TruncateID]: https://github.com/moby/moby/blob/v28.3.2/pkg/stringid/stringid.go#L19
+func TruncateID(id string) string {
+ if i := strings.IndexRune(id, ':'); i >= 0 {
+ id = id[i+1:]
+ }
+ if len(id) > shortLen {
+ id = id[:shortLen]
+ }
+ return id
+}
+
// Ellipsis truncates a string to fit within maxDisplayWidth, and appends ellipsis (…).
// For maxDisplayWidth of 1 and lower, no ellipsis is appended.
// For maxDisplayWidth of 1, first char of string will return even if its width > 1.
@@ -59,3 +85,27 @@ func Ellipsis(s string, maxDisplayWidth int) string {
}
return s
}
+
+// capitalizeFirst capitalizes the first character of string
+func capitalizeFirst(s string) string {
+ switch l := len(s); l {
+ case 0:
+ return s
+ case 1:
+ return strings.ToLower(s)
+ default:
+ return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:])
+ }
+}
+
+// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter.
+func PrettyPrint(i any) string {
+ switch t := i.(type) {
+ case nil:
+ return "None"
+ case string:
+ return capitalizeFirst(t)
+ default:
+ return capitalizeFirst(fmt.Sprintf("%s", t))
+ }
+}
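TruncateID is now a local formatter helper rather than an import from pkg/stringid. A quick runnable demonstration of its behavior on a digest-prefixed identifier; the code below simply copies the helper shown in the hunk above.

```go
package main

import (
	"fmt"
	"strings"
)

const shortLen = 12

// truncateID is a copy of the TruncateID helper above: it trims a digest
// algorithm prefix (if any) and keeps at most the first 12 characters.
func truncateID(id string) string {
	if i := strings.IndexRune(id, ':'); i >= 0 {
		id = id[i+1:]
	}
	if len(id) > shortLen {
		id = id[:shortLen]
	}
	return id
}

func main() {
	fmt.Println(truncateID("sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba"))
	// Output: 4e38e38c8ce0
}
```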
diff --git a/vendor/github.com/docker/cli/cli/command/formatter/formatter.go b/vendor/github.com/docker/cli/cli/command/formatter/formatter.go
index 5873cce8..7803cabe 100644
--- a/vendor/github.com/docker/cli/cli/command/formatter/formatter.go
+++ b/vendor/github.com/docker/cli/cli/command/formatter/formatter.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package formatter
@@ -76,12 +76,15 @@ func (c *Context) preFormat() {
func (c *Context) parseFormat() (*template.Template, error) {
tmpl, err := templates.Parse(c.finalFormat)
if err != nil {
- return tmpl, errors.Wrap(err, "template parsing error")
+ return nil, errors.Wrap(err, "template parsing error")
}
- return tmpl, err
+ return tmpl, nil
}
func (c *Context) postFormat(tmpl *template.Template, subContext SubContext) {
+ if c.Output == nil {
+ c.Output = io.Discard
+ }
if c.Format.IsTable() {
t := tabwriter.NewWriter(c.Output, 10, 1, 3, ' ', 0)
buffer := bytes.NewBufferString("")
@@ -111,7 +114,7 @@ type SubFormat func(func(SubContext) error) error
// Write the template to the buffer using this Context
func (c *Context) Write(sub SubContext, f SubFormat) error {
- c.buffer = bytes.NewBufferString("")
+ c.buffer = &bytes.Buffer{}
c.preFormat()
tmpl, err := c.parseFormat()
diff --git a/vendor/github.com/docker/cli/cli/command/formatter/image.go b/vendor/github.com/docker/cli/cli/command/formatter/image.go
index d16f42b5..74c2fe75 100644
--- a/vendor/github.com/docker/cli/cli/command/formatter/image.go
+++ b/vendor/github.com/docker/cli/cli/command/formatter/image.go
@@ -6,8 +6,7 @@ import (
"github.com/distribution/reference"
"github.com/docker/docker/api/types/image"
- "github.com/docker/docker/pkg/stringid"
- units "github.com/docker/go-units"
+ "github.com/docker/go-units"
)
const (
@@ -216,7 +215,7 @@ func (c *imageContext) MarshalJSON() ([]byte, error) {
func (c *imageContext) ID() string {
if c.trunc {
- return stringid.TruncateID(c.i.ID)
+ return TruncateID(c.i.ID)
}
return c.i.ID
}
diff --git a/vendor/github.com/docker/cli/cli/command/formatter/reflect.go b/vendor/github.com/docker/cli/cli/command/formatter/reflect.go
index fe8def61..31658337 100644
--- a/vendor/github.com/docker/cli/cli/command/formatter/reflect.go
+++ b/vendor/github.com/docker/cli/cli/command/formatter/reflect.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package formatter
diff --git a/vendor/github.com/docker/cli/cli/command/formatter/tabwriter/tabwriter.go b/vendor/github.com/docker/cli/cli/command/formatter/tabwriter/tabwriter.go
index 1d908f58..e7473cd9 100644
--- a/vendor/github.com/docker/cli/cli/command/formatter/tabwriter/tabwriter.go
+++ b/vendor/github.com/docker/cli/cli/command/formatter/tabwriter/tabwriter.go
@@ -12,7 +12,7 @@
// based on https://github.com/golang/go/blob/master/src/text/tabwriter/tabwriter.go Last modified 690ac40 on 31 Jan
-//nolint:gocyclo,nakedret,stylecheck,unused // ignore linting errors, so that we can stick close to upstream
+//nolint:gocyclo,nakedret,unused // ignore linting errors, so that we can stick close to upstream
package tabwriter
import (
diff --git a/vendor/github.com/docker/cli/cli/command/formatter/volume.go b/vendor/github.com/docker/cli/cli/command/formatter/volume.go
index 85f07079..bf9ea5d4 100644
--- a/vendor/github.com/docker/cli/cli/command/formatter/volume.go
+++ b/vendor/github.com/docker/cli/cli/command/formatter/volume.go
@@ -6,7 +6,7 @@ import (
"strings"
"github.com/docker/docker/api/types/volume"
- units "github.com/docker/go-units"
+ "github.com/docker/go-units"
)
const (
diff --git a/vendor/github.com/docker/cli/cli/command/registry.go b/vendor/github.com/docker/cli/cli/command/registry.go
index e2581d57..be49d85b 100644
--- a/vendor/github.com/docker/cli/cli/command/registry.go
+++ b/vendor/github.com/docker/cli/cli/command/registry.go
@@ -13,9 +13,9 @@ import (
configtypes "github.com/docker/cli/cli/config/types"
"github.com/docker/cli/cli/hints"
"github.com/docker/cli/cli/streams"
+ "github.com/docker/cli/internal/prompt"
"github.com/docker/cli/internal/tui"
registrytypes "github.com/docker/docker/api/types/registry"
- "github.com/docker/docker/registry"
"github.com/morikuni/aec"
"github.com/pkg/errors"
)
@@ -28,16 +28,22 @@ const (
"for organizations using SSO. Learn more at https://docs.docker.com/go/access-tokens/"
)
+// authConfigKey is the key used to store credentials for Docker Hub. It is
+// a copy of [registry.IndexServer].
+//
+// [registry.IndexServer]: https://pkg.go.dev/github.com/docker/docker/registry#IndexServer
+const authConfigKey = "https://index.docker.io/v1/"
+
// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info
-// for the given command.
+// for the given command to prompt the user for username and password.
func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) registrytypes.RequestAuthConfig {
+ configKey := getAuthConfigKey(index.Name)
+ isDefaultRegistry := configKey == authConfigKey || index.Official
return func(ctx context.Context) (string, error) {
_, _ = fmt.Fprintf(cli.Out(), "\nLogin prior to %s:\n", cmdName)
- indexServer := registry.GetAuthConfigKey(index)
- isDefaultRegistry := indexServer == registry.IndexServer
- authConfig, err := GetDefaultAuthConfig(cli.ConfigFile(), true, indexServer, isDefaultRegistry)
+ authConfig, err := GetDefaultAuthConfig(cli.ConfigFile(), true, configKey, isDefaultRegistry)
if err != nil {
- _, _ = fmt.Fprintf(cli.Err(), "Unable to retrieve stored credentials for %s, error: %s.\n", indexServer, err)
+ _, _ = fmt.Fprintf(cli.Err(), "Unable to retrieve stored credentials for %s, error: %s.\n", configKey, err)
}
select {
@@ -46,7 +52,7 @@ func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInf
default:
}
- authConfig, err = PromptUserForCredentials(ctx, cli, "", "", authConfig.Username, indexServer)
+ authConfig, err = PromptUserForCredentials(ctx, cli, "", "", authConfig.Username, configKey)
if err != nil {
return "", err
}
@@ -63,7 +69,7 @@ func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInf
func ResolveAuthConfig(cfg *configfile.ConfigFile, index *registrytypes.IndexInfo) registrytypes.AuthConfig {
configKey := index.Name
if index.Official {
- configKey = registry.IndexServer
+ configKey = authConfigKey
}
a, _ := cfg.GetAuthConfig(configKey)
@@ -132,7 +138,7 @@ func PromptUserForCredentials(ctx context.Context, cli Cli, argUser, argPassword
argUser = strings.TrimSpace(argUser)
if argUser == "" {
- if serverAddress == registry.IndexServer {
+ if serverAddress == authConfigKey {
// When signing in to the default (Docker Hub) registry, we display
// hints for creating an account, and (if hints are enabled), using
// a token instead of a password.
@@ -143,16 +149,16 @@ func PromptUserForCredentials(ctx context.Context, cli Cli, argUser, argPassword
}
}
- var prompt string
+ var msg string
defaultUsername = strings.TrimSpace(defaultUsername)
if defaultUsername == "" {
- prompt = "Username: "
+ msg = "Username: "
} else {
- prompt = fmt.Sprintf("Username (%s): ", defaultUsername)
+ msg = fmt.Sprintf("Username (%s): ", defaultUsername)
}
var err error
- argUser, err = PromptForInput(ctx, cli.In(), cli.Out(), prompt)
+ argUser, err = prompt.ReadInput(ctx, cli.In(), cli.Out(), msg)
if err != nil {
return registrytypes.AuthConfig{}, err
}
@@ -166,7 +172,7 @@ func PromptUserForCredentials(ctx context.Context, cli Cli, argUser, argPassword
argPassword = strings.TrimSpace(argPassword)
if argPassword == "" {
- restoreInput, err := DisableInputEcho(cli.In())
+ restoreInput, err := prompt.DisableInputEcho(cli.In())
if err != nil {
return registrytypes.AuthConfig{}, err
}
@@ -180,10 +186,13 @@ func PromptUserForCredentials(ctx context.Context, cli Cli, argUser, argPassword
}
}()
- out := tui.NewOutput(cli.Err())
- out.PrintNote("A Personal Access Token (PAT) can be used instead.\n" +
- "To create a PAT, visit " + aec.Underline.Apply("https://app.docker.com/settings") + "\n\n")
- argPassword, err = PromptForInput(ctx, cli.In(), cli.Out(), "Password: ")
+ if serverAddress == authConfigKey {
+ out := tui.NewOutput(cli.Err())
+ out.PrintNote("A Personal Access Token (PAT) can be used instead.\n" +
+ "To create a PAT, visit " + aec.Underline.Apply("https://app.docker.com/settings") + "\n\n")
+ }
+
+ argPassword, err = prompt.ReadInput(ctx, cli.In(), cli.Out(), "Password: ")
if err != nil {
return registrytypes.AuthConfig{}, err
}
@@ -225,9 +234,25 @@ func resolveAuthConfigFromImage(cfg *configfile.ConfigFile, image string) (regis
if err != nil {
return registrytypes.AuthConfig{}, err
}
- repoInfo, err := registry.ParseRepositoryInfo(registryRef)
+ configKey := getAuthConfigKey(reference.Domain(registryRef))
+ a, err := cfg.GetAuthConfig(configKey)
if err != nil {
return registrytypes.AuthConfig{}, err
}
- return ResolveAuthConfig(cfg, repoInfo.Index), nil
+ return registrytypes.AuthConfig(a), nil
+}
+
+// getAuthConfigKey special-cases using the full index address of the official
+// index as the AuthConfig key, and uses the (host)name[:port] for private indexes.
+//
+// It is similar to [registry.GetAuthConfigKey], but does not require on
+// [registrytypes.IndexInfo] as intermediate.
+//
+// [registry.GetAuthConfigKey]: https://pkg.go.dev/github.com/docker/docker/registry#GetAuthConfigKey
+// [registrytypes.IndexInfo]: https://pkg.go.dev/github.com/docker/docker/api/types/registry#IndexInfo
+func getAuthConfigKey(domainName string) string {
+ if domainName == "docker.io" || domainName == "index.docker.io" {
+ return authConfigKey
+ }
+ return domainName
}
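Dropping the docker/docker/registry dependency keeps only the key-resolution rule the CLI actually needs: Docker Hub domains map to the legacy index URL, everything else is keyed by host[:port]. A quick standalone demonstration of getAuthConfigKey as added above.

```go
package main

import "fmt"

const authConfigKey = "https://index.docker.io/v1/"

// getAuthConfigKey is a copy of the helper above, for demonstration only.
func getAuthConfigKey(domainName string) string {
	if domainName == "docker.io" || domainName == "index.docker.io" {
		return authConfigKey
	}
	return domainName
}

func main() {
	fmt.Println(getAuthConfigKey("docker.io"))                  // https://index.docker.io/v1/
	fmt.Println(getAuthConfigKey("registry.example.com:5000")) // registry.example.com:5000
}
```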
diff --git a/vendor/github.com/docker/cli/cli/command/service/progress/progress.go b/vendor/github.com/docker/cli/cli/command/service/progress/progress.go
index 09da1877..5f87e291 100644
--- a/vendor/github.com/docker/cli/cli/command/service/progress/progress.go
+++ b/vendor/github.com/docker/cli/cli/command/service/progress/progress.go
@@ -11,13 +11,12 @@ import (
"strings"
"time"
- "github.com/docker/docker/api/types"
+ "github.com/docker/cli/cli/command/formatter"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
- "github.com/docker/docker/pkg/stringid"
)
var (
@@ -89,7 +88,7 @@ func ServiceProgress(ctx context.Context, apiClient client.APIClient, serviceID
)
for {
- service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
+ service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID, swarm.ServiceInspectOptions{})
if err != nil {
return err
}
@@ -143,7 +142,7 @@ func ServiceProgress(ctx context.Context, apiClient client.APIClient, serviceID
return nil
}
- tasks, err := apiClient.TaskList(ctx, types.TaskListOptions{Filters: filters.NewArgs(
+ tasks, err := apiClient.TaskList(ctx, swarm.TaskListOptions{Filters: filters.NewArgs(
filters.KeyValuePair{Key: "service", Value: service.ID},
filters.KeyValuePair{Key: "_up-to-date", Value: "true"},
)})
@@ -217,7 +216,7 @@ func ServiceProgress(ctx context.Context, apiClient client.APIClient, serviceID
//
// TODO(thaJeztah): this should really be a filter on [apiClient.NodeList] instead of being filtered on the client side.
func getActiveNodes(ctx context.Context, apiClient client.NodeAPIClient) (map[string]struct{}, error) {
- nodes, err := apiClient.NodeList(ctx, types.NodeListOptions{})
+ nodes, err := apiClient.NodeList(ctx, swarm.NodeListOptions{})
if err != nil {
return nil, err
}
@@ -506,7 +505,7 @@ func (u *globalProgressUpdater) writeTaskProgress(task swarm.Task, nodeCount int
if task.Status.Err != "" {
u.progressOut.WriteProgress(progress.Progress{
- ID: stringid.TruncateID(task.NodeID),
+ ID: formatter.TruncateID(task.NodeID),
Action: truncError(task.Status.Err),
})
return
@@ -514,7 +513,7 @@ func (u *globalProgressUpdater) writeTaskProgress(task swarm.Task, nodeCount int
if !terminalState(task.DesiredState) && !terminalState(task.Status.State) {
u.progressOut.WriteProgress(progress.Progress{
- ID: stringid.TruncateID(task.NodeID),
+ ID: formatter.TruncateID(task.NodeID),
Action: fmt.Sprintf("%-[1]*s", longestState, task.Status.State),
Current: numberedStates[task.Status.State],
Total: maxProgress,
diff --git a/vendor/github.com/docker/cli/cli/command/telemetry.go b/vendor/github.com/docker/cli/cli/command/telemetry.go
index 2ee8adfb..e8e6296b 100644
--- a/vendor/github.com/docker/cli/cli/command/telemetry.go
+++ b/vendor/github.com/docker/cli/cli/command/telemetry.go
@@ -4,10 +4,11 @@ import (
"context"
"os"
"path/filepath"
+ "strings"
"sync"
"time"
- "github.com/docker/distribution/uuid"
+ "github.com/google/uuid"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
@@ -142,7 +143,7 @@ func defaultResourceOptions() []resource.Option {
// of the CLI is its own instance. Without this, downstream
// OTEL processors may think the same process is restarting
// continuously.
- semconv.ServiceInstanceID(uuid.Generate().String()),
+ semconv.ServiceInstanceID(uuid.NewString()),
),
resource.WithFromEnv(),
resource.WithTelemetrySDK(),
@@ -216,3 +217,49 @@ func (r *cliReader) ForceFlush(ctx context.Context) error {
func deltaTemporality(_ sdkmetric.InstrumentKind) metricdata.Temporality {
return metricdata.DeltaTemporality
}
+
+// resourceAttributesEnvVar is the name of the envvar that includes additional
+// resource attributes for OTEL as defined in the [OpenTelemetry specification].
+//
+// [OpenTelemetry specification]: https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/#general-sdk-configuration
+const resourceAttributesEnvVar = "OTEL_RESOURCE_ATTRIBUTES"
+
+func filterResourceAttributesEnvvar() {
+ if v := os.Getenv(resourceAttributesEnvVar); v != "" {
+ if filtered := filterResourceAttributes(v); filtered != "" {
+ _ = os.Setenv(resourceAttributesEnvVar, filtered)
+ } else {
+ _ = os.Unsetenv(resourceAttributesEnvVar)
+ }
+ }
+}
+
+// dockerCLIAttributePrefix is the prefix for any docker cli OTEL attributes.
+// When updating, make sure to also update the copy in cli-plugins/manager.
+//
+// TODO(thaJeztah): move telemetry-related code to an (internal) package to reduce dependency on cli/command in cli-plugins, which has too many imports.
+const dockerCLIAttributePrefix = "docker.cli."
+
+func filterResourceAttributes(s string) string {
+ if trimmed := strings.TrimSpace(s); trimmed == "" {
+ return trimmed
+ }
+
+ pairs := strings.Split(s, ",")
+ elems := make([]string, 0, len(pairs))
+ for _, p := range pairs {
+ k, _, found := strings.Cut(p, "=")
+ if !found {
+ // Do not interact with invalid otel resources.
+ elems = append(elems, p)
+ continue
+ }
+
+ // Skip attributes that have our docker.cli prefix.
+ if strings.HasPrefix(k, dockerCLIAttributePrefix) {
+ continue
+ }
+ elems = append(elems, p)
+ }
+ return strings.Join(elems, ",")
+}
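filterResourceAttributes strips the CLI's own docker.cli.* attributes from OTEL_RESOURCE_ATTRIBUTES while leaving every other key=value pair (and any malformed pair) untouched. A standalone copy of the same logic, runnable on its own.

```go
package main

import (
	"fmt"
	"strings"
)

const dockerCLIAttributePrefix = "docker.cli."

// filterResourceAttributes drops key=value pairs whose key carries the
// docker.cli. prefix and keeps everything else, as in the hunk above.
func filterResourceAttributes(s string) string {
	if strings.TrimSpace(s) == "" {
		return ""
	}
	pairs := strings.Split(s, ",")
	elems := make([]string, 0, len(pairs))
	for _, p := range pairs {
		k, _, found := strings.Cut(p, "=")
		if found && strings.HasPrefix(k, dockerCLIAttributePrefix) {
			continue
		}
		elems = append(elems, p)
	}
	return strings.Join(elems, ",")
}

func main() {
	fmt.Println(filterResourceAttributes("docker.cli.command=ps,service.name=myapp"))
	// Output: service.name=myapp
}
```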
diff --git a/vendor/github.com/docker/cli/cli/command/telemetry_docker.go b/vendor/github.com/docker/cli/cli/command/telemetry_docker.go
index 298209e2..6598997d 100644
--- a/vendor/github.com/docker/cli/cli/command/telemetry_docker.go
+++ b/vendor/github.com/docker/cli/cli/command/telemetry_docker.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package command
diff --git a/vendor/github.com/docker/cli/cli/command/utils.go b/vendor/github.com/docker/cli/cli/command/utils.go
index 8a8368fb..ab64ef8f 100644
--- a/vendor/github.com/docker/cli/cli/command/utils.go
+++ b/vendor/github.com/docker/cli/cli/command/utils.go
@@ -1,95 +1,45 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package command
import (
- "bufio"
"context"
- "fmt"
"io"
"os"
"path/filepath"
- "runtime"
"strings"
+ "github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/streams"
+ "github.com/docker/cli/internal/prompt"
"github.com/docker/docker/api/types/filters"
- mounttypes "github.com/docker/docker/api/types/mount"
- "github.com/docker/docker/api/types/versions"
- "github.com/docker/docker/errdefs"
- "github.com/moby/sys/sequential"
- "github.com/moby/term"
+ "github.com/moby/sys/atomicwriter"
"github.com/pkg/errors"
"github.com/spf13/pflag"
)
// CopyToFile writes the content of the reader to the specified file
+//
+// Deprecated: use [atomicwriter.New].
func CopyToFile(outfile string, r io.Reader) error {
- // We use sequential file access here to avoid depleting the standby list
- // on Windows. On Linux, this is a call directly to os.CreateTemp
- tmpFile, err := sequential.CreateTemp(filepath.Dir(outfile), ".docker_temp_")
+ writer, err := atomicwriter.New(outfile, 0o600)
if err != nil {
return err
}
-
- tmpPath := tmpFile.Name()
-
- _, err = io.Copy(tmpFile, r)
- tmpFile.Close()
-
- if err != nil {
- os.Remove(tmpPath)
- return err
- }
-
- if err = os.Rename(tmpPath, outfile); err != nil {
- os.Remove(tmpPath)
- return err
- }
-
- return nil
+ defer writer.Close()
+ _, err = io.Copy(writer, r)
+ return err
}
-// capitalizeFirst capitalizes the first character of string
-func capitalizeFirst(s string) string {
- switch l := len(s); l {
- case 0:
- return s
- case 1:
- return strings.ToLower(s)
- default:
- return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:])
- }
-}
-
-// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter.
-func PrettyPrint(i any) string {
- switch t := i.(type) {
- case nil:
- return "None"
- case string:
- return capitalizeFirst(t)
- default:
- return capitalizeFirst(fmt.Sprintf("%s", t))
- }
-}
-
-var ErrPromptTerminated = errdefs.Cancelled(errors.New("prompt terminated"))
+const ErrPromptTerminated = prompt.ErrTerminated
// DisableInputEcho disables input echo on the provided streams.In.
// This is useful when the user provides sensitive information like passwords.
// The function returns a restore function that should be called to restore the
// terminal state.
func DisableInputEcho(ins *streams.In) (restore func() error, err error) {
- oldState, err := term.SaveState(ins.FD())
- if err != nil {
- return nil, err
- }
- restore = func() error {
- return term.RestoreTerminal(ins.FD(), oldState)
- }
- return restore, term.DisableEcho(ins.FD(), oldState)
+ return prompt.DisableInputEcho(ins)
}
// PromptForInput requests input from the user.
@@ -100,23 +50,7 @@ func DisableInputEcho(ins *streams.In) (restore func() error, err error) {
// the stack and close the io.Reader used for the prompt which will prevent the
// background goroutine from blocking indefinitely.
func PromptForInput(ctx context.Context, in io.Reader, out io.Writer, message string) (string, error) {
- _, _ = fmt.Fprint(out, message)
-
- result := make(chan string)
- go func() {
- scanner := bufio.NewScanner(in)
- if scanner.Scan() {
- result <- strings.TrimSpace(scanner.Text())
- }
- }()
-
- select {
- case <-ctx.Done():
- _, _ = fmt.Fprintln(out, "")
- return "", ErrPromptTerminated
- case r := <-result:
- return r, nil
- }
+ return prompt.ReadInput(ctx, in, out, message)
}
// PromptForConfirmation requests and checks confirmation from the user.
@@ -130,67 +64,45 @@ func PromptForInput(ctx context.Context, in io.Reader, out io.Writer, message st
// the stack and close the io.Reader used for the prompt which will prevent the
// background goroutine from blocking indefinitely.
func PromptForConfirmation(ctx context.Context, ins io.Reader, outs io.Writer, message string) (bool, error) {
- if message == "" {
- message = "Are you sure you want to proceed?"
- }
- message += " [y/N] "
-
- _, _ = fmt.Fprint(outs, message)
-
- // On Windows, force the use of the regular OS stdin stream.
- if runtime.GOOS == "windows" {
- ins = streams.NewIn(os.Stdin)
- }
-
- result := make(chan bool)
-
- go func() {
- var res bool
- scanner := bufio.NewScanner(ins)
- if scanner.Scan() {
- answer := strings.TrimSpace(scanner.Text())
- if strings.EqualFold(answer, "y") {
- res = true
- }
- }
- result <- res
- }()
-
- select {
- case <-ctx.Done():
- _, _ = fmt.Fprintln(outs, "")
- return false, ErrPromptTerminated
- case r := <-result:
- return r, nil
- }
+ return prompt.Confirm(ctx, ins, outs, message)
}
-// PruneFilters returns consolidated prune filters obtained from config.json and cli
-func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args {
- if dockerCli.ConfigFile() == nil {
+// PruneFilters merges prune filters specified in config.json with those specified
+// as command-line flags.
+//
+// CLI label filters have precedence over those specified in config.json. If a
+// label filter specified as flag conflicts with a label defined in config.json
+// (i.e., "label=some-value" conflicts with "label!=some-value", and vice versa),
+// then the filter defined in config.json is omitted.
+func PruneFilters(dockerCLI config.Provider, pruneFilters filters.Args) filters.Args {
+ cfg := dockerCLI.ConfigFile()
+ if cfg == nil {
return pruneFilters
}
- for _, f := range dockerCli.ConfigFile().PruneFilters {
+
+ // Merge filters provided through the CLI with default filters defined
+ // in the CLI-configfile.
+ for _, f := range cfg.PruneFilters {
k, v, ok := strings.Cut(f, "=")
if !ok {
continue
}
- if k == "label" {
- // CLI label filter supersede config.json.
- // If CLI label filter conflict with config.json,
- // skip adding label! filter in config.json.
- if pruneFilters.Contains("label!") && pruneFilters.ExactMatch("label!", v) {
+ switch k {
+ case "label":
+ // "label != some-value" conflicts with "label = some-value"
+ if pruneFilters.ExactMatch("label!", v) {
continue
}
- } else if k == "label!" {
- // CLI label! filter supersede config.json.
- // If CLI label! filter conflict with config.json,
- // skip adding label filter in config.json.
- if pruneFilters.Contains("label") && pruneFilters.ExactMatch("label", v) {
+ pruneFilters.Add(k, v)
+ case "label!":
+ // "label != some-value" conflicts with "label = some-value"
+ if pruneFilters.ExactMatch("label", v) {
continue
}
+ pruneFilters.Add(k, v)
+ default:
+ pruneFilters.Add(k, v)
}
- pruneFilters.Add(k, v)
}
return pruneFilters
@@ -202,7 +114,7 @@ func AddPlatformFlag(flags *pflag.FlagSet, target *string) {
_ = flags.SetAnnotation("platform", "version", []string{"1.32"})
}
-// ValidateOutputPath validates the output paths of the `export` and `save` commands.
+// ValidateOutputPath validates the output paths of the "docker cp" command.
func ValidateOutputPath(path string) error {
dir := filepath.Dir(filepath.Clean(path))
if dir != "" && dir != "." {
@@ -228,8 +140,8 @@ func ValidateOutputPath(path string) error {
return nil
}
-// ValidateOutputPathFileMode validates the output paths of the `cp` command and serves as a
-// helper to `ValidateOutputPath`
+// ValidateOutputPathFileMode validates the output paths of the "docker cp" command
+// and serves as a helper to [ValidateOutputPath]
func ValidateOutputPathFileMode(fileMode os.FileMode) error {
switch {
case fileMode&os.ModeDevice != 0:
@@ -240,47 +152,21 @@ func ValidateOutputPathFileMode(fileMode os.FileMode) error {
return nil
}
-func stringSliceIndex(s, subs []string) int {
- j := 0
- if len(subs) > 0 {
- for i, x := range s {
- if j < len(subs) && subs[j] == x {
- j++
- } else {
- j = 0
- }
- if len(subs) == j {
- return i + 1 - j
- }
- }
- }
- return -1
+func invalidParameter(err error) error {
+ return invalidParameterErr{err}
}
-// StringSliceReplaceAt replaces the sub-slice find, with the sub-slice replace, in the string
-// slice s, returning a new slice and a boolean indicating if the replacement happened.
-// requireIdx is the index at which old needs to be found at (or -1 to disregard that).
-func StringSliceReplaceAt(s, find, replace []string, requireIndex int) ([]string, bool) {
- idx := stringSliceIndex(s, find)
- if (requireIndex != -1 && requireIndex != idx) || idx == -1 {
- return s, false
- }
- out := append([]string{}, s[:idx]...)
- out = append(out, replace...)
- out = append(out, s[idx+len(find):]...)
- return out, true
+type invalidParameterErr struct{ error }
+
+func (invalidParameterErr) InvalidParameter() {}
+
+func notFound(err error) error {
+ return notFoundErr{err}
}
-// ValidateMountWithAPIVersion validates a mount with the server API version.
-func ValidateMountWithAPIVersion(m mounttypes.Mount, serverAPIVersion string) error {
- if m.BindOptions != nil {
- if m.BindOptions.NonRecursive && versions.LessThan(serverAPIVersion, "1.40") {
- return errors.Errorf("bind-recursive=disabled requires API v1.40 or later")
- }
- // ReadOnlyNonRecursive can be safely ignored when API < 1.44
- if m.BindOptions.ReadOnlyForceRecursive && versions.LessThan(serverAPIVersion, "1.44") {
- return errors.Errorf("bind-recursive=readonly requires API v1.44 or later")
- }
- }
- return nil
+type notFoundErr struct{ error }
+
+func (notFoundErr) NotFound() {}
+func (e notFoundErr) Unwrap() error {
+ return e.error
}
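The rewritten PruneFilters keeps the old conflict rule: a label (or label!) entry from config.json is dropped when the CLI already carries the opposite form for the same value. A sketch of that rule using plain maps instead of filters.Args; mergePruneFilters is a hypothetical helper written only for this illustration.

```go
package main

import (
	"fmt"
	"strings"
)

// mergePruneFilters folds config.json prune filters into the CLI-provided
// ones, skipping a "label" value when its "label!" negation is already set,
// and vice versa.
func mergePruneFilters(cli map[string][]string, fromConfig []string) map[string][]string {
	contains := func(key, val string) bool {
		for _, v := range cli[key] {
			if v == val {
				return true
			}
		}
		return false
	}
	for _, f := range fromConfig {
		k, v, ok := strings.Cut(f, "=")
		if !ok {
			continue
		}
		switch k {
		case "label":
			if contains("label!", v) {
				continue
			}
		case "label!":
			if contains("label", v) {
				continue
			}
		}
		cli[k] = append(cli[k], v)
	}
	return cli
}

func main() {
	cli := map[string][]string{"label!": {"keep=true"}}
	fmt.Println(mergePruneFilters(cli, []string{"label=keep=true", "until=24h"}))
	// label=keep=true is skipped; until=24h is added.
}
```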
diff --git a/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go b/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go
index ee11656f..c7bee693 100644
--- a/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go
+++ b/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package interpolation
@@ -67,7 +67,10 @@ func recursiveInterpolate(value any, path Path, opts Options) (any, error) {
return newValue, nil
}
casted, err := caster(newValue)
- return casted, newPathError(path, errors.Wrap(err, "failed to cast to expected type"))
+ if err != nil {
+ return casted, newPathError(path, errors.Wrap(err, "failed to cast to expected type"))
+ }
+ return casted, nil
case map[string]any:
out := map[string]any{}
diff --git a/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go b/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go
index 82c36d7d..93ac9d83 100644
--- a/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go
+++ b/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package loader
diff --git a/vendor/github.com/docker/cli/cli/compose/loader/loader.go b/vendor/github.com/docker/cli/cli/compose/loader/loader.go
index 6cd2d203..b2673394 100644
--- a/vendor/github.com/docker/cli/cli/compose/loader/loader.go
+++ b/vendor/github.com/docker/cli/cli/compose/loader/loader.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package loader
@@ -18,9 +18,10 @@ import (
"github.com/docker/cli/cli/compose/template"
"github.com/docker/cli/cli/compose/types"
"github.com/docker/cli/opts"
+ "github.com/docker/cli/opts/swarmopts"
"github.com/docker/docker/api/types/versions"
"github.com/docker/go-connections/nat"
- units "github.com/docker/go-units"
+ "github.com/docker/go-units"
"github.com/go-viper/mapstructure/v2"
"github.com/google/shlex"
"github.com/pkg/errors"
@@ -925,7 +926,7 @@ func toServicePortConfigs(value string) ([]any, error) {
for _, key := range keys {
// Reuse ConvertPortToPortConfig so that it is consistent
- portConfig, err := opts.ConvertPortToPortConfig(nat.Port(key), portBindings)
+ portConfig, err := swarmopts.ConvertPortToPortConfig(nat.Port(key), portBindings)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/docker/cli/cli/compose/loader/merge.go b/vendor/github.com/docker/cli/cli/compose/loader/merge.go
index ee0a39f9..8c0f35db 100644
--- a/vendor/github.com/docker/cli/cli/compose/loader/merge.go
+++ b/vendor/github.com/docker/cli/cli/compose/loader/merge.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package loader
diff --git a/vendor/github.com/docker/cli/cli/compose/schema/schema.go b/vendor/github.com/docker/cli/cli/compose/schema/schema.go
index b636ea5b..1484410d 100644
--- a/vendor/github.com/docker/cli/cli/compose/schema/schema.go
+++ b/vendor/github.com/docker/cli/cli/compose/schema/schema.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package schema
diff --git a/vendor/github.com/docker/cli/cli/compose/template/template.go b/vendor/github.com/docker/cli/cli/compose/template/template.go
index 1507c0ee..b823b499 100644
--- a/vendor/github.com/docker/cli/cli/compose/template/template.go
+++ b/vendor/github.com/docker/cli/cli/compose/template/template.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package template
@@ -7,6 +7,8 @@ import (
"fmt"
"regexp"
"strings"
+
+ "github.com/docker/cli/internal/lazyregexp"
)
const (
@@ -14,11 +16,21 @@ const (
subst = "[_a-z][_a-z0-9]*(?::?[-?][^}]*)?"
)
-var defaultPattern = regexp.MustCompile(fmt.Sprintf(
+var defaultPattern = lazyregexp.New(fmt.Sprintf(
"%s(?i:(?P%s)|(?P%s)|{(?P%s)}|(?P))",
delimiter, delimiter, subst, subst,
))
+// regexper is an internal interface to allow passing a [lazyregexp.Regexp]
+// in places where a custom ("regular") [regexp.Regexp] is accepted. It defines
+// only the methods we currently use.
+type regexper interface {
+ FindAllStringSubmatch(s string, n int) [][]string
+ FindStringSubmatch(s string) []string
+ ReplaceAllStringFunc(src string, repl func(string) string) string
+ SubexpNames() []string
+}
+
// DefaultSubstituteFuncs contains the default SubstituteFunc used by the docker cli
var DefaultSubstituteFuncs = []SubstituteFunc{
softDefault,
@@ -51,10 +63,16 @@ type SubstituteFunc func(string, Mapping) (string, bool, error)
// SubstituteWith substitutes variables in the string with their values.
// It accepts additional substitute function.
func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) {
+ return substituteWith(template, mapping, pattern, subsFuncs...)
+}
+
+// SubstituteWith substitutes variables in the string with their values.
+// It accepts additional substitute function.
+func substituteWith(template string, mapping Mapping, pattern regexper, subsFuncs ...SubstituteFunc) (string, error) {
var err error
result := pattern.ReplaceAllStringFunc(template, func(substring string) string {
matches := pattern.FindStringSubmatch(substring)
- groups := matchGroups(matches, pattern)
+ groups := matchGroups(matches, defaultPattern)
if escaped := groups["escaped"]; escaped != "" {
return escaped
}
@@ -93,38 +111,42 @@ func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, su
// Substitute variables in the string with their values
func Substitute(template string, mapping Mapping) (string, error) {
- return SubstituteWith(template, mapping, defaultPattern, DefaultSubstituteFuncs...)
+ return substituteWith(template, mapping, defaultPattern, DefaultSubstituteFuncs...)
}
// ExtractVariables returns a map of all the variables defined in the specified
// composefile (dict representation) and their default value if any.
func ExtractVariables(configDict map[string]any, pattern *regexp.Regexp) map[string]string {
+ return extractVariables(configDict, pattern)
+}
+
+func extractVariables(configDict map[string]any, pattern regexper) map[string]string {
if pattern == nil {
pattern = defaultPattern
}
return recurseExtract(configDict, pattern)
}
-func recurseExtract(value any, pattern *regexp.Regexp) map[string]string {
+func recurseExtract(value any, pattern regexper) map[string]string {
m := map[string]string{}
- switch value := value.(type) {
+ switch val := value.(type) {
case string:
- if values, is := extractVariable(value, pattern); is {
+ if values, is := extractVariable(val, pattern); is {
for _, v := range values {
m[v.name] = v.value
}
}
case map[string]any:
- for _, elem := range value {
+ for _, elem := range val {
submap := recurseExtract(elem, pattern)
- for key, value := range submap {
- m[key] = value
+ for k, v := range submap {
+ m[k] = v
}
}
case []any:
- for _, elem := range value {
+ for _, elem := range val {
if values, is := extractVariable(elem, pattern); is {
for _, v := range values {
m[v.name] = v.value
@@ -141,7 +163,7 @@ type extractedValue struct {
value string
}
-func extractVariable(value any, pattern *regexp.Regexp) ([]extractedValue, bool) {
+func extractVariable(value any, pattern regexper) ([]extractedValue, bool) {
sValue, ok := value.(string)
if !ok {
return []extractedValue{}, false
@@ -227,7 +249,7 @@ func withRequired(substitution string, mapping Mapping, sep string, valid func(s
return value, true, nil
}
-func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string {
+func matchGroups(matches []string, pattern regexper) map[string]string {
groups := make(map[string]string)
for i, name := range pattern.SubexpNames()[1:] {
groups[name] = matches[i+1]
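The exported entry points of the template package are unchanged; only the internal pattern type moved to lazyregexp. A small usage sketch, assuming the exported Substitute and Mapping API of cli/compose/template shown above, with a mapping backed by a local map rather than the process environment.

```go
package main

import (
	"fmt"

	"github.com/docker/cli/cli/compose/template"
)

func main() {
	// Mapping resolves variable names; the boolean reports whether the
	// variable is set, which drives the :- default and :? required forms.
	env := map[string]string{"TAG": "v1.2.3"}
	mapping := func(name string) (string, bool) {
		v, ok := env[name]
		return v, ok
	}

	out, err := template.Substitute("registry.example.com/app:${TAG:-latest}", mapping)
	fmt.Println(out, err) // registry.example.com/app:v1.2.3 <nil>
}
```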
diff --git a/vendor/github.com/docker/cli/cli/compose/types/types.go b/vendor/github.com/docker/cli/cli/compose/types/types.go
index 1377a795..0804388a 100644
--- a/vendor/github.com/docker/cli/cli/compose/types/types.go
+++ b/vendor/github.com/docker/cli/cli/compose/types/types.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package types
diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go
index 910b3c00..cbb34486 100644
--- a/vendor/github.com/docker/cli/cli/config/config.go
+++ b/vendor/github.com/docker/cli/cli/config/config.go
@@ -58,7 +58,7 @@ func resetConfigDir() {
// getHomeDir is a copy of [pkg/homedir.Get] to prevent adding docker/docker
// as dependency for consumers that only need to read the config-file.
//
-// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v26.1.4+incompatible/pkg/homedir#Get
+// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v28.0.3+incompatible/pkg/homedir#Get
func getHomeDir() string {
home, _ := os.UserHomeDir()
if home == "" && runtime.GOOS != "windows" {
@@ -69,6 +69,11 @@ func getHomeDir() string {
return home
}
+// Provider defines an interface for providing the CLI config.
+type Provider interface {
+ ConfigFile() *configfile.ConfigFile
+}
+
// Dir returns the directory the configuration file is stored in
func Dir() string {
initConfigDir.Do(func() {
diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go
index ae9dcb33..530c5228 100644
--- a/vendor/github.com/docker/cli/cli/config/configfile/file.go
+++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go
@@ -3,12 +3,14 @@ package configfile
import (
"encoding/base64"
"encoding/json"
+ "fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/docker/cli/cli/config/credentials"
+ "github.com/docker/cli/cli/config/memorystore"
"github.com/docker/cli/cli/config/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -36,14 +38,41 @@ type ConfigFile struct {
NodesFormat string `json:"nodesFormat,omitempty"`
PruneFilters []string `json:"pruneFilters,omitempty"`
Proxies map[string]ProxyConfig `json:"proxies,omitempty"`
- Experimental string `json:"experimental,omitempty"`
CurrentContext string `json:"currentContext,omitempty"`
CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"`
Plugins map[string]map[string]string `json:"plugins,omitempty"`
Aliases map[string]string `json:"aliases,omitempty"`
Features map[string]string `json:"features,omitempty"`
+
+ // Deprecated: experimental CLI features are always enabled and this field is no longer used. Use [Features] instead for optional features. This field will be removed in a future release.
+ Experimental string `json:"experimental,omitempty"`
}
+type configEnvAuth struct {
+ Auth string `json:"auth"`
+}
+
+type configEnv struct {
+ AuthConfigs map[string]configEnvAuth `json:"auths"`
+}
+
+// DockerEnvConfigKey is an environment variable that contains a JSON encoded
+// credential config. It only supports storing the credentials as a base64
+// encoded string in the format base64("username:pat").
+//
+// Adding additional fields will produce a parsing error.
+//
+// Example:
+//
+// {
+// "auths": {
+// "example.test": {
+// "auth": base64-encoded-username-pat
+// }
+// }
+// }
+const DockerEnvConfigKey = "DOCKER_AUTH_CONFIG"
+
// ProxyConfig contains proxy configuration settings
type ProxyConfig struct {
HTTPProxy string `json:"httpProxy,omitempty"`
@@ -150,7 +179,8 @@ func (configFile *ConfigFile) Save() (retErr error) {
return err
}
defer func() {
- temp.Close()
+ // ignore error as the file may already be closed when we reach this.
+ _ = temp.Close()
if retErr != nil {
if err := os.Remove(temp.Name()); err != nil {
logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file")
@@ -167,10 +197,16 @@ func (configFile *ConfigFile) Save() (retErr error) {
return errors.Wrap(err, "error closing temp file")
}
- // Handle situation where the configfile is a symlink
+ // Handle situation where the configfile is a symlink, and allow for dangling symlinks
cfgFile := configFile.Filename
- if f, err := os.Readlink(cfgFile); err == nil {
+ if f, err := filepath.EvalSymlinks(cfgFile); err == nil {
cfgFile = f
+ } else if os.IsNotExist(err) {
+ // extract the path from the error if the configfile does not exist or is a dangling symlink
+ var pathError *os.PathError
+ if errors.As(err, &pathError) {
+ cfgFile = pathError.Path
+ }
}
// Try copying the current config file (if any) ownership and permissions
@@ -254,10 +290,64 @@ func decodeAuth(authStr string) (string, string, error) {
// GetCredentialsStore returns a new credentials store from the settings in the
// configuration file
func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store {
+ store := credentials.NewFileStore(configFile)
+
if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" {
- return newNativeStore(configFile, helper)
+ store = newNativeStore(configFile, helper)
}
- return credentials.NewFileStore(configFile)
+
+ envConfig := os.Getenv(DockerEnvConfigKey)
+ if envConfig == "" {
+ return store
+ }
+
+ authConfig, err := parseEnvConfig(envConfig)
+ if err != nil {
+ _, _ = fmt.Fprintln(os.Stderr, "Failed to create credential store from DOCKER_AUTH_CONFIG: ", err)
+ return store
+ }
+
+ // use DOCKER_AUTH_CONFIG if set
+ // it uses the native or file store as a fallback to fetch and store credentials
+ envStore, err := memorystore.New(
+ memorystore.WithAuthConfig(authConfig),
+ memorystore.WithFallbackStore(store),
+ )
+ if err != nil {
+ _, _ = fmt.Fprintln(os.Stderr, "Failed to create credential store from DOCKER_AUTH_CONFIG: ", err)
+ return store
+ }
+
+ return envStore
+}
+
+func parseEnvConfig(v string) (map[string]types.AuthConfig, error) {
+ envConfig := &configEnv{}
+ decoder := json.NewDecoder(strings.NewReader(v))
+ decoder.DisallowUnknownFields()
+ if err := decoder.Decode(envConfig); err != nil && !errors.Is(err, io.EOF) {
+ return nil, err
+ }
+ if decoder.More() {
+ return nil, errors.New("DOCKER_AUTH_CONFIG does not support more than one JSON object")
+ }
+
+ authConfigs := make(map[string]types.AuthConfig)
+ for addr, envAuth := range envConfig.AuthConfigs {
+ if envAuth.Auth == "" {
+ return nil, fmt.Errorf("DOCKER_AUTH_CONFIG environment variable is missing key `auth` for %s", addr)
+ }
+ username, password, err := decodeAuth(envAuth.Auth)
+ if err != nil {
+ return nil, err
+ }
+ authConfigs[addr] = types.AuthConfig{
+ Username: username,
+ Password: password,
+ ServerAddress: addr,
+ }
+ }
+ return authConfigs, nil
}
// var for unit testing.
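For context on the DOCKER_AUTH_CONFIG support added above, here is a minimal sketch (not part of the vendored code) of how a caller might populate the variable before the CLI config is loaded; the registry host and token are placeholders, and only the base64("username:pat") form described in the DockerEnvConfigKey comment is accepted.

package main

import (
	"encoding/base64"
	"encoding/json"
	"os"
)

func main() {
	// Single JSON object of the shape parseEnvConfig expects:
	// {"auths": {"<registry>": {"auth": base64("username:pat")}}}.
	// Unknown fields and trailing JSON objects are rejected.
	auth := base64.StdEncoding.EncodeToString([]byte("username:personal-access-token"))
	payload, _ := json.Marshal(map[string]any{
		"auths": map[string]any{
			"registry.example.test": map[string]string{"auth": auth},
		},
	})
	os.Setenv("DOCKER_AUTH_CONFIG", string(payload))
	// GetCredentialsStore then layers these credentials over the native
	// or file store, which remains the fallback for reads and writes.
}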
diff --git a/vendor/github.com/docker/cli/cli/config/memorystore/store.go b/vendor/github.com/docker/cli/cli/config/memorystore/store.go
new file mode 100644
index 00000000..19908346
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/config/memorystore/store.go
@@ -0,0 +1,126 @@
+//go:build go1.23
+
+package memorystore
+
+import (
+ "errors"
+ "fmt"
+ "maps"
+ "os"
+ "sync"
+
+ "github.com/docker/cli/cli/config/credentials"
+ "github.com/docker/cli/cli/config/types"
+)
+
+var errValueNotFound = errors.New("value not found")
+
+func IsErrValueNotFound(err error) bool {
+ return errors.Is(err, errValueNotFound)
+}
+
+type Config struct {
+ lock sync.RWMutex
+ memoryCredentials map[string]types.AuthConfig
+ fallbackStore credentials.Store
+}
+
+func (e *Config) Erase(serverAddress string) error {
+ e.lock.Lock()
+ defer e.lock.Unlock()
+ delete(e.memoryCredentials, serverAddress)
+
+ if e.fallbackStore != nil {
+ err := e.fallbackStore.Erase(serverAddress)
+ if err != nil {
+ _, _ = fmt.Fprintln(os.Stderr, "memorystore: ", err)
+ }
+ }
+
+ return nil
+}
+
+func (e *Config) Get(serverAddress string) (types.AuthConfig, error) {
+ e.lock.RLock()
+ defer e.lock.RUnlock()
+ authConfig, ok := e.memoryCredentials[serverAddress]
+ if !ok {
+ if e.fallbackStore != nil {
+ return e.fallbackStore.Get(serverAddress)
+ }
+ return types.AuthConfig{}, errValueNotFound
+ }
+ return authConfig, nil
+}
+
+func (e *Config) GetAll() (map[string]types.AuthConfig, error) {
+ e.lock.RLock()
+ defer e.lock.RUnlock()
+ creds := make(map[string]types.AuthConfig)
+
+ if e.fallbackStore != nil {
+ fileCredentials, err := e.fallbackStore.GetAll()
+ if err != nil {
+ _, _ = fmt.Fprintln(os.Stderr, "memorystore: ", err)
+ } else {
+ creds = fileCredentials
+ }
+ }
+
+ maps.Copy(creds, e.memoryCredentials)
+ return creds, nil
+}
+
+func (e *Config) Store(authConfig types.AuthConfig) error {
+ e.lock.Lock()
+ defer e.lock.Unlock()
+ e.memoryCredentials[authConfig.ServerAddress] = authConfig
+
+ if e.fallbackStore != nil {
+ return e.fallbackStore.Store(authConfig)
+ }
+ return nil
+}
+
+// WithFallbackStore sets a fallback store.
+//
+// Write operations will be performed on both the memory store and the
+// fallback store.
+//
+// Read operations will first check the memory store, and if the credential
+// is not found, it will then check the fallback store.
+//
+// Retrieving all credentials will return from both the memory store and the
+// fallback store, merging the results from both stores into a single map.
+//
+// Data stored in the memory store will take precedence over data in the
+// fallback store.
+func WithFallbackStore(store credentials.Store) Options {
+ return func(s *Config) error {
+ s.fallbackStore = store
+ return nil
+ }
+}
+
+// WithAuthConfig allows to set the initial credentials in the memory store.
+func WithAuthConfig(config map[string]types.AuthConfig) Options {
+ return func(s *Config) error {
+ s.memoryCredentials = config
+ return nil
+ }
+}
+
+type Options func(*Config) error
+
+// New creates a new in memory credential store
+func New(opts ...Options) (credentials.Store, error) {
+ m := &Config{
+ memoryCredentials: make(map[string]types.AuthConfig),
+ }
+ for _, opt := range opts {
+ if err := opt(m); err != nil {
+ return nil, err
+ }
+ }
+ return m, nil
+}
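A short sketch of using the new memorystore package directly, with placeholder registry and credentials; GetCredentialsStore above composes it the same way, adding the file or native store via WithFallbackStore.

package main

import (
	"fmt"

	"github.com/docker/cli/cli/config/memorystore"
	"github.com/docker/cli/cli/config/types"
)

func main() {
	// Seed the in-memory credentials; Get/GetAll consult this map first
	// and only then any store configured with WithFallbackStore.
	store, err := memorystore.New(
		memorystore.WithAuthConfig(map[string]types.AuthConfig{
			"registry.example.test": {
				Username:      "username",
				Password:      "personal-access-token",
				ServerAddress: "registry.example.test",
			},
		}),
	)
	if err != nil {
		panic(err)
	}
	cred, err := store.Get("registry.example.test")
	if err != nil {
		panic(err)
	}
	fmt.Println(cred.Username)
}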
diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go
index 52888a91..4b04f8b3 100644
--- a/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go
+++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go
@@ -28,7 +28,6 @@ import (
"syscall"
"time"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -149,7 +148,7 @@ func (c *commandConn) handleEOF(err error) error {
c.stderrMu.Lock()
stderr := c.stderr.String()
c.stderrMu.Unlock()
- return errors.Errorf("command %v did not exit after %v: stderr=%q", c.cmd.Args, err, stderr)
+ return fmt.Errorf("command %v did not exit after %v: stderr=%q", c.cmd.Args, err, stderr)
}
}
@@ -159,7 +158,7 @@ func (c *commandConn) handleEOF(err error) error {
c.stderrMu.Lock()
stderr := c.stderr.String()
c.stderrMu.Unlock()
- return errors.Errorf("command %v has exited with %v, make sure the URL is valid, and Docker 18.09 or later is installed on the remote host: stderr=%s", c.cmd.Args, werr, stderr)
+ return fmt.Errorf("command %v has exited with %v, make sure the URL is valid, and Docker 18.09 or later is installed on the remote host: stderr=%s", c.cmd.Args, werr, stderr)
}
func ignorableCloseError(err error) bool {
diff --git a/vendor/github.com/docker/cli/cli/connhelper/connhelper.go b/vendor/github.com/docker/cli/cli/connhelper/connhelper.go
index 152d3e29..25ce7aef 100644
--- a/vendor/github.com/docker/cli/cli/connhelper/connhelper.go
+++ b/vendor/github.com/docker/cli/cli/connhelper/connhelper.go
@@ -3,13 +3,13 @@ package connhelper
import (
"context"
+ "fmt"
"net"
"net/url"
"strings"
"github.com/docker/cli/cli/connhelper/commandconn"
"github.com/docker/cli/cli/connhelper/ssh"
- "github.com/pkg/errors"
)
// ConnectionHelper allows to connect to a remote host with custom stream provider binary.
@@ -41,20 +41,25 @@ func getConnectionHelper(daemonURL string, sshFlags []string) (*ConnectionHelper
return nil, err
}
if u.Scheme == "ssh" {
- sp, err := ssh.ParseURL(daemonURL)
+ sp, err := ssh.NewSpec(u)
if err != nil {
- return nil, errors.Wrap(err, "ssh host connection is not valid")
+ return nil, fmt.Errorf("ssh host connection is not valid: %w", err)
}
sshFlags = addSSHTimeout(sshFlags)
sshFlags = disablePseudoTerminalAllocation(sshFlags)
+
+ remoteCommand := []string{"docker", "system", "dial-stdio"}
+ socketPath := sp.Path
+ if strings.Trim(sp.Path, "/") != "" {
+ remoteCommand = []string{"docker", "--host=unix://" + socketPath, "system", "dial-stdio"}
+ }
+ sshArgs, err := sp.Command(sshFlags, remoteCommand...)
+ if err != nil {
+ return nil, err
+ }
return &ConnectionHelper{
Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
- args := []string{"docker"}
- if sp.Path != "" {
- args = append(args, "--host", "unix://"+sp.Path)
- }
- args = append(args, "system", "dial-stdio")
- return commandconn.New(ctx, "ssh", append(sshFlags, sp.Args(args...)...)...)
+ return commandconn.New(ctx, "ssh", sshArgs...)
},
Host: "http://docker.example.com",
}, nil
diff --git a/vendor/github.com/miekg/pkcs11/LICENSE b/vendor/github.com/docker/cli/cli/connhelper/internal/syntax/LICENSE
similarity index 91%
rename from vendor/github.com/miekg/pkcs11/LICENSE
rename to vendor/github.com/docker/cli/cli/connhelper/internal/syntax/LICENSE
index ce25d13a..2a5268e5 100644
--- a/vendor/github.com/miekg/pkcs11/LICENSE
+++ b/vendor/github.com/docker/cli/cli/connhelper/internal/syntax/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2013 Miek Gieben. All rights reserved.
+Copyright (c) 2016, Daniel Martí. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Miek Gieben nor the names of its
+ * Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/github.com/docker/cli/cli/connhelper/internal/syntax/doc.go b/vendor/github.com/docker/cli/cli/connhelper/internal/syntax/doc.go
new file mode 100644
index 00000000..32cf60c7
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/connhelper/internal/syntax/doc.go
@@ -0,0 +1,13 @@
+// Package syntax is a fork of [mvdan.cc/sh/v3@v3.10.0/syntax].
+//
+// Copyright (c) 2016, Daniel Martí. All rights reserved.
+//
+// It is a reduced set of the package to only provide the [Quote] function,
+// and contains the [LICENSE], [quote.go] and [parser.go] files at the given
+// revision.
+//
+// [quote.go]: https://raw.githubusercontent.com/mvdan/sh/refs/tags/v3.10.0/syntax/quote.go
+// [parser.go]: https://raw.githubusercontent.com/mvdan/sh/refs/tags/v3.10.0/syntax/parser.go
+// [LICENSE]: https://raw.githubusercontent.com/mvdan/sh/refs/tags/v3.10.0/LICENSE
+// [mvdan.cc/sh/v3@v3.10.0/syntax]: https://pkg.go.dev/mvdan.cc/sh/v3@v3.10.0/syntax
+package syntax
diff --git a/vendor/github.com/docker/cli/cli/connhelper/internal/syntax/parser.go b/vendor/github.com/docker/cli/cli/connhelper/internal/syntax/parser.go
new file mode 100644
index 00000000..06b1222f
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/connhelper/internal/syntax/parser.go
@@ -0,0 +1,95 @@
+// Copyright (c) 2016, Daniel Martí
+// See LICENSE for licensing information
+
+package syntax
+
+// LangVariant describes a shell language variant to use when tokenizing and
+// parsing shell code. The zero value is [LangBash].
+type LangVariant int
+
+const (
+ // LangBash corresponds to the GNU Bash language, as described in its
+ // manual at https://www.gnu.org/software/bash/manual/bash.html.
+ //
+ // We currently follow Bash version 5.2.
+ //
+ // Its string representation is "bash".
+ LangBash LangVariant = iota
+
+ // LangPOSIX corresponds to the POSIX Shell language, as described at
+ // https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html.
+ //
+ // Its string representation is "posix" or "sh".
+ LangPOSIX
+
+ // LangMirBSDKorn corresponds to the MirBSD Korn Shell, also known as
+ // mksh, as described at http://www.mirbsd.org/htman/i386/man1/mksh.htm.
+ // Note that it shares some features with Bash, due to the shared
+ // ancestry that is ksh.
+ //
+ // We currently follow mksh version 59.
+ //
+ // Its string representation is "mksh".
+ LangMirBSDKorn
+
+ // LangBats corresponds to the Bash Automated Testing System language,
+ // as described at https://github.com/bats-core/bats-core. Note that
+ // it's just a small extension of the Bash language.
+ //
+ // Its string representation is "bats".
+ LangBats
+
+ // LangAuto corresponds to automatic language detection,
+ // commonly used by end-user applications like shfmt,
+ // which can guess a file's language variant given its filename or shebang.
+ //
+ // At this time, [Variant] does not support LangAuto.
+ LangAuto
+)
+
+func (l LangVariant) String() string {
+ switch l {
+ case LangBash:
+ return "bash"
+ case LangPOSIX:
+ return "posix"
+ case LangMirBSDKorn:
+ return "mksh"
+ case LangBats:
+ return "bats"
+ case LangAuto:
+ return "auto"
+ }
+ return "unknown shell language variant"
+}
+
+// IsKeyword returns true if the given word is part of the language keywords.
+func IsKeyword(word string) bool {
+ // This list has been copied from the bash 5.1 source code, file y.tab.c +4460
+ switch word {
+ case
+ "!",
+ "[[", // only if COND_COMMAND is defined
+ "]]", // only if COND_COMMAND is defined
+ "case",
+ "coproc", // only if COPROCESS_SUPPORT is defined
+ "do",
+ "done",
+ "else",
+ "esac",
+ "fi",
+ "for",
+ "function",
+ "if",
+ "in",
+ "select", // only if SELECT_COMMAND is defined
+ "then",
+ "time", // only if COMMAND_TIMING is defined
+ "until",
+ "while",
+ "{",
+ "}":
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/docker/cli/cli/connhelper/internal/syntax/quote.go b/vendor/github.com/docker/cli/cli/connhelper/internal/syntax/quote.go
new file mode 100644
index 00000000..628fa489
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/connhelper/internal/syntax/quote.go
@@ -0,0 +1,187 @@
+// Copyright (c) 2021, Daniel Martí
+// See LICENSE for licensing information
+
+package syntax
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type QuoteError struct {
+ ByteOffset int
+ Message string
+}
+
+func (e QuoteError) Error() string {
+ return fmt.Sprintf("cannot quote character at byte %d: %s", e.ByteOffset, e.Message)
+}
+
+const (
+ quoteErrNull = "shell strings cannot contain null bytes"
+ quoteErrPOSIX = "POSIX shell lacks escape sequences"
+ quoteErrRange = "rune out of range"
+ quoteErrMksh = "mksh cannot escape codepoints above 16 bits"
+)
+
+// Quote returns a quoted version of the input string,
+// so that the quoted version is expanded or interpreted
+// as the original string in the given language variant.
+//
+// Quoting is necessary when using arbitrary literal strings
+// as words in a shell script or command.
+// Without quoting, one can run into syntax errors,
+// as well as the possibility of running unintended code.
+//
+// An error is returned when a string cannot be quoted for a variant.
+// For instance, POSIX lacks escape sequences for non-printable characters,
+// and no language variant can represent a string containing null bytes.
+// In such cases, the returned error type will be *QuoteError.
+//
+// The quoting strategy is chosen on a best-effort basis,
+// to minimize the amount of extra bytes necessary.
+//
+// Some strings do not require any quoting and are returned unchanged.
+// Those strings can be directly surrounded in single quotes as well.
+//
+//nolint:gocyclo // ignore "cyclomatic complexity 35 of func `Quote` is high (> 16) (gocyclo)"
+func Quote(s string, lang LangVariant) (string, error) {
+ if s == "" {
+ // Special case; an empty string must always be quoted,
+ // as otherwise it expands to zero fields.
+ return "''", nil
+ }
+ shellChars := false
+ nonPrintable := false
+ offs := 0
+ for rem := s; len(rem) > 0; {
+ r, size := utf8.DecodeRuneInString(rem)
+ switch r {
+ // Like regOps; token characters.
+ case ';', '"', '\'', '(', ')', '$', '|', '&', '>', '<', '`',
+ // Whitespace; might result in multiple fields.
+ ' ', '\t', '\r', '\n',
+ // Escape sequences would be expanded.
+ '\\',
+ // Would start a comment unless quoted.
+ '#',
+ // Might result in brace expansion.
+ '{',
+ // Might result in tilde expansion.
+ '~',
+ // Might result in globbing.
+ '*', '?', '[',
+ // Might result in an assignment.
+ '=':
+ shellChars = true
+ case '\x00':
+ return "", &QuoteError{ByteOffset: offs, Message: quoteErrNull}
+ }
+ if r == utf8.RuneError || !unicode.IsPrint(r) {
+ if lang == LangPOSIX {
+ return "", &QuoteError{ByteOffset: offs, Message: quoteErrPOSIX}
+ }
+ nonPrintable = true
+ }
+ rem = rem[size:]
+ offs += size
+ }
+ if !shellChars && !nonPrintable && !IsKeyword(s) {
+ // Nothing to quote; avoid allocating.
+ return s, nil
+ }
+
+ // Single quotes are usually best,
+ // as they don't require any escaping of characters.
+ // If we have any invalid utf8 or non-printable runes,
+ // use $'' so that we can escape them.
+ // Note that we can't use double quotes for those.
+ var b strings.Builder
+ if nonPrintable {
+ b.WriteString("$'")
+ lastRequoteIfHex := false
+ offs = 0
+ for rem := s; len(rem) > 0; {
+ nextRequoteIfHex := false
+ r, size := utf8.DecodeRuneInString(rem)
+ switch {
+ case r == '\'', r == '\\':
+ b.WriteByte('\\')
+ b.WriteRune(r)
+ case unicode.IsPrint(r) && r != utf8.RuneError:
+ if lastRequoteIfHex && isHex(r) {
+ b.WriteString("'$'")
+ }
+ b.WriteRune(r)
+ case r == '\a':
+ b.WriteString(`\a`)
+ case r == '\b':
+ b.WriteString(`\b`)
+ case r == '\f':
+ b.WriteString(`\f`)
+ case r == '\n':
+ b.WriteString(`\n`)
+ case r == '\r':
+ b.WriteString(`\r`)
+ case r == '\t':
+ b.WriteString(`\t`)
+ case r == '\v':
+ b.WriteString(`\v`)
+ case r < utf8.RuneSelf, r == utf8.RuneError && size == 1:
+ // \xXX, fixed at two hexadecimal characters.
+ fmt.Fprintf(&b, "\\x%02x", rem[0])
+ // Unfortunately, mksh allows \x to consume more hex characters.
+ // Ensure that we don't allow it to read more than two.
+ if lang == LangMirBSDKorn {
+ nextRequoteIfHex = true
+ }
+ case r > utf8.MaxRune:
+ // Not a valid Unicode code point?
+ return "", &QuoteError{ByteOffset: offs, Message: quoteErrRange}
+ case lang == LangMirBSDKorn && r > 0xFFFD:
+ // From the CAVEATS section in R59's man page:
+ //
+ // mksh currently uses OPTU-16 internally, which is the same as
+ // UTF-8 and CESU-8 with 0000..FFFD being valid codepoints.
+ return "", &QuoteError{ByteOffset: offs, Message: quoteErrMksh}
+ case r < 0x10000:
+ // \uXXXX, fixed at four hexadecimal characters.
+ fmt.Fprintf(&b, "\\u%04x", r)
+ default:
+ // \UXXXXXXXX, fixed at eight hexadecimal characters.
+ fmt.Fprintf(&b, "\\U%08x", r)
+ }
+ rem = rem[size:]
+ lastRequoteIfHex = nextRequoteIfHex
+ offs += size
+ }
+ b.WriteString("'")
+ return b.String(), nil
+ }
+
+ // Single quotes without any need for escaping.
+ if !strings.Contains(s, "'") {
+ return "'" + s + "'", nil
+ }
+
+ // The string contains single quotes,
+ // so fall back to double quotes.
+ b.WriteByte('"')
+ for _, r := range s {
+ switch r {
+ case '"', '\\', '`', '$':
+ b.WriteByte('\\')
+ }
+ b.WriteRune(r)
+ }
+ b.WriteByte('"')
+ return b.String(), nil
+}
+
+func isHex(r rune) bool {
+ return (r >= '0' && r <= '9') ||
+ (r >= 'a' && r <= 'f') ||
+ (r >= 'A' && r <= 'F')
+}
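Since this fork lives under internal/, it is only importable from within docker/cli itself; a rough sketch of the quoting behaviour the ssh helper below relies on, written as it could appear in an example test alongside the fork (the path is illustrative):

package syntax

import "fmt"

func ExampleQuote() {
	// A space forces quoting; with no non-printable runes and no single
	// quotes, the cheapest strategy is plain single quotes.
	quoted, err := Quote("/run/user/1000/my docker.sock", LangPOSIX)
	if err != nil {
		// A *QuoteError is returned for strings POSIX sh cannot
		// represent, e.g. null bytes or non-printable characters.
		panic(err)
	}
	fmt.Println(quoted)
	// Output: '/run/user/1000/my docker.sock'
}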
diff --git a/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go b/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go
index fb4c9111..2fcb54a9 100644
--- a/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go
+++ b/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go
@@ -2,19 +2,48 @@
package ssh
import (
+ "errors"
+ "fmt"
"net/url"
- "github.com/pkg/errors"
+ "github.com/docker/cli/cli/connhelper/internal/syntax"
)
-// ParseURL parses URL
+// ParseURL creates a [Spec] from the given ssh URL. It returns an error if
+// the URL is using the wrong scheme, contains fragments, query-parameters,
+// or contains a password.
func ParseURL(daemonURL string) (*Spec, error) {
u, err := url.Parse(daemonURL)
if err != nil {
- return nil, err
+ var urlErr *url.Error
+ if errors.As(err, &urlErr) {
+ err = urlErr.Unwrap()
+ }
+ return nil, fmt.Errorf("invalid SSH URL: %w", err)
+ }
+ return NewSpec(u)
+}
+
+// NewSpec creates a [Spec] from the given ssh URL's properties. It returns
+// an error if the URL is using the wrong scheme, contains fragments,
+// query-parameters, or contains a password.
+func NewSpec(sshURL *url.URL) (*Spec, error) {
+ s, err := newSpec(sshURL)
+ if err != nil {
+ return nil, fmt.Errorf("invalid SSH URL: %w", err)
+ }
+ return s, nil
+}
+
+func newSpec(u *url.URL) (*Spec, error) {
+ if u == nil {
+ return nil, errors.New("URL is nil")
+ }
+ if u.Scheme == "" {
+ return nil, errors.New("no scheme provided")
}
if u.Scheme != "ssh" {
- return nil, errors.Errorf("expected scheme ssh, got %q", u.Scheme)
+ return nil, errors.New("incorrect scheme: " + u.Scheme)
}
var sp Spec
@@ -27,17 +56,18 @@ func ParseURL(daemonURL string) (*Spec, error) {
}
sp.Host = u.Hostname()
if sp.Host == "" {
- return nil, errors.Errorf("no host specified")
+ return nil, errors.New("hostname is empty")
}
sp.Port = u.Port()
sp.Path = u.Path
if u.RawQuery != "" {
- return nil, errors.Errorf("extra query after the host: %q", u.RawQuery)
+ return nil, fmt.Errorf("query parameters are not allowed: %q", u.RawQuery)
}
if u.Fragment != "" {
- return nil, errors.Errorf("extra fragment after the host: %q", u.Fragment)
+ return nil, fmt.Errorf("fragments are not allowed: %q", u.Fragment)
}
- return &sp, err
+
+ return &sp, nil
}
// Spec of SSH URL
@@ -48,16 +78,106 @@ type Spec struct {
Path string
}
-// Args returns args except "ssh" itself combined with optional additional command args
-func (sp *Spec) Args(add ...string) []string {
+// Args returns args except "ssh" itself combined with optional additional
+// command and args to be executed on the remote host. It attempts to quote
+// the given arguments to account for ssh executing the remote command in a
+// shell. It returns nil when unable to quote the remote command.
+func (sp *Spec) Args(remoteCommandAndArgs ...string) []string {
+ // Format the remote command to run using the ssh connection, quoting
+ // values where needed because ssh executes these in a POSIX shell.
+ remoteCommand, err := quoteCommand(remoteCommandAndArgs...)
+ if err != nil {
+ return nil
+ }
+
+ sshArgs, err := sp.args()
+ if err != nil {
+ return nil
+ }
+ if remoteCommand != "" {
+ sshArgs = append(sshArgs, remoteCommand)
+ }
+ return sshArgs
+}
+
+func (sp *Spec) args(sshFlags ...string) ([]string, error) {
var args []string
+ if sp.Host == "" {
+ return nil, errors.New("no host specified")
+ }
if sp.User != "" {
- args = append(args, "-l", sp.User)
+ // Quote user, as it's obtained from the URL.
+ usr, err := syntax.Quote(sp.User, syntax.LangPOSIX)
+ if err != nil {
+ return nil, fmt.Errorf("invalid user: %w", err)
+ }
+ args = append(args, "-l", usr)
}
if sp.Port != "" {
- args = append(args, "-p", sp.Port)
+ // Quote port, as it's obtained from the URL.
+ port, err := syntax.Quote(sp.Port, syntax.LangPOSIX)
+ if err != nil {
+ return nil, fmt.Errorf("invalid port: %w", err)
+ }
+ args = append(args, "-p", port)
}
- args = append(args, "--", sp.Host)
- args = append(args, add...)
- return args
+
+ // We consider "sshFlags" to be "trusted", and set from code only,
+ // as they are not parsed from the DOCKER_HOST URL.
+ args = append(args, sshFlags...)
+
+ host, err := syntax.Quote(sp.Host, syntax.LangPOSIX)
+ if err != nil {
+ return nil, fmt.Errorf("invalid host: %w", err)
+ }
+
+ return append(args, "--", host), nil
+}
+
+// Command returns the ssh flags and arguments to execute a command
+// (remoteCommandAndArgs) on the remote host. Where needed, it quotes
+// values passed in remoteCommandAndArgs to account for ssh executing
+// the remote command in a shell. It returns an error if no remote command
+// is passed, or when unable to quote the remote command.
+//
+// Important: to preserve backward-compatibility, Command does not currently
+// perform sanitization or quoting on the sshFlags and callers are expected
+// to sanitize this argument.
+func (sp *Spec) Command(sshFlags []string, remoteCommandAndArgs ...string) ([]string, error) {
+ if len(remoteCommandAndArgs) == 0 {
+ return nil, errors.New("no remote command specified")
+ }
+ sshArgs, err := sp.args(sshFlags...)
+ if err != nil {
+ return nil, err
+ }
+ remoteCommand, err := quoteCommand(remoteCommandAndArgs...)
+ if err != nil {
+ return nil, err
+ }
+ if remoteCommand != "" {
+ sshArgs = append(sshArgs, remoteCommand)
+ }
+ return sshArgs, nil
+}
+
+// quoteCommand returns the remote command to run using the ssh connection
+// as a single string, quoting values where needed because ssh executes
+// these in a POSIX shell.
+func quoteCommand(commandAndArgs ...string) (string, error) {
+ var quotedCmd string
+ for i, arg := range commandAndArgs {
+ a, err := syntax.Quote(arg, syntax.LangPOSIX)
+ if err != nil {
+ return "", fmt.Errorf("invalid argument: %w", err)
+ }
+ if i == 0 {
+ quotedCmd = a
+ continue
+ }
+ quotedCmd += " " + a
+ }
+ // each part is quoted appropriately, so now we'll have a full
+ // shell command to pass off to "ssh"
+ return quotedCmd, nil
}
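A small sketch of the new Spec.Command helper, mirroring how getConnectionHelper above builds the dial-stdio invocation; the host, port, and flags are placeholders.

package main

import (
	"fmt"
	"net/url"

	"github.com/docker/cli/cli/connhelper/ssh"
)

func main() {
	u, _ := url.Parse("ssh://me@host.example.test:2222/var/run/docker.sock")
	sp, err := ssh.NewSpec(u) // rejects passwords, query params, and fragments
	if err != nil {
		panic(err)
	}
	// Flags are treated as trusted, code-supplied values; the remote
	// command is quoted for the remote POSIX shell.
	args, err := sp.Command([]string{"-o", "ConnectTimeout=30"},
		"docker", "--host=unix:///var/run/docker.sock", "system", "dial-stdio")
	if err != nil {
		panic(err)
	}
	fmt.Println(args)
	// e.g. [-l me -p 2222 -o ConnectTimeout=30 -- host.example.test docker ...]
}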
diff --git a/vendor/github.com/docker/cli/cli/context/docker/load.go b/vendor/github.com/docker/cli/cli/context/docker/load.go
index 700b73c9..89d43e2e 100644
--- a/vendor/github.com/docker/cli/cli/context/docker/load.go
+++ b/vendor/github.com/docker/cli/cli/context/docker/load.go
@@ -6,6 +6,7 @@ import (
"encoding/pem"
"net"
"net/http"
+ "strings"
"time"
"github.com/docker/cli/cli/connhelper"
@@ -90,14 +91,19 @@ func (ep *Endpoint) ClientOpts() ([]client.Opt, error) {
return nil, err
}
if helper == nil {
- tlsConfig, err := ep.tlsConfig()
- if err != nil {
- return nil, err
+ // Check if we're connecting over a socket, because there's no
+ // need to configure TLS for a socket connection.
+ //
+ // TODO(thaJeztah); make resolveDockerEndpoint and resolveDefaultDockerEndpoint not load TLS data,
+ // and load TLS files lazily; see https://github.com/docker/cli/pull/1581
+ if !isSocket(ep.Host) {
+ tlsConfig, err := ep.tlsConfig()
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, withHTTPClient(tlsConfig))
}
- result = append(result,
- withHTTPClient(tlsConfig),
- client.WithHost(ep.Host),
- )
+ result = append(result, client.WithHost(ep.Host))
} else {
result = append(result,
client.WithHTTPClient(&http.Client{
@@ -116,6 +122,17 @@ func (ep *Endpoint) ClientOpts() ([]client.Opt, error) {
return result, nil
}
+// isSocket checks if the given address is a Unix-socket (linux),
+// named pipe (Windows), or file-descriptor.
+func isSocket(addr string) bool {
+ switch proto, _, _ := strings.Cut(addr, "://"); proto {
+ case "unix", "npipe", "fd":
+ return true
+ default:
+ return false
+ }
+}
+
func withHTTPClient(tlsConfig *tls.Config) func(*client.Client) error {
return func(c *client.Client) error {
if tlsConfig == nil {
diff --git a/vendor/github.com/docker/cli/cli/context/store/errors.go b/vendor/github.com/docker/cli/cli/context/store/errors.go
new file mode 100644
index 00000000..e85ce325
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/context/store/errors.go
@@ -0,0 +1,28 @@
+package store
+
+import cerrdefs "github.com/containerd/errdefs"
+
+func invalidParameter(err error) error {
+ if err == nil || cerrdefs.IsInvalidArgument(err) {
+ return err
+ }
+ return invalidParameterErr{err}
+}
+
+type invalidParameterErr struct{ error }
+
+func (invalidParameterErr) InvalidParameter() {}
+
+func notFound(err error) error {
+ if err == nil || cerrdefs.IsNotFound(err) {
+ return err
+ }
+ return notFoundErr{err}
+}
+
+type notFoundErr struct{ error }
+
+func (notFoundErr) NotFound() {}
+func (e notFoundErr) Unwrap() error {
+ return e.error
+}
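These wrappers keep the marker-method convention previously supplied by github.com/docker/docker/errdefs; a brief sketch of what they guarantee, only meaningful inside the store package since the helpers are unexported:

// Inside package store:
err := notFound(fmt.Errorf("context %q: %w", "missing", os.ErrNotExist))

var nf interface{ NotFound() }
_ = errors.As(err, &nf)            // true: notFoundErr advertises the not-found condition
_ = errors.Is(err, os.ErrNotExist) // also true: notFoundErr implements Unwrap()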
diff --git a/vendor/github.com/docker/cli/cli/context/store/io_utils.go b/vendor/github.com/docker/cli/cli/context/store/io_utils.go
index 6f854c8e..097443d0 100644
--- a/vendor/github.com/docker/cli/cli/context/store/io_utils.go
+++ b/vendor/github.com/docker/cli/cli/context/store/io_utils.go
@@ -5,14 +5,14 @@ import (
"io"
)
-// LimitedReader is a fork of io.LimitedReader to override Read.
-type LimitedReader struct {
+// limitedReader is a fork of [io.LimitedReader] to override Read.
+type limitedReader struct {
R io.Reader
N int64 // max bytes remaining
}
-// Read is a fork of io.LimitedReader.Read that returns an error when limit exceeded.
-func (l *LimitedReader) Read(p []byte) (n int, err error) {
+// Read is a fork of [io.LimitedReader.Read] that returns an error when limit exceeded.
+func (l *limitedReader) Read(p []byte) (n int, err error) {
if l.N < 0 {
return 0, errors.New("read exceeds the defined limit")
}
diff --git a/vendor/github.com/docker/cli/cli/context/store/metadatastore.go b/vendor/github.com/docker/cli/cli/context/store/metadatastore.go
index e8b25675..deec5cc9 100644
--- a/vendor/github.com/docker/cli/cli/context/store/metadatastore.go
+++ b/vendor/github.com/docker/cli/cli/context/store/metadatastore.go
@@ -1,20 +1,19 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package store
import (
"encoding/json"
+ "errors"
"fmt"
"os"
"path/filepath"
"reflect"
"sort"
- "github.com/docker/docker/errdefs"
- "github.com/docker/docker/pkg/atomicwriter"
"github.com/fvbommel/sortorder"
- "github.com/pkg/errors"
+ "github.com/moby/sys/atomicwriter"
)
const (
@@ -64,7 +63,7 @@ func parseTypedOrMap(payload []byte, getter TypeGetter) (any, error) {
func (s *metadataStore) get(name string) (Metadata, error) {
m, err := s.getByID(contextdirOf(name))
if err != nil {
- return m, errors.Wrapf(err, "context %q", name)
+ return m, fmt.Errorf("context %q: %w", name, err)
}
return m, nil
}
@@ -74,7 +73,7 @@ func (s *metadataStore) getByID(id contextdir) (Metadata, error) {
bytes, err := os.ReadFile(fileName)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
- return Metadata{}, errdefs.NotFound(errors.Wrap(err, "context not found"))
+ return Metadata{}, notFound(fmt.Errorf("context not found: %w", err))
}
return Metadata{}, err
}
@@ -99,7 +98,7 @@ func (s *metadataStore) getByID(id contextdir) (Metadata, error) {
func (s *metadataStore) remove(name string) error {
if err := os.RemoveAll(s.contextDir(contextdirOf(name))); err != nil {
- return errors.Wrapf(err, "failed to remove metadata")
+ return fmt.Errorf("failed to remove metadata: %w", err)
}
return nil
}
@@ -119,7 +118,7 @@ func (s *metadataStore) list() ([]Metadata, error) {
if errors.Is(err, os.ErrNotExist) {
continue
}
- return nil, errors.Wrap(err, "failed to read metadata")
+ return nil, fmt.Errorf("failed to read metadata: %w", err)
}
res = append(res, c)
}
diff --git a/vendor/github.com/docker/cli/cli/context/store/store.go b/vendor/github.com/docker/cli/cli/context/store/store.go
index 3643e576..91d9c19c 100644
--- a/vendor/github.com/docker/cli/cli/context/store/store.go
+++ b/vendor/github.com/docker/cli/cli/context/store/store.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package store
@@ -10,21 +10,21 @@ import (
"bytes"
_ "crypto/sha256" // ensure ids can be computed
"encoding/json"
+ "errors"
+ "fmt"
"io"
"net/http"
"path"
"path/filepath"
- "regexp"
"strings"
- "github.com/docker/docker/errdefs"
+ "github.com/docker/cli/internal/lazyregexp"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
const restrictedNamePattern = "^[a-zA-Z0-9][a-zA-Z0-9_.+-]+$"
-var restrictedNameRegEx = regexp.MustCompile(restrictedNamePattern)
+var restrictedNameRegEx = lazyregexp.New(restrictedNamePattern)
// Store provides a context store for easily remembering endpoints configuration
type Store interface {
@@ -146,10 +146,10 @@ func (s *ContextStore) CreateOrUpdate(meta Metadata) error {
// Remove deletes the context with the given name, if found.
func (s *ContextStore) Remove(name string) error {
if err := s.meta.remove(name); err != nil {
- return errors.Wrapf(err, "failed to remove context %s", name)
+ return fmt.Errorf("failed to remove context %s: %w", name, err)
}
if err := s.tls.remove(name); err != nil {
- return errors.Wrapf(err, "failed to remove context %s", name)
+ return fmt.Errorf("failed to remove context %s: %w", name, err)
}
return nil
}
@@ -226,7 +226,7 @@ func ValidateContextName(name string) error {
return errors.New(`"default" is a reserved context name`)
}
if !restrictedNameRegEx.MatchString(name) {
- return errors.Errorf("context name %q is invalid, names are validated against regexp %q", name, restrictedNamePattern)
+ return fmt.Errorf("context name %q is invalid, names are validated against regexp %q", name, restrictedNamePattern)
}
return nil
}
@@ -356,7 +356,7 @@ func isValidFilePath(p string) error {
}
func importTar(name string, s Writer, reader io.Reader) error {
- tr := tar.NewReader(&LimitedReader{R: reader, N: maxAllowedFileSizeToImport})
+ tr := tar.NewReader(&limitedReader{R: reader, N: maxAllowedFileSizeToImport})
tlsData := ContextTLSData{
Endpoints: map[string]EndpointTLSData{},
}
@@ -374,7 +374,7 @@ func importTar(name string, s Writer, reader io.Reader) error {
continue
}
if err := isValidFilePath(hdr.Name); err != nil {
- return errors.Wrap(err, hdr.Name)
+ return fmt.Errorf("%s: %w", hdr.Name, err)
}
if hdr.Name == metaFile {
data, err := io.ReadAll(tr)
@@ -400,13 +400,13 @@ func importTar(name string, s Writer, reader io.Reader) error {
}
}
if !importedMetaFile {
- return errdefs.InvalidParameter(errors.New("invalid context: no metadata found"))
+ return invalidParameter(errors.New("invalid context: no metadata found"))
}
return s.ResetTLSMaterial(name, &tlsData)
}
func importZip(name string, s Writer, reader io.Reader) error {
- body, err := io.ReadAll(&LimitedReader{R: reader, N: maxAllowedFileSizeToImport})
+ body, err := io.ReadAll(&limitedReader{R: reader, N: maxAllowedFileSizeToImport})
if err != nil {
return err
}
@@ -426,7 +426,7 @@ func importZip(name string, s Writer, reader io.Reader) error {
continue
}
if err := isValidFilePath(zf.Name); err != nil {
- return errors.Wrap(err, zf.Name)
+ return fmt.Errorf("%s: %w", zf.Name, err)
}
if zf.Name == metaFile {
f, err := zf.Open()
@@ -434,7 +434,7 @@ func importZip(name string, s Writer, reader io.Reader) error {
return err
}
- data, err := io.ReadAll(&LimitedReader{R: f, N: maxAllowedFileSizeToImport})
+ data, err := io.ReadAll(&limitedReader{R: f, N: maxAllowedFileSizeToImport})
defer f.Close()
if err != nil {
return err
@@ -464,7 +464,7 @@ func importZip(name string, s Writer, reader io.Reader) error {
}
}
if !importedMetaFile {
- return errdefs.InvalidParameter(errors.New("invalid context: no metadata found"))
+ return invalidParameter(errors.New("invalid context: no metadata found"))
}
return s.ResetTLSMaterial(name, &tlsData)
}
diff --git a/vendor/github.com/docker/cli/cli/context/store/storeconfig.go b/vendor/github.com/docker/cli/cli/context/store/storeconfig.go
index bfd5e6fc..fccbf1d1 100644
--- a/vendor/github.com/docker/cli/cli/context/store/storeconfig.go
+++ b/vendor/github.com/docker/cli/cli/context/store/storeconfig.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package store
diff --git a/vendor/github.com/docker/cli/cli/context/store/tlsstore.go b/vendor/github.com/docker/cli/cli/context/store/tlsstore.go
index 3cbfe627..c1f5f8ca 100644
--- a/vendor/github.com/docker/cli/cli/context/store/tlsstore.go
+++ b/vendor/github.com/docker/cli/cli/context/store/tlsstore.go
@@ -1,12 +1,11 @@
package store
import (
+ "fmt"
"os"
"path/filepath"
- "github.com/docker/docker/errdefs"
- "github.com/docker/docker/pkg/atomicwriter"
- "github.com/pkg/errors"
+ "github.com/moby/sys/atomicwriter"
)
const tlsDir = "tls"
@@ -39,9 +38,9 @@ func (s *tlsStore) getData(name, endpointName, filename string) ([]byte, error)
data, err := os.ReadFile(filepath.Join(s.endpointDir(name, endpointName), filename))
if err != nil {
if os.IsNotExist(err) {
- return nil, errdefs.NotFound(errors.Errorf("TLS data for %s/%s/%s does not exist", name, endpointName, filename))
+ return nil, notFound(fmt.Errorf("TLS data for %s/%s/%s does not exist", name, endpointName, filename))
}
- return nil, errors.Wrapf(err, "failed to read TLS data for endpoint %s", endpointName)
+ return nil, fmt.Errorf("failed to read TLS data for endpoint %s: %w", endpointName, err)
}
return data, nil
}
@@ -49,14 +48,14 @@ func (s *tlsStore) getData(name, endpointName, filename string) ([]byte, error)
// remove deletes all TLS data for the given context.
func (s *tlsStore) remove(name string) error {
if err := os.RemoveAll(s.contextDir(name)); err != nil {
- return errors.Wrapf(err, "failed to remove TLS data")
+ return fmt.Errorf("failed to remove TLS data: %w", err)
}
return nil
}
func (s *tlsStore) removeEndpoint(name, endpointName string) error {
if err := os.RemoveAll(s.endpointDir(name, endpointName)); err != nil {
- return errors.Wrapf(err, "failed to remove TLS data for endpoint %s", endpointName)
+ return fmt.Errorf("failed to remove TLS data for endpoint %s: %w", endpointName, err)
}
return nil
}
@@ -68,7 +67,7 @@ func (s *tlsStore) listContextData(name string) (map[string]EndpointFiles, error
if os.IsNotExist(err) {
return map[string]EndpointFiles{}, nil
}
- return nil, errors.Wrapf(err, "failed to list TLS files for context %s", name)
+ return nil, fmt.Errorf("failed to list TLS files for context %s: %w", name, err)
}
r := make(map[string]EndpointFiles)
for _, epFS := range epFSs {
@@ -78,7 +77,7 @@ func (s *tlsStore) listContextData(name string) (map[string]EndpointFiles, error
continue
}
if err != nil {
- return nil, errors.Wrapf(err, "failed to list TLS files for endpoint %s", epFS.Name())
+ return nil, fmt.Errorf("failed to list TLS files for endpoint %s: %w", epFS.Name(), err)
}
var files EndpointFiles
for _, fs := range fss {
diff --git a/vendor/github.com/docker/cli/cli/debug/debug.go b/vendor/github.com/docker/cli/cli/debug/debug.go
index 84002bd0..5ad1b032 100644
--- a/vendor/github.com/docker/cli/cli/debug/debug.go
+++ b/vendor/github.com/docker/cli/cli/debug/debug.go
@@ -33,5 +33,8 @@ func IsEnabled() bool {
// The default is to log to the debug level which is only
// enabled when debugging is enabled.
var OTELErrorHandler otel.ErrorHandler = otel.ErrorHandlerFunc(func(err error) {
+ if err == nil {
+ return
+ }
logrus.WithError(err).Debug("otel error")
})
diff --git a/vendor/github.com/docker/cli/cli/manifest/store/store.go b/vendor/github.com/docker/cli/cli/manifest/store/store.go
deleted file mode 100644
index e97e8628..00000000
--- a/vendor/github.com/docker/cli/cli/manifest/store/store.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package store
-
-import (
- "encoding/json"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/distribution/reference"
- "github.com/docker/cli/cli/manifest/types"
- "github.com/docker/distribution/manifest/manifestlist"
- "github.com/opencontainers/go-digest"
- ocispec "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
-)
-
-// Store manages local storage of image distribution manifests
-type Store interface {
- Remove(listRef reference.Reference) error
- Get(listRef reference.Reference, manifest reference.Reference) (types.ImageManifest, error)
- GetList(listRef reference.Reference) ([]types.ImageManifest, error)
- Save(listRef reference.Reference, manifest reference.Reference, image types.ImageManifest) error
-}
-
-// fsStore manages manifest files stored on the local filesystem
-type fsStore struct {
- root string
-}
-
-// NewStore returns a new store for a local file path
-func NewStore(root string) Store {
- return &fsStore{root: root}
-}
-
-// Remove a manifest list from local storage
-func (s *fsStore) Remove(listRef reference.Reference) error {
- path := filepath.Join(s.root, makeFilesafeName(listRef.String()))
- return os.RemoveAll(path)
-}
-
-// Get returns the local manifest
-func (s *fsStore) Get(listRef reference.Reference, manifest reference.Reference) (types.ImageManifest, error) {
- filename := manifestToFilename(s.root, listRef.String(), manifest.String())
- return s.getFromFilename(manifest, filename)
-}
-
-func (*fsStore) getFromFilename(ref reference.Reference, filename string) (types.ImageManifest, error) {
- bytes, err := os.ReadFile(filename)
- switch {
- case os.IsNotExist(err):
- return types.ImageManifest{}, newNotFoundError(ref.String())
- case err != nil:
- return types.ImageManifest{}, err
- }
- var manifestInfo struct {
- types.ImageManifest
-
- // Deprecated Fields, replaced by Descriptor
- Digest digest.Digest
- Platform *manifestlist.PlatformSpec
- }
-
- if err := json.Unmarshal(bytes, &manifestInfo); err != nil {
- return types.ImageManifest{}, err
- }
-
- // Compatibility with image manifests created before
- // descriptor, newer versions omit Digest and Platform
- if manifestInfo.Digest != "" {
- mediaType, raw, err := manifestInfo.Payload()
- if err != nil {
- return types.ImageManifest{}, err
- }
- if dgst := digest.FromBytes(raw); dgst != manifestInfo.Digest {
- return types.ImageManifest{}, errors.Errorf("invalid manifest file %v: image manifest digest mismatch (%v != %v)", filename, manifestInfo.Digest, dgst)
- }
- manifestInfo.ImageManifest.Descriptor = ocispec.Descriptor{
- Digest: manifestInfo.Digest,
- Size: int64(len(raw)),
- MediaType: mediaType,
- Platform: types.OCIPlatform(manifestInfo.Platform),
- }
- }
-
- return manifestInfo.ImageManifest, nil
-}
-
-// GetList returns all the local manifests for a transaction
-func (s *fsStore) GetList(listRef reference.Reference) ([]types.ImageManifest, error) {
- filenames, err := s.listManifests(listRef.String())
- switch {
- case err != nil:
- return nil, err
- case filenames == nil:
- return nil, newNotFoundError(listRef.String())
- }
-
- manifests := []types.ImageManifest{}
- for _, filename := range filenames {
- filename = filepath.Join(s.root, makeFilesafeName(listRef.String()), filename)
- manifest, err := s.getFromFilename(listRef, filename)
- if err != nil {
- return nil, err
- }
- manifests = append(manifests, manifest)
- }
- return manifests, nil
-}
-
-// listManifests stored in a transaction
-func (s *fsStore) listManifests(transaction string) ([]string, error) {
- transactionDir := filepath.Join(s.root, makeFilesafeName(transaction))
- fileInfos, err := os.ReadDir(transactionDir)
- switch {
- case os.IsNotExist(err):
- return nil, nil
- case err != nil:
- return nil, err
- }
-
- filenames := make([]string, 0, len(fileInfos))
- for _, info := range fileInfos {
- filenames = append(filenames, info.Name())
- }
- return filenames, nil
-}
-
-// Save a manifest as part of a local manifest list
-func (s *fsStore) Save(listRef reference.Reference, manifest reference.Reference, image types.ImageManifest) error {
- if err := s.createManifestListDirectory(listRef.String()); err != nil {
- return err
- }
- filename := manifestToFilename(s.root, listRef.String(), manifest.String())
- bytes, err := json.Marshal(image)
- if err != nil {
- return err
- }
- return os.WriteFile(filename, bytes, 0o644)
-}
-
-func (s *fsStore) createManifestListDirectory(transaction string) error {
- path := filepath.Join(s.root, makeFilesafeName(transaction))
- return os.MkdirAll(path, 0o755)
-}
-
-func manifestToFilename(root, manifestList, manifest string) string {
- return filepath.Join(root, makeFilesafeName(manifestList), makeFilesafeName(manifest))
-}
-
-func makeFilesafeName(ref string) string {
- fileName := strings.ReplaceAll(ref, ":", "-")
- return strings.ReplaceAll(fileName, "/", "_")
-}
-
-type notFoundError struct {
- object string
-}
-
-func newNotFoundError(ref string) *notFoundError {
- return &notFoundError{object: ref}
-}
-
-func (n *notFoundError) Error() string {
- return "No such manifest: " + n.object
-}
-
-// NotFound interface
-func (*notFoundError) NotFound() {}
-
-// IsNotFound returns true if the error is a not found error
-func IsNotFound(err error) bool {
- _, ok := err.(notFound)
- return ok
-}
-
-type notFound interface {
- NotFound()
-}
diff --git a/vendor/github.com/docker/cli/cli/manifest/types/types.go b/vendor/github.com/docker/cli/cli/manifest/types/types.go
deleted file mode 100644
index e098928d..00000000
--- a/vendor/github.com/docker/cli/cli/manifest/types/types.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package types
-
-import (
- "encoding/json"
-
- "github.com/distribution/reference"
- "github.com/docker/distribution"
- "github.com/docker/distribution/manifest/manifestlist"
- "github.com/docker/distribution/manifest/ocischema"
- "github.com/docker/distribution/manifest/schema2"
- "github.com/opencontainers/go-digest"
- ocispec "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
-)
-
-// ImageManifest contains info to output for a manifest object.
-type ImageManifest struct {
- Ref *SerializableNamed
- Descriptor ocispec.Descriptor
- Raw []byte `json:",omitempty"`
-
- // SchemaV2Manifest is used for inspection
- SchemaV2Manifest *schema2.DeserializedManifest `json:",omitempty"`
- // OCIManifest is used for inspection
- OCIManifest *ocischema.DeserializedManifest `json:",omitempty"`
-}
-
-// OCIPlatform creates an OCI platform from a manifest list platform spec
-func OCIPlatform(ps *manifestlist.PlatformSpec) *ocispec.Platform {
- if ps == nil {
- return nil
- }
- return &ocispec.Platform{
- Architecture: ps.Architecture,
- OS: ps.OS,
- OSVersion: ps.OSVersion,
- OSFeatures: ps.OSFeatures,
- Variant: ps.Variant,
- }
-}
-
-// PlatformSpecFromOCI creates a platform spec from OCI platform
-func PlatformSpecFromOCI(p *ocispec.Platform) *manifestlist.PlatformSpec {
- if p == nil {
- return nil
- }
- return &manifestlist.PlatformSpec{
- Architecture: p.Architecture,
- OS: p.OS,
- OSVersion: p.OSVersion,
- OSFeatures: p.OSFeatures,
- Variant: p.Variant,
- }
-}
-
-// Blobs returns the digests for all the blobs referenced by this manifest
-func (i ImageManifest) Blobs() []digest.Digest {
- var digests []digest.Digest
- switch {
- case i.SchemaV2Manifest != nil:
- refs := i.SchemaV2Manifest.References()
- digests = make([]digest.Digest, 0, len(refs))
- for _, descriptor := range refs {
- digests = append(digests, descriptor.Digest)
- }
- case i.OCIManifest != nil:
- refs := i.OCIManifest.References()
- digests = make([]digest.Digest, 0, len(refs))
- for _, descriptor := range refs {
- digests = append(digests, descriptor.Digest)
- }
- }
- return digests
-}
-
-// Payload returns the media type and bytes for the manifest
-func (i ImageManifest) Payload() (string, []byte, error) {
- // TODO: If available, read content from a content store by digest
- switch {
- case i.SchemaV2Manifest != nil:
- return i.SchemaV2Manifest.Payload()
- case i.OCIManifest != nil:
- return i.OCIManifest.Payload()
- default:
- return "", nil, errors.Errorf("%s has no payload", i.Ref)
- }
-}
-
-// References implements the distribution.Manifest interface. It delegates to
-// the underlying manifest.
-func (i ImageManifest) References() []distribution.Descriptor {
- switch {
- case i.SchemaV2Manifest != nil:
- return i.SchemaV2Manifest.References()
- case i.OCIManifest != nil:
- return i.OCIManifest.References()
- default:
- return nil
- }
-}
-
-// NewImageManifest returns a new ImageManifest object. The values for Platform
-// are initialized from those in the image
-func NewImageManifest(ref reference.Named, desc ocispec.Descriptor, manifest *schema2.DeserializedManifest) ImageManifest {
- raw, err := manifest.MarshalJSON()
- if err != nil {
- raw = nil
- }
-
- return ImageManifest{
- Ref: &SerializableNamed{Named: ref},
- Descriptor: desc,
- Raw: raw,
- SchemaV2Manifest: manifest,
- }
-}
-
-// NewOCIImageManifest returns a new ImageManifest object. The values for
-// Platform are initialized from those in the image
-func NewOCIImageManifest(ref reference.Named, desc ocispec.Descriptor, manifest *ocischema.DeserializedManifest) ImageManifest {
- raw, err := manifest.MarshalJSON()
- if err != nil {
- raw = nil
- }
-
- return ImageManifest{
- Ref: &SerializableNamed{Named: ref},
- Descriptor: desc,
- Raw: raw,
- OCIManifest: manifest,
- }
-}
-
-// SerializableNamed is a reference.Named that can be serialized and deserialized
-// from JSON
-type SerializableNamed struct {
- reference.Named
-}
-
-// UnmarshalJSON loads the Named reference from JSON bytes
-func (s *SerializableNamed) UnmarshalJSON(b []byte) error {
- var raw string
- if err := json.Unmarshal(b, &raw); err != nil {
- return errors.Wrapf(err, "invalid named reference bytes: %s", b)
- }
- var err error
- s.Named, err = reference.ParseNamed(raw)
- return err
-}
-
-// MarshalJSON returns the JSON bytes representation
-func (s *SerializableNamed) MarshalJSON() ([]byte, error) {
- return json.Marshal(s.String())
-}
diff --git a/vendor/github.com/docker/cli/cli/registry/client/client.go b/vendor/github.com/docker/cli/cli/registry/client/client.go
deleted file mode 100644
index bbc7f4c5..00000000
--- a/vendor/github.com/docker/cli/cli/registry/client/client.go
+++ /dev/null
@@ -1,198 +0,0 @@
-package client
-
-import (
- "context"
- "fmt"
- "net/http"
- "strings"
-
- "github.com/distribution/reference"
- manifesttypes "github.com/docker/cli/cli/manifest/types"
- "github.com/docker/cli/cli/trust"
- "github.com/docker/distribution"
- distributionclient "github.com/docker/distribution/registry/client"
- registrytypes "github.com/docker/docker/api/types/registry"
- "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
-)
-
-// RegistryClient is a client used to communicate with a Docker distribution
-// registry
-type RegistryClient interface {
- GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error)
- GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error)
- MountBlob(ctx context.Context, source reference.Canonical, target reference.Named) error
- PutManifest(ctx context.Context, ref reference.Named, manifest distribution.Manifest) (digest.Digest, error)
-}
-
-// NewRegistryClient returns a new RegistryClient with a resolver
-func NewRegistryClient(resolver AuthConfigResolver, userAgent string, insecure bool) RegistryClient {
- return &client{
- authConfigResolver: resolver,
- insecureRegistry: insecure,
- userAgent: userAgent,
- }
-}
-
-// AuthConfigResolver returns Auth Configuration for an index
-type AuthConfigResolver func(ctx context.Context, index *registrytypes.IndexInfo) registrytypes.AuthConfig
-
-// PutManifestOptions is the data sent to push a manifest
-type PutManifestOptions struct {
- MediaType string
- Payload []byte
-}
-
-type client struct {
- authConfigResolver AuthConfigResolver
- insecureRegistry bool
- userAgent string
-}
-
-// ErrBlobCreated returned when a blob mount request was created
-type ErrBlobCreated struct {
- From reference.Named
- Target reference.Named
-}
-
-func (err ErrBlobCreated) Error() string {
- return fmt.Sprintf("blob mounted from: %v to: %v",
- err.From, err.Target)
-}
-
-// ErrHTTPProto returned if attempting to use TLS with a non-TLS registry
-type ErrHTTPProto struct {
- OrigErr string
-}
-
-func (err ErrHTTPProto) Error() string {
- return err.OrigErr
-}
-
-var _ RegistryClient = &client{}
-
-// MountBlob into the registry, so it can be referenced by a manifest
-func (c *client) MountBlob(ctx context.Context, sourceRef reference.Canonical, targetRef reference.Named) error {
- repoEndpoint, err := newDefaultRepositoryEndpoint(targetRef, c.insecureRegistry)
- if err != nil {
- return err
- }
- repoEndpoint.actions = trust.ActionsPushAndPull
- repo, err := c.getRepositoryForReference(ctx, targetRef, repoEndpoint)
- if err != nil {
- return err
- }
- lu, err := repo.Blobs(ctx).Create(ctx, distributionclient.WithMountFrom(sourceRef))
- switch err.(type) {
- case distribution.ErrBlobMounted:
- logrus.Debugf("mount of blob %s succeeded", sourceRef)
- return nil
- case nil:
- default:
- return errors.Wrapf(err, "failed to mount blob %s to %s", sourceRef, targetRef)
- }
- lu.Cancel(ctx)
- logrus.Debugf("mount of blob %s created", sourceRef)
- return ErrBlobCreated{From: sourceRef, Target: targetRef}
-}
-
-// PutManifest sends the manifest to a registry and returns the new digest
-func (c *client) PutManifest(ctx context.Context, ref reference.Named, manifest distribution.Manifest) (digest.Digest, error) {
- repoEndpoint, err := newDefaultRepositoryEndpoint(ref, c.insecureRegistry)
- if err != nil {
- return "", err
- }
-
- repoEndpoint.actions = trust.ActionsPushAndPull
- repo, err := c.getRepositoryForReference(ctx, ref, repoEndpoint)
- if err != nil {
- return "", err
- }
-
- manifestService, err := repo.Manifests(ctx)
- if err != nil {
- return "", err
- }
-
- _, opts, err := getManifestOptionsFromReference(ref)
- if err != nil {
- return "", err
- }
-
- dgst, err := manifestService.Put(ctx, manifest, opts...)
- return dgst, errors.Wrapf(err, "failed to put manifest %s", ref)
-}
-
-func (c *client) getRepositoryForReference(ctx context.Context, ref reference.Named, repoEndpoint repositoryEndpoint) (distribution.Repository, error) {
- repoName, err := reference.WithName(repoEndpoint.Name())
- if err != nil {
- return nil, errors.Wrapf(err, "failed to parse repo name from %s", ref)
- }
- httpTransport, err := c.getHTTPTransportForRepoEndpoint(ctx, repoEndpoint)
- if err != nil {
- if !strings.Contains(err.Error(), "server gave HTTP response to HTTPS client") {
- return nil, err
- }
- if !repoEndpoint.endpoint.TLSConfig.InsecureSkipVerify {
- return nil, ErrHTTPProto{OrigErr: err.Error()}
- }
- // --insecure was set; fall back to plain HTTP
- if url := repoEndpoint.endpoint.URL; url != nil && url.Scheme == "https" {
- url.Scheme = "http"
- httpTransport, err = c.getHTTPTransportForRepoEndpoint(ctx, repoEndpoint)
- if err != nil {
- return nil, err
- }
- }
- }
- return distributionclient.NewRepository(repoName, repoEndpoint.BaseURL(), httpTransport)
-}
-
-func (c *client) getHTTPTransportForRepoEndpoint(ctx context.Context, repoEndpoint repositoryEndpoint) (http.RoundTripper, error) {
- httpTransport, err := getHTTPTransport(
- c.authConfigResolver(ctx, repoEndpoint.info.Index),
- repoEndpoint.endpoint,
- repoEndpoint.Name(),
- c.userAgent,
- repoEndpoint.actions,
- )
- return httpTransport, errors.Wrap(err, "failed to configure transport")
-}
-
-// GetManifest returns an ImageManifest for the reference
-func (c *client) GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) {
- var result manifesttypes.ImageManifest
- fetch := func(ctx context.Context, repo distribution.Repository, ref reference.Named) (bool, error) {
- var err error
- result, err = fetchManifest(ctx, repo, ref)
- return result.Ref != nil, err
- }
-
- err := c.iterateEndpoints(ctx, ref, fetch)
- return result, err
-}
-
-// GetManifestList returns a list of ImageManifest for the reference
-func (c *client) GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) {
- result := []manifesttypes.ImageManifest{}
- fetch := func(ctx context.Context, repo distribution.Repository, ref reference.Named) (bool, error) {
- var err error
- result, err = fetchList(ctx, repo, ref)
- return len(result) > 0, err
- }
-
- err := c.iterateEndpoints(ctx, ref, fetch)
- return result, err
-}
-
-func getManifestOptionsFromReference(ref reference.Named) (digest.Digest, []distribution.ManifestServiceOption, error) {
- if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
- tag := tagged.Tag()
- return "", []distribution.ManifestServiceOption{distribution.WithTag(tag)}, nil
- }
- if digested, isDigested := ref.(reference.Canonical); isDigested {
- return digested.Digest(), []distribution.ManifestServiceOption{}, nil
- }
- return "", nil, errors.Errorf("%s no tag or digest", ref)
-}
diff --git a/vendor/github.com/docker/cli/cli/registry/client/endpoint.go b/vendor/github.com/docker/cli/cli/registry/client/endpoint.go
deleted file mode 100644
index 95312b05..00000000
--- a/vendor/github.com/docker/cli/cli/registry/client/endpoint.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package client
-
-import (
- "net"
- "net/http"
- "time"
-
- "github.com/distribution/reference"
- "github.com/docker/cli/cli/trust"
- "github.com/docker/distribution/registry/client/auth"
- "github.com/docker/distribution/registry/client/transport"
- registrytypes "github.com/docker/docker/api/types/registry"
- "github.com/docker/docker/registry"
- "github.com/pkg/errors"
-)
-
-type repositoryEndpoint struct {
- info *registry.RepositoryInfo
- endpoint registry.APIEndpoint
- actions []string
-}
-
-// Name returns the repository name
-func (r repositoryEndpoint) Name() string {
- return reference.Path(r.info.Name)
-}
-
-// BaseURL returns the endpoint url
-func (r repositoryEndpoint) BaseURL() string {
- return r.endpoint.URL.String()
-}
-
-func newDefaultRepositoryEndpoint(ref reference.Named, insecure bool) (repositoryEndpoint, error) {
- repoInfo, err := registry.ParseRepositoryInfo(ref)
- if err != nil {
- return repositoryEndpoint{}, err
- }
- endpoint, err := getDefaultEndpointFromRepoInfo(repoInfo)
- if err != nil {
- return repositoryEndpoint{}, err
- }
- if insecure {
- endpoint.TLSConfig.InsecureSkipVerify = true
- }
- return repositoryEndpoint{info: repoInfo, endpoint: endpoint}, nil
-}
-
-func getDefaultEndpointFromRepoInfo(repoInfo *registry.RepositoryInfo) (registry.APIEndpoint, error) {
- var err error
-
- options := registry.ServiceOptions{}
- registryService, err := registry.NewService(options)
- if err != nil {
- return registry.APIEndpoint{}, err
- }
- endpoints, err := registryService.LookupPushEndpoints(reference.Domain(repoInfo.Name))
- if err != nil {
- return registry.APIEndpoint{}, err
- }
- // Default to the highest priority endpoint to return
- endpoint := endpoints[0]
- if !repoInfo.Index.Secure {
- for _, ep := range endpoints {
- if ep.URL.Scheme == "http" {
- endpoint = ep
- }
- }
- }
- return endpoint, nil
-}
-
-// getHTTPTransport builds a transport for use in communicating with a registry
-func getHTTPTransport(authConfig registrytypes.AuthConfig, endpoint registry.APIEndpoint, repoName, userAgent string, actions []string) (http.RoundTripper, error) {
- // get the http transport, this will be used in a client to upload manifest
- base := &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }).Dial,
- TLSHandshakeTimeout: 10 * time.Second,
- TLSClientConfig: endpoint.TLSConfig,
- DisableKeepAlives: true,
- }
-
- modifiers := registry.Headers(userAgent, http.Header{})
- authTransport := transport.NewTransport(base, modifiers...)
- challengeManager, err := registry.PingV2Registry(endpoint.URL, authTransport)
- if err != nil {
- return nil, errors.Wrap(err, "error pinging v2 registry")
- }
- if authConfig.RegistryToken != "" {
- passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken}
- modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler))
- } else {
- if len(actions) == 0 {
- actions = trust.ActionsPullOnly
- }
- creds := registry.NewStaticCredentialStore(&authConfig)
- tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, actions...)
- basicHandler := auth.NewBasicHandler(creds)
- modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
- }
- return transport.NewTransport(base, modifiers...), nil
-}
-
-// RepoNameForReference returns the repository name from a reference
-func RepoNameForReference(ref reference.Named) (string, error) {
- // insecure is fine since this only returns the name
- repo, err := newDefaultRepositoryEndpoint(ref, false)
- if err != nil {
- return "", err
- }
- return repo.Name(), nil
-}
-
-type existingTokenHandler struct {
- token string
-}
-
-func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, _ map[string]string) error {
- req.Header.Set("Authorization", "Bearer "+th.token)
- return nil
-}
-
-func (*existingTokenHandler) Scheme() string {
- return "bearer"
-}
diff --git a/vendor/github.com/docker/cli/cli/registry/client/fetcher.go b/vendor/github.com/docker/cli/cli/registry/client/fetcher.go
deleted file mode 100644
index d1f255bf..00000000
--- a/vendor/github.com/docker/cli/cli/registry/client/fetcher.go
+++ /dev/null
@@ -1,307 +0,0 @@
-package client
-
-import (
- "context"
- "encoding/json"
-
- "github.com/distribution/reference"
- "github.com/docker/cli/cli/manifest/types"
- "github.com/docker/distribution"
- "github.com/docker/distribution/manifest/manifestlist"
- "github.com/docker/distribution/manifest/ocischema"
- "github.com/docker/distribution/manifest/schema2"
- "github.com/docker/distribution/registry/api/errcode"
- v2 "github.com/docker/distribution/registry/api/v2"
- distclient "github.com/docker/distribution/registry/client"
- "github.com/docker/docker/registry"
- "github.com/opencontainers/go-digest"
- ocispec "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
-)
-
-// fetchManifest pulls a manifest from a registry and returns it. An error
-// is returned if no manifest is found matching namedRef.
-func fetchManifest(ctx context.Context, repo distribution.Repository, ref reference.Named) (types.ImageManifest, error) {
- manifest, err := getManifest(ctx, repo, ref)
- if err != nil {
- return types.ImageManifest{}, err
- }
-
- switch v := manifest.(type) {
- // Removed Schema 1 support
- case *schema2.DeserializedManifest:
- return pullManifestSchemaV2(ctx, ref, repo, *v)
- case *ocischema.DeserializedManifest:
- return pullManifestOCISchema(ctx, ref, repo, *v)
- case *manifestlist.DeserializedManifestList:
- return types.ImageManifest{}, errors.Errorf("%s is a manifest list", ref)
- }
- return types.ImageManifest{}, errors.Errorf("%s is not a manifest", ref)
-}
-
-func fetchList(ctx context.Context, repo distribution.Repository, ref reference.Named) ([]types.ImageManifest, error) {
- manifest, err := getManifest(ctx, repo, ref)
- if err != nil {
- return nil, err
- }
-
- switch v := manifest.(type) {
- case *manifestlist.DeserializedManifestList:
- return pullManifestList(ctx, ref, repo, *v)
- default:
- return nil, errors.Errorf("unsupported manifest format: %v", v)
- }
-}
-
-func getManifest(ctx context.Context, repo distribution.Repository, ref reference.Named) (distribution.Manifest, error) {
- manSvc, err := repo.Manifests(ctx)
- if err != nil {
- return nil, err
- }
-
- dgst, opts, err := getManifestOptionsFromReference(ref)
- if err != nil {
- return nil, errors.Errorf("image manifest for %q does not exist", ref)
- }
- return manSvc.Get(ctx, dgst, opts...)
-}
-
-func pullManifestSchemaV2(ctx context.Context, ref reference.Named, repo distribution.Repository, mfst schema2.DeserializedManifest) (types.ImageManifest, error) {
- manifestDesc, err := validateManifestDigest(ref, mfst)
- if err != nil {
- return types.ImageManifest{}, err
- }
- configJSON, err := pullManifestSchemaV2ImageConfig(ctx, mfst.Target().Digest, repo)
- if err != nil {
- return types.ImageManifest{}, err
- }
-
- if manifestDesc.Platform == nil {
- manifestDesc.Platform = &ocispec.Platform{}
- }
-
- // Fill in os and architecture fields from config JSON
- if err := json.Unmarshal(configJSON, manifestDesc.Platform); err != nil {
- return types.ImageManifest{}, err
- }
-
- return types.NewImageManifest(ref, manifestDesc, &mfst), nil
-}
-
-func pullManifestOCISchema(ctx context.Context, ref reference.Named, repo distribution.Repository, mfst ocischema.DeserializedManifest) (types.ImageManifest, error) {
- manifestDesc, err := validateManifestDigest(ref, mfst)
- if err != nil {
- return types.ImageManifest{}, err
- }
- configJSON, err := pullManifestSchemaV2ImageConfig(ctx, mfst.Target().Digest, repo)
- if err != nil {
- return types.ImageManifest{}, err
- }
-
- if manifestDesc.Platform == nil {
- manifestDesc.Platform = &ocispec.Platform{}
- }
-
- // Fill in os and architecture fields from config JSON
- if err := json.Unmarshal(configJSON, manifestDesc.Platform); err != nil {
- return types.ImageManifest{}, err
- }
-
- return types.NewOCIImageManifest(ref, manifestDesc, &mfst), nil
-}
-
-func pullManifestSchemaV2ImageConfig(ctx context.Context, dgst digest.Digest, repo distribution.Repository) ([]byte, error) {
- blobs := repo.Blobs(ctx)
- configJSON, err := blobs.Get(ctx, dgst)
- if err != nil {
- return nil, err
- }
-
- verifier := dgst.Verifier()
- if _, err := verifier.Write(configJSON); err != nil {
- return nil, err
- }
- if !verifier.Verified() {
- return nil, errors.Errorf("image config verification failed for digest %s", dgst)
- }
- return configJSON, nil
-}
-
-// validateManifestDigest computes the manifest digest, and, if pulling by
-// digest, ensures that it matches the requested digest.
-func validateManifestDigest(ref reference.Named, mfst distribution.Manifest) (ocispec.Descriptor, error) {
- mediaType, canonical, err := mfst.Payload()
- if err != nil {
- return ocispec.Descriptor{}, err
- }
- desc := ocispec.Descriptor{
- Digest: digest.FromBytes(canonical),
- Size: int64(len(canonical)),
- MediaType: mediaType,
- }
-
- // If pull by digest, then verify the manifest digest.
- if digested, isDigested := ref.(reference.Canonical); isDigested && digested.Digest() != desc.Digest {
- return ocispec.Descriptor{}, errors.Errorf("manifest verification failed for digest %s", digested.Digest())
- }
-
- return desc, nil
-}
-
-// pullManifestList handles "manifest lists" which point to various
-// platform-specific manifests.
-func pullManifestList(ctx context.Context, ref reference.Named, repo distribution.Repository, mfstList manifestlist.DeserializedManifestList) ([]types.ImageManifest, error) {
- if _, err := validateManifestDigest(ref, mfstList); err != nil {
- return nil, err
- }
-
- infos := make([]types.ImageManifest, 0, len(mfstList.Manifests))
- for _, manifestDescriptor := range mfstList.Manifests {
- manSvc, err := repo.Manifests(ctx)
- if err != nil {
- return nil, err
- }
- manifest, err := manSvc.Get(ctx, manifestDescriptor.Digest)
- if err != nil {
- return nil, err
- }
-
- manifestRef, err := reference.WithDigest(ref, manifestDescriptor.Digest)
- if err != nil {
- return nil, err
- }
-
- var imageManifest types.ImageManifest
- switch v := manifest.(type) {
- case *schema2.DeserializedManifest:
- imageManifest, err = pullManifestSchemaV2(ctx, manifestRef, repo, *v)
- case *ocischema.DeserializedManifest:
- imageManifest, err = pullManifestOCISchema(ctx, manifestRef, repo, *v)
- default:
- err = errors.Errorf("unsupported manifest type: %T", manifest)
- }
- if err != nil {
- return nil, err
- }
-
- // Replace platform from config
- p := manifestDescriptor.Platform
- imageManifest.Descriptor.Platform = types.OCIPlatform(&p)
-
- infos = append(infos, imageManifest)
- }
- return infos, nil
-}
-
-func continueOnError(err error) bool {
- switch v := err.(type) {
- case errcode.Errors:
- if len(v) == 0 {
- return true
- }
- return continueOnError(v[0])
- case errcode.Error:
- switch e := err.(errcode.Error); e.Code {
- case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown:
- return true
- default:
- return false
- }
- case *distclient.UnexpectedHTTPResponseError:
- return true
- }
- return false
-}
-
-func (c *client) iterateEndpoints(ctx context.Context, namedRef reference.Named, each func(context.Context, distribution.Repository, reference.Named) (bool, error)) error {
- endpoints, err := allEndpoints(namedRef, c.insecureRegistry)
- if err != nil {
- return err
- }
-
- repoInfo, err := registry.ParseRepositoryInfo(namedRef)
- if err != nil {
- return err
- }
-
- confirmedTLSRegistries := make(map[string]bool)
- for _, endpoint := range endpoints {
- if endpoint.URL.Scheme != "https" {
- if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS {
- logrus.Debugf("skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL)
- continue
- }
- }
-
- if c.insecureRegistry {
- endpoint.TLSConfig.InsecureSkipVerify = true
- }
- repoEndpoint := repositoryEndpoint{endpoint: endpoint, info: repoInfo}
- repo, err := c.getRepositoryForReference(ctx, namedRef, repoEndpoint)
- if err != nil {
- logrus.Debugf("error %s with repo endpoint %+v", err, repoEndpoint)
- if _, ok := err.(ErrHTTPProto); ok {
- continue
- }
- return err
- }
-
- if endpoint.URL.Scheme == "http" && !c.insecureRegistry {
- logrus.Debugf("skipping non-tls registry endpoint: %s", endpoint.URL)
- continue
- }
- done, err := each(ctx, repo, namedRef)
- if err != nil {
- if continueOnError(err) {
- if endpoint.URL.Scheme == "https" {
- confirmedTLSRegistries[endpoint.URL.Host] = true
- }
- logrus.Debugf("continuing on error (%T) %s", err, err)
- continue
- }
- logrus.Debugf("not continuing on error (%T) %s", err, err)
- return err
- }
- if done {
- return nil
- }
- }
- return newNotFoundError(namedRef.String())
-}
-
-// allEndpoints returns a list of endpoints ordered by priority (v2, http).
-func allEndpoints(namedRef reference.Named, insecure bool) ([]registry.APIEndpoint, error) {
- repoInfo, err := registry.ParseRepositoryInfo(namedRef)
- if err != nil {
- return nil, err
- }
-
- var serviceOpts registry.ServiceOptions
- if insecure {
- logrus.Debugf("allowing insecure registry for: %s", reference.Domain(namedRef))
- serviceOpts.InsecureRegistries = []string{reference.Domain(namedRef)}
- }
- registryService, err := registry.NewService(serviceOpts)
- if err != nil {
- return []registry.APIEndpoint{}, err
- }
- endpoints, err := registryService.LookupPullEndpoints(reference.Domain(repoInfo.Name))
- logrus.Debugf("endpoints for %s: %v", namedRef, endpoints)
- return endpoints, err
-}
-
-func newNotFoundError(ref string) *notFoundError {
- return &notFoundError{err: errors.New("no such manifest: " + ref)}
-}
-
-type notFoundError struct {
- err error
-}
-
-func (n *notFoundError) Error() string {
- return n.err.Error()
-}
-
-// NotFound satisfies interface github.com/docker/docker/errdefs.ErrNotFound
-func (notFoundError) NotFound() {}
diff --git a/vendor/github.com/docker/cli/cli/trust/trust.go b/vendor/github.com/docker/cli/cli/trust/trust.go
deleted file mode 100644
index bb7e597a..00000000
--- a/vendor/github.com/docker/cli/cli/trust/trust.go
+++ /dev/null
@@ -1,376 +0,0 @@
-package trust
-
-import (
- "context"
- "encoding/json"
- "io"
- "net"
- "net/http"
- "net/url"
- "os"
- "path"
- "path/filepath"
- "time"
-
- "github.com/distribution/reference"
- "github.com/docker/cli/cli/config"
- "github.com/docker/distribution/registry/client/auth"
- "github.com/docker/distribution/registry/client/auth/challenge"
- "github.com/docker/distribution/registry/client/transport"
- registrytypes "github.com/docker/docker/api/types/registry"
- "github.com/docker/docker/registry"
- "github.com/docker/go-connections/tlsconfig"
- "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- "github.com/theupdateframework/notary"
- "github.com/theupdateframework/notary/client"
- "github.com/theupdateframework/notary/passphrase"
- "github.com/theupdateframework/notary/storage"
- "github.com/theupdateframework/notary/trustmanager"
- "github.com/theupdateframework/notary/trustpinning"
- "github.com/theupdateframework/notary/tuf/data"
- "github.com/theupdateframework/notary/tuf/signed"
-)
-
-var (
- // ReleasesRole is the role named "releases"
- ReleasesRole = data.RoleName(path.Join(data.CanonicalTargetsRole.String(), "releases"))
- // ActionsPullOnly defines the actions for read-only interactions with a Notary Repository
- ActionsPullOnly = []string{"pull"}
- // ActionsPushAndPull defines the actions for read-write interactions with a Notary Repository
- ActionsPushAndPull = []string{"pull", "push"}
- // NotaryServer is the endpoint serving the Notary trust server
- NotaryServer = "https://notary.docker.io"
-)
-
-// GetTrustDirectory returns the base trust directory name
-func GetTrustDirectory() string {
- return filepath.Join(config.Dir(), "trust")
-}
-
-// certificateDirectory returns the directory containing
-// TLS certificates for the given server. An error is
-// returned if there was an error parsing the server string.
-func certificateDirectory(server string) (string, error) {
- u, err := url.Parse(server)
- if err != nil {
- return "", err
- }
-
- return filepath.Join(config.Dir(), "tls", u.Host), nil
-}
-
-// Server returns the base URL for the trust server.
-func Server(index *registrytypes.IndexInfo) (string, error) {
- if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" {
- urlObj, err := url.Parse(s)
- if err != nil || urlObj.Scheme != "https" {
- return "", errors.Errorf("valid https URL required for trust server, got %s", s)
- }
-
- return s, nil
- }
- if index.Official {
- return NotaryServer, nil
- }
- return "https://" + index.Name, nil
-}
-
-type simpleCredentialStore struct {
- auth registrytypes.AuthConfig
-}
-
-func (scs simpleCredentialStore) Basic(*url.URL) (string, string) {
- return scs.auth.Username, scs.auth.Password
-}
-
-func (scs simpleCredentialStore) RefreshToken(*url.URL, string) string {
- return scs.auth.IdentityToken
-}
-
-func (simpleCredentialStore) SetRefreshToken(*url.URL, string, string) {}
-
-// GetNotaryRepository returns a NotaryRepository which stores all the
-// information needed to operate on a notary repository.
-// It creates an HTTP transport providing authentication support.
-func GetNotaryRepository(in io.Reader, out io.Writer, userAgent string, repoInfo *registry.RepositoryInfo, authConfig *registrytypes.AuthConfig, actions ...string) (client.Repository, error) {
- server, err := Server(repoInfo.Index)
- if err != nil {
- return nil, err
- }
-
- cfg := tlsconfig.ClientDefault()
- cfg.InsecureSkipVerify = !repoInfo.Index.Secure
-
- // Get certificate base directory
- certDir, err := certificateDirectory(server)
- if err != nil {
- return nil, err
- }
- logrus.Debugf("reading certificate directory: %s", certDir)
-
- if err := registry.ReadCertsDirectory(cfg, certDir); err != nil {
- return nil, err
- }
-
- base := &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }).Dial,
- TLSHandshakeTimeout: 10 * time.Second,
- TLSClientConfig: cfg,
- DisableKeepAlives: true,
- }
-
- // Skip configuration headers since request is not going to Docker daemon
- modifiers := registry.Headers(userAgent, http.Header{})
- authTransport := transport.NewTransport(base, modifiers...)
- pingClient := &http.Client{
- Transport: authTransport,
- Timeout: 5 * time.Second,
- }
- endpointStr := server + "/v2/"
- req, err := http.NewRequest(http.MethodGet, endpointStr, nil)
- if err != nil {
- return nil, err
- }
-
- challengeManager := challenge.NewSimpleManager()
-
- resp, err := pingClient.Do(req)
- if err != nil {
- // Ignore error on ping to operate in offline mode
- logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err)
- } else {
- defer resp.Body.Close()
-
- // Add response to the challenge manager to parse out
- // authentication header and register authentication method
- if err := challengeManager.AddResponse(resp); err != nil {
- return nil, err
- }
- }
-
- scope := auth.RepositoryScope{
- Repository: repoInfo.Name.Name(),
- Actions: actions,
- }
- creds := simpleCredentialStore{auth: *authConfig}
- tokenHandlerOptions := auth.TokenHandlerOptions{
- Transport: authTransport,
- Credentials: creds,
- Scopes: []auth.Scope{scope},
- ClientID: registry.AuthClientID,
- }
- tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
- basicHandler := auth.NewBasicHandler(creds)
- modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
- tr := transport.NewTransport(base, modifiers...)
-
- return client.NewFileCachedRepository(
- GetTrustDirectory(),
- data.GUN(repoInfo.Name.Name()),
- server,
- tr,
- GetPassphraseRetriever(in, out),
- trustpinning.TrustPinConfig{})
-}
-
-// GetPassphraseRetriever returns a passphrase retriever that utilizes Content Trust env vars
-func GetPassphraseRetriever(in io.Reader, out io.Writer) notary.PassRetriever {
- aliasMap := map[string]string{
- "root": "root",
- "snapshot": "repository",
- "targets": "repository",
- "default": "repository",
- }
- baseRetriever := passphrase.PromptRetrieverWithInOut(in, out, aliasMap)
- env := map[string]string{
- "root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"),
- "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
- "targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
- "default": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
- }
-
- return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) {
- if v := env[alias]; v != "" {
- return v, numAttempts > 1, nil
- }
- // For non-root roles, we can also try the "default" alias if it is specified
- if v := env["default"]; v != "" && alias != data.CanonicalRootRole.String() {
- return v, numAttempts > 1, nil
- }
- return baseRetriever(keyName, alias, createNew, numAttempts)
- }
-}
-
-// NotaryError formats an error message received from the notary service
-func NotaryError(repoName string, err error) error {
- switch err.(type) {
- case *json.SyntaxError:
- logrus.Debugf("Notary syntax error: %s", err)
- return errors.Errorf("Error: no trust data available for remote repository %s. Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName)
- case signed.ErrExpired:
- return errors.Errorf("Error: remote repository %s out-of-date: %v", repoName, err)
- case trustmanager.ErrKeyNotFound:
- return errors.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err)
- case storage.NetworkError:
- return errors.Errorf("Error: error contacting notary server: %v", err)
- case storage.ErrMetaNotFound:
- return errors.Errorf("Error: trust data missing for remote repository %s or remote repository not found: %v", repoName, err)
- case trustpinning.ErrRootRotationFail, trustpinning.ErrValidationFail, signed.ErrInvalidKeyType:
- return errors.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err)
- case signed.ErrNoKeys:
- return errors.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err)
- case signed.ErrLowVersion:
- return errors.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err)
- case signed.ErrRoleThreshold:
- return errors.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err)
- case client.ErrRepositoryNotExist:
- return errors.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err)
- case signed.ErrInsufficientSignatures:
- return errors.Errorf("Error: could not produce valid signature for %s. If Yubikey was used, was touch input provided?: %v", repoName, err)
- }
-
- return err
-}
-
-// GetSignableRoles returns a list of roles for which we have valid signing
-// keys, given a notary repository and a target
-func GetSignableRoles(repo client.Repository, target *client.Target) ([]data.RoleName, error) {
- var signableRoles []data.RoleName
-
- // translate the full key names, which includes the GUN, into just the key IDs
- allCanonicalKeyIDs := make(map[string]struct{})
- for fullKeyID := range repo.GetCryptoService().ListAllKeys() {
- allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{}
- }
-
- allDelegationRoles, err := repo.GetDelegationRoles()
- if err != nil {
- return signableRoles, err
- }
-
- // if there are no delegation roles, then just try to sign it into the targets role
- if len(allDelegationRoles) == 0 {
- signableRoles = append(signableRoles, data.CanonicalTargetsRole)
- return signableRoles, nil
- }
-
- // there are delegation roles, find every delegation role we have a key for,
- // and attempt to sign in to all those roles.
- for _, delegationRole := range allDelegationRoles {
- // We do not support signing any delegation role that isn't a direct child of the targets role.
- // Also don't bother checking the keys if we can't add the target
- // to this role due to path restrictions
- if path.Dir(delegationRole.Name.String()) != data.CanonicalTargetsRole.String() || !delegationRole.CheckPaths(target.Name) {
- continue
- }
-
- for _, canonicalKeyID := range delegationRole.KeyIDs {
- if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok {
- signableRoles = append(signableRoles, delegationRole.Name)
- break
- }
- }
- }
-
- if len(signableRoles) == 0 {
- return signableRoles, errors.Errorf("no valid signing keys for delegation roles")
- }
-
- return signableRoles, nil
-}
-
-// ImageRefAndAuth contains all reference information and the auth config for an image request
-type ImageRefAndAuth struct {
- original string
- authConfig *registrytypes.AuthConfig
- reference reference.Named
- repoInfo *registry.RepositoryInfo
- tag string
- digest digest.Digest
-}
-
-// GetImageReferencesAndAuth retrieves the necessary reference and auth information for an image name
-// as an ImageRefAndAuth struct
-func GetImageReferencesAndAuth(ctx context.Context,
- authResolver func(ctx context.Context, index *registrytypes.IndexInfo) registrytypes.AuthConfig,
- imgName string,
-) (ImageRefAndAuth, error) {
- ref, err := reference.ParseNormalizedNamed(imgName)
- if err != nil {
- return ImageRefAndAuth{}, err
- }
-
- // Resolve the Repository name from fqn to RepositoryInfo
- repoInfo, err := registry.ParseRepositoryInfo(ref)
- if err != nil {
- return ImageRefAndAuth{}, err
- }
-
- authConfig := authResolver(ctx, repoInfo.Index)
- return ImageRefAndAuth{
- original: imgName,
- authConfig: &authConfig,
- reference: ref,
- repoInfo: repoInfo,
- tag: getTag(ref),
- digest: getDigest(ref),
- }, nil
-}
-
-func getTag(ref reference.Named) string {
- switch x := ref.(type) {
- case reference.Canonical, reference.Digested:
- return ""
- case reference.NamedTagged:
- return x.Tag()
- default:
- return ""
- }
-}
-
-func getDigest(ref reference.Named) digest.Digest {
- switch x := ref.(type) {
- case reference.Canonical:
- return x.Digest()
- case reference.Digested:
- return x.Digest()
- default:
- return digest.Digest("")
- }
-}
-
-// AuthConfig returns the auth information (username, etc) for a given ImageRefAndAuth
-func (imgRefAuth *ImageRefAndAuth) AuthConfig() *registrytypes.AuthConfig {
- return imgRefAuth.authConfig
-}
-
-// Reference returns the Image reference for a given ImageRefAndAuth
-func (imgRefAuth *ImageRefAndAuth) Reference() reference.Named {
- return imgRefAuth.reference
-}
-
-// RepoInfo returns the repository information for a given ImageRefAndAuth
-func (imgRefAuth *ImageRefAndAuth) RepoInfo() *registry.RepositoryInfo {
- return imgRefAuth.repoInfo
-}
-
-// Tag returns the Image tag for a given ImageRefAndAuth
-func (imgRefAuth *ImageRefAndAuth) Tag() string {
- return imgRefAuth.tag
-}
-
-// Digest returns the Image digest for a given ImageRefAndAuth
-func (imgRefAuth *ImageRefAndAuth) Digest() digest.Digest {
- return imgRefAuth.digest
-}
-
-// Name returns the image name used to initialize the ImageRefAndAuth
-func (imgRefAuth *ImageRefAndAuth) Name() string {
- return imgRefAuth.original
-}
diff --git a/vendor/github.com/docker/cli/internal/lazyregexp/lazyregexp.go b/vendor/github.com/docker/cli/internal/lazyregexp/lazyregexp.go
new file mode 100644
index 00000000..62e29c55
--- /dev/null
+++ b/vendor/github.com/docker/cli/internal/lazyregexp/lazyregexp.go
@@ -0,0 +1,98 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code below was largely copied from golang.org/x/mod@v0.22;
+// https://github.com/golang/mod/blob/v0.22.0/internal/lazyregexp/lazyre.go
+// with some additional methods added.
+
+// Package lazyregexp is a thin wrapper over regexp, allowing the use of global
+// regexp variables without forcing them to be compiled at init.
+package lazyregexp
+
+import (
+ "os"
+ "regexp"
+ "strings"
+ "sync"
+)
+
+// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be
+// compiled the first time it is needed.
+type Regexp struct {
+ str string
+ once sync.Once
+ rx *regexp.Regexp
+}
+
+func (r *Regexp) re() *regexp.Regexp {
+ r.once.Do(r.build)
+ return r.rx
+}
+
+func (r *Regexp) build() {
+ r.rx = regexp.MustCompile(r.str)
+ r.str = ""
+}
+
+func (r *Regexp) FindSubmatch(s []byte) [][]byte {
+ return r.re().FindSubmatch(s)
+}
+
+func (r *Regexp) FindAllStringSubmatch(s string, n int) [][]string {
+ return r.re().FindAllStringSubmatch(s, n)
+}
+
+func (r *Regexp) FindStringSubmatch(s string) []string {
+ return r.re().FindStringSubmatch(s)
+}
+
+func (r *Regexp) FindStringSubmatchIndex(s string) []int {
+ return r.re().FindStringSubmatchIndex(s)
+}
+
+func (r *Regexp) ReplaceAllString(src, repl string) string {
+ return r.re().ReplaceAllString(src, repl)
+}
+
+func (r *Regexp) FindString(s string) string {
+ return r.re().FindString(s)
+}
+
+func (r *Regexp) FindAllString(s string, n int) []string {
+ return r.re().FindAllString(s, n)
+}
+
+func (r *Regexp) MatchString(s string) bool {
+ return r.re().MatchString(s)
+}
+
+func (r *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string {
+ return r.re().ReplaceAllStringFunc(src, repl)
+}
+
+func (r *Regexp) ReplaceAllLiteralString(src, repl string) string {
+ return r.re().ReplaceAllLiteralString(src, repl)
+}
+
+func (r *Regexp) String() string {
+ return r.re().String()
+}
+
+func (r *Regexp) SubexpNames() []string {
+ return r.re().SubexpNames()
+}
+
+var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
+
+// New creates a new lazy regexp, delaying the compiling work until it is first
+// needed. If the code is being run as part of tests, the regexp compiling will
+// happen immediately.
+func New(str string) *Regexp {
+ lr := &Regexp{str: str}
+ if inTest {
+ // In tests, always compile the regexps early.
+ lr.re()
+ }
+ return lr
+}
diff --git a/vendor/github.com/docker/cli/internal/prompt/prompt.go b/vendor/github.com/docker/cli/internal/prompt/prompt.go
new file mode 100644
index 00000000..4d47a10e
--- /dev/null
+++ b/vendor/github.com/docker/cli/internal/prompt/prompt.go
@@ -0,0 +1,116 @@
+// Package prompt provides utilities to prompt the user for input.
+
+package prompt
+
+import (
+ "bufio"
+ "context"
+ "io"
+ "os"
+ "runtime"
+ "strings"
+
+ "github.com/docker/cli/cli/streams"
+ "github.com/moby/term"
+)
+
+const ErrTerminated cancelledErr = "prompt terminated"
+
+type cancelledErr string
+
+func (e cancelledErr) Error() string {
+ return string(e)
+}
+
+func (cancelledErr) Cancelled() {}
+
+// DisableInputEcho disables input echo on the provided streams.In.
+// This is useful when the user provides sensitive information like passwords.
+// The function returns a restore function that should be called to restore the
+// terminal state.
+//
+// TODO(thaJeztah): implement without depending on streams?
+func DisableInputEcho(ins *streams.In) (restore func() error, _ error) {
+ oldState, err := term.SaveState(ins.FD())
+ if err != nil {
+ return nil, err
+ }
+ restore = func() error {
+ return term.RestoreTerminal(ins.FD(), oldState)
+ }
+ return restore, term.DisableEcho(ins.FD(), oldState)
+}
+
+// ReadInput requests input from the user.
+//
+// It returns an empty string ("") with an [ErrTerminated] if the user terminates
+// the CLI with SIGINT or SIGTERM while the prompt is active. If the prompt
+// returns an error, the caller should close the [io.Reader] used for the prompt
+// and propagate the error up the stack to prevent the background goroutine
+// from blocking indefinitely.
+func ReadInput(ctx context.Context, in io.Reader, out io.Writer, message string) (string, error) {
+ _, _ = out.Write([]byte(message))
+
+ result := make(chan string)
+ go func() {
+ scanner := bufio.NewScanner(in)
+ if scanner.Scan() {
+ result <- strings.TrimSpace(scanner.Text())
+ }
+ }()
+
+ select {
+ case <-ctx.Done():
+ _, _ = out.Write([]byte("\n"))
+ return "", ErrTerminated
+ case r := <-result:
+ return r, nil
+ }
+}
+
+// Confirm requests and checks confirmation from the user.
+//
+// It displays the provided message followed by "[y/N]". If the user
+// input 'y' or 'Y' it returns true otherwise false. If no message is provided,
+// "Are you sure you want to proceed? [y/N] " will be used instead.
+//
+// It returns false with an [ErrTerminated] if the user terminates
+// the CLI with SIGINT or SIGTERM while the prompt is active. If the prompt
+// returns an error, the caller should close the [io.Reader] used for the prompt
+// and propagate the error up the stack to prevent the background goroutine
+// from blocking indefinitely.
+func Confirm(ctx context.Context, in io.Reader, out io.Writer, message string) (bool, error) {
+ if message == "" {
+ message = "Are you sure you want to proceed?"
+ }
+ message += " [y/N] "
+
+ _, _ = out.Write([]byte(message))
+
+ // On Windows, force the use of the regular OS stdin stream.
+ if runtime.GOOS == "windows" {
+ in = streams.NewIn(os.Stdin)
+ }
+
+ result := make(chan bool)
+
+ go func() {
+ var res bool
+ scanner := bufio.NewScanner(in)
+ if scanner.Scan() {
+ answer := strings.TrimSpace(scanner.Text())
+ if strings.EqualFold(answer, "y") {
+ res = true
+ }
+ }
+ result <- res
+ }()
+
+ select {
+ case <-ctx.Done():
+ _, _ = out.Write([]byte("\n"))
+ return false, ErrTerminated
+ case r := <-result:
+ return r, nil
+ }
+}
diff --git a/vendor/github.com/docker/cli/internal/tui/chip.go b/vendor/github.com/docker/cli/internal/tui/chip.go
index bb383109..02a9b8b8 100644
--- a/vendor/github.com/docker/cli/internal/tui/chip.go
+++ b/vendor/github.com/docker/cli/internal/tui/chip.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package tui
diff --git a/vendor/github.com/docker/cli/internal/tui/colors.go b/vendor/github.com/docker/cli/internal/tui/colors.go
index 796aa390..d82d61dd 100644
--- a/vendor/github.com/docker/cli/internal/tui/colors.go
+++ b/vendor/github.com/docker/cli/internal/tui/colors.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package tui
diff --git a/vendor/github.com/docker/cli/internal/tui/count.go b/vendor/github.com/docker/cli/internal/tui/count.go
index 319776e1..5d7ebd94 100644
--- a/vendor/github.com/docker/cli/internal/tui/count.go
+++ b/vendor/github.com/docker/cli/internal/tui/count.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package tui
diff --git a/vendor/github.com/docker/cli/internal/tui/note.go b/vendor/github.com/docker/cli/internal/tui/note.go
index c955b8cd..d2bc0b9a 100644
--- a/vendor/github.com/docker/cli/internal/tui/note.go
+++ b/vendor/github.com/docker/cli/internal/tui/note.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package tui
@@ -15,19 +15,39 @@ var InfoHeader = Str{
Fancy: aec.Bold.Apply(aec.LightCyanB.Apply(aec.BlackF.Apply("i")) + " " + aec.LightCyanF.Apply("Info → ")),
}
-func (o Output) PrintNote(format string, args ...any) {
+type options struct {
+ header Str
+}
+
+type noteOptions func(o *options)
+
+func withHeader(header Str) noteOptions {
+ return func(o *options) {
+ o.header = header
+ }
+}
+
+func (o Output) printNoteWithOptions(format string, args []any, opts ...noteOptions) {
if o.isTerminal {
// TODO: Handle all flags
format = strings.ReplaceAll(format, "--platform", ColorFlag.Apply("--platform"))
}
- header := o.Sprint(InfoHeader)
+ opt := &options{
+ header: InfoHeader,
+ }
- _, _ = fmt.Fprint(o, "\n", header)
+ for _, override := range opts {
+ override(opt)
+ }
+
+ h := o.Sprint(opt.header)
+
+ _, _ = fmt.Fprint(o, "\n", h)
s := fmt.Sprintf(format, args...)
for idx, line := range strings.Split(s, "\n") {
if idx > 0 {
- _, _ = fmt.Fprint(o, strings.Repeat(" ", Width(header)))
+ _, _ = fmt.Fprint(o, strings.Repeat(" ", Width(h)))
}
l := line
@@ -37,3 +57,16 @@ func (o Output) PrintNote(format string, args ...any) {
_, _ = fmt.Fprintln(o, l)
}
}
+
+func (o Output) PrintNote(format string, args ...any) {
+ o.printNoteWithOptions(format, args, withHeader(InfoHeader))
+}
+
+var warningHeader = Str{
+ Plain: " Warn -> ",
+ Fancy: aec.Bold.Apply(aec.LightYellowB.Apply(aec.BlackF.Apply("w")) + " " + ColorWarning.Apply("Warn → ")),
+}
+
+func (o Output) PrintWarning(format string, args ...any) {
+ o.printNoteWithOptions(format, args, withHeader(warningHeader))
+}
diff --git a/vendor/github.com/docker/cli/internal/tui/output.go b/vendor/github.com/docker/cli/internal/tui/output.go
index 7fc194ac..1f526d3f 100644
--- a/vendor/github.com/docker/cli/internal/tui/output.go
+++ b/vendor/github.com/docker/cli/internal/tui/output.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package tui
diff --git a/vendor/github.com/docker/cli/internal/tui/str.go b/vendor/github.com/docker/cli/internal/tui/str.go
index 490e474f..c1ea9c95 100644
--- a/vendor/github.com/docker/cli/internal/tui/str.go
+++ b/vendor/github.com/docker/cli/internal/tui/str.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package tui
diff --git a/vendor/github.com/docker/cli/opts/duration.go b/vendor/github.com/docker/cli/opts/duration.go
index d55c51e6..41a27a56 100644
--- a/vendor/github.com/docker/cli/opts/duration.go
+++ b/vendor/github.com/docker/cli/opts/duration.go
@@ -1,9 +1,8 @@
package opts
import (
+ "errors"
"time"
-
- "github.com/pkg/errors"
)
// PositiveDurationOpt is an option type for time.Duration that uses a pointer.
@@ -20,7 +19,7 @@ func (d *PositiveDurationOpt) Set(s string) error {
return err
}
if *d.DurationOpt.value < 0 {
- return errors.Errorf("duration cannot be negative")
+ return errors.New("duration cannot be negative")
}
return nil
}
diff --git a/vendor/github.com/docker/cli/opts/env.go b/vendor/github.com/docker/cli/opts/env.go
index 214d6f44..675ddda9 100644
--- a/vendor/github.com/docker/cli/opts/env.go
+++ b/vendor/github.com/docker/cli/opts/env.go
@@ -1,10 +1,9 @@
package opts
import (
+ "errors"
"os"
"strings"
-
- "github.com/pkg/errors"
)
// ValidateEnv validates an environment variable and returns it.
diff --git a/vendor/github.com/docker/cli/opts/gpus.go b/vendor/github.com/docker/cli/opts/gpus.go
index 993f6da9..6a56c49c 100644
--- a/vendor/github.com/docker/cli/opts/gpus.go
+++ b/vendor/github.com/docker/cli/opts/gpus.go
@@ -2,12 +2,12 @@ package opts
import (
"encoding/csv"
+ "errors"
"fmt"
"strconv"
"strings"
"github.com/docker/docker/api/types/container"
- "github.com/pkg/errors"
)
// GpuOpts is a Value type for parsing mounts
@@ -20,7 +20,14 @@ func parseCount(s string) (int, error) {
return -1, nil
}
i, err := strconv.Atoi(s)
- return i, errors.Wrap(err, "count must be an integer")
+ if err != nil {
+ var numErr *strconv.NumError
+ if errors.As(err, &numErr) {
+ err = numErr.Err
+ }
+ return 0, fmt.Errorf(`invalid count (%s): value must be either "all" or an integer: %w`, s, err)
+ }
+ return i, nil
}
// Set a new mount value
@@ -69,7 +76,7 @@ func (o *GpuOpts) Set(value string) error {
r := csv.NewReader(strings.NewReader(val))
optFields, err := r.Read()
if err != nil {
- return errors.Wrap(err, "failed to read gpu options")
+ return fmt.Errorf("failed to read gpu options: %w", err)
}
req.Options = ConvertKVStringsToMap(optFields)
default:
diff --git a/vendor/github.com/docker/cli/opts/mount.go b/vendor/github.com/docker/cli/opts/mount.go
index 275a4d7f..05c1cd0b 100644
--- a/vendor/github.com/docker/cli/opts/mount.go
+++ b/vendor/github.com/docker/cli/opts/mount.go
@@ -100,7 +100,7 @@ func (m *MountOpt) Set(value string) error {
mount.Type = mounttypes.Type(strings.ToLower(val))
case "source", "src":
mount.Source = val
- if strings.HasPrefix(val, "."+string(filepath.Separator)) || val == "." {
+ if !filepath.IsAbs(val) && strings.HasPrefix(val, ".") {
if abs, err := filepath.Abs(val); err == nil {
mount.Source = abs
}
@@ -135,8 +135,7 @@ func (m *MountOpt) Set(value string) error {
// TODO: implicitly set propagation and error if the user specifies a propagation in a future refactor/UX polish pass
// https://github.com/docker/cli/pull/4316#discussion_r1341974730
default:
- return fmt.Errorf("invalid value for %s: %s (must be \"enabled\", \"disabled\", \"writable\", or \"readonly\")",
- key, val)
+ return fmt.Errorf(`invalid value for %s: %s (must be "enabled", "disabled", "writable", or "readonly")`, key, val)
}
case "volume-subpath":
volumeOptions().Subpath = val
diff --git a/vendor/github.com/docker/cli/opts/network.go b/vendor/github.com/docker/cli/opts/network.go
index c3510870..43b3a09d 100644
--- a/vendor/github.com/docker/cli/opts/network.go
+++ b/vendor/github.com/docker/cli/opts/network.go
@@ -89,7 +89,11 @@ func (n *NetworkOpt) Set(value string) error { //nolint:gocyclo
case gwPriorityOpt:
netOpt.GwPriority, err = strconv.Atoi(val)
if err != nil {
- return fmt.Errorf("invalid gw-priority: %w", err)
+ var numErr *strconv.NumError
+ if errors.As(err, &numErr) {
+ err = numErr.Err
+ }
+ return fmt.Errorf("invalid gw-priority (%s): %w", val, err)
}
default:
return errors.New("invalid field key " + key)
diff --git a/vendor/github.com/docker/cli/opts/opts.go b/vendor/github.com/docker/cli/opts/opts.go
index 061fda57..1a885db3 100644
--- a/vendor/github.com/docker/cli/opts/opts.go
+++ b/vendor/github.com/docker/cli/opts/opts.go
@@ -1,21 +1,21 @@
package opts
import (
+ "errors"
"fmt"
"math/big"
"net"
"path"
- "regexp"
"strings"
+ "github.com/docker/cli/internal/lazyregexp"
"github.com/docker/docker/api/types/filters"
- units "github.com/docker/go-units"
- "github.com/pkg/errors"
+ "github.com/docker/go-units"
)
var (
- alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
- domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
+ alphaRegexp = lazyregexp.New(`[a-zA-Z]`)
+ domainRegexp = lazyregexp.New(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
)
// ListOpts holds a list of values and a validation function.
@@ -80,10 +80,22 @@ func (opts *ListOpts) GetMap() map[string]struct{} {
}
// GetAll returns the values of slice.
+//
+// Deprecated: use [ListOpts.GetSlice] instead. This method will be removed in a future release.
func (opts *ListOpts) GetAll() []string {
return *opts.values
}
+// GetSlice returns the values of slice.
+//
+// It implements [cobra.SliceValue] to allow shell completion to be provided
+// multiple times.
+//
+// [cobra.SliceValue]: https://pkg.go.dev/github.com/spf13/cobra@v1.9.1#SliceValue
+func (opts *ListOpts) GetSlice() []string {
+ return *opts.values
+}
+
// GetAllOrEmpty returns the values of the slice
// or an empty slice when there are no values.
func (opts *ListOpts) GetAllOrEmpty() []string {
diff --git a/vendor/github.com/docker/cli/opts/config.go b/vendor/github.com/docker/cli/opts/swarmopts/config.go
similarity index 88%
rename from vendor/github.com/docker/cli/opts/config.go
rename to vendor/github.com/docker/cli/opts/swarmopts/config.go
index 1fc0eb35..ff137304 100644
--- a/vendor/github.com/docker/cli/opts/config.go
+++ b/vendor/github.com/docker/cli/opts/swarmopts/config.go
@@ -1,4 +1,4 @@
-package opts
+package swarmopts
import (
"encoding/csv"
@@ -8,12 +8,12 @@ import (
"strconv"
"strings"
- swarmtypes "github.com/docker/docker/api/types/swarm"
+ "github.com/docker/docker/api/types/swarm"
)
// ConfigOpt is a Value type for parsing configs
type ConfigOpt struct {
- values []*swarmtypes.ConfigReference
+ values []*swarm.ConfigReference
}
// Set a new config value
@@ -24,8 +24,8 @@ func (o *ConfigOpt) Set(value string) error {
return err
}
- options := &swarmtypes.ConfigReference{
- File: &swarmtypes.ConfigReferenceFileTarget{
+ options := &swarm.ConfigReference{
+ File: &swarm.ConfigReferenceFileTarget{
UID: "0",
GID: "0",
Mode: 0o444,
@@ -95,6 +95,6 @@ func (o *ConfigOpt) String() string {
}
// Value returns the config requests
-func (o *ConfigOpt) Value() []*swarmtypes.ConfigReference {
+func (o *ConfigOpt) Value() []*swarm.ConfigReference {
return o.values
}
diff --git a/vendor/github.com/docker/cli/opts/port.go b/vendor/github.com/docker/cli/opts/swarmopts/port.go
similarity index 84%
rename from vendor/github.com/docker/cli/opts/port.go
rename to vendor/github.com/docker/cli/opts/swarmopts/port.go
index 0407355e..e15c6b83 100644
--- a/vendor/github.com/docker/cli/opts/port.go
+++ b/vendor/github.com/docker/cli/opts/swarmopts/port.go
@@ -1,4 +1,4 @@
-package opts
+package swarmopts
import (
"encoding/csv"
@@ -46,42 +46,50 @@ func (p *PortOpt) Set(value string) error {
// TODO(thaJeztah): these options should not be case-insensitive.
key, val, ok := strings.Cut(strings.ToLower(field), "=")
if !ok || key == "" {
- return fmt.Errorf("invalid field %s", field)
+ return fmt.Errorf("invalid field: %s", field)
}
switch key {
case portOptProtocol:
if val != string(swarm.PortConfigProtocolTCP) && val != string(swarm.PortConfigProtocolUDP) && val != string(swarm.PortConfigProtocolSCTP) {
- return fmt.Errorf("invalid protocol value %s", val)
+ return fmt.Errorf("invalid protocol value '%s'", val)
}
pConfig.Protocol = swarm.PortConfigProtocol(val)
case portOptMode:
if val != string(swarm.PortConfigPublishModeIngress) && val != string(swarm.PortConfigPublishModeHost) {
- return fmt.Errorf("invalid publish mode value %s", val)
+ return fmt.Errorf("invalid publish mode value (%s): must be either '%s' or '%s'", val, swarm.PortConfigPublishModeIngress, swarm.PortConfigPublishModeHost)
}
pConfig.PublishMode = swarm.PortConfigPublishMode(val)
case portOptTargetPort:
tPort, err := strconv.ParseUint(val, 10, 16)
if err != nil {
- return err
+ var numErr *strconv.NumError
+ if errors.As(err, &numErr) {
+ err = numErr.Err
+ }
+ return fmt.Errorf("invalid target port (%s): value must be an integer: %w", val, err)
}
pConfig.TargetPort = uint32(tPort)
case portOptPublishedPort:
pPort, err := strconv.ParseUint(val, 10, 16)
if err != nil {
- return err
+ var numErr *strconv.NumError
+ if errors.As(err, &numErr) {
+ err = numErr.Err
+ }
+ return fmt.Errorf("invalid published port (%s): value must be an integer: %w", val, err)
}
pConfig.PublishedPort = uint32(pPort)
default:
- return fmt.Errorf("invalid field key %s", key)
+ return fmt.Errorf("invalid field key: %s", key)
}
}
if pConfig.TargetPort == 0 {
- return fmt.Errorf("missing mandatory field %q", portOptTargetPort)
+ return fmt.Errorf("missing mandatory field '%s'", portOptTargetPort)
}
if pConfig.PublishMode == "" {
diff --git a/vendor/github.com/docker/cli/opts/secret.go b/vendor/github.com/docker/cli/opts/swarmopts/secret.go
similarity index 88%
rename from vendor/github.com/docker/cli/opts/secret.go
rename to vendor/github.com/docker/cli/opts/swarmopts/secret.go
index bdf232de..9f97627a 100644
--- a/vendor/github.com/docker/cli/opts/secret.go
+++ b/vendor/github.com/docker/cli/opts/swarmopts/secret.go
@@ -1,4 +1,4 @@
-package opts
+package swarmopts
import (
"encoding/csv"
@@ -8,12 +8,12 @@ import (
"strconv"
"strings"
- swarmtypes "github.com/docker/docker/api/types/swarm"
+ "github.com/docker/docker/api/types/swarm"
)
// SecretOpt is a Value type for parsing secrets
type SecretOpt struct {
- values []*swarmtypes.SecretReference
+ values []*swarm.SecretReference
}
// Set a new secret value
@@ -24,8 +24,8 @@ func (o *SecretOpt) Set(value string) error {
return err
}
- options := &swarmtypes.SecretReference{
- File: &swarmtypes.SecretReferenceFileTarget{
+ options := &swarm.SecretReference{
+ File: &swarm.SecretReferenceFileTarget{
UID: "0",
GID: "0",
Mode: 0o444,
@@ -94,6 +94,6 @@ func (o *SecretOpt) String() string {
}
// Value returns the secret requests
-func (o *SecretOpt) Value() []*swarmtypes.SecretReference {
+func (o *SecretOpt) Value() []*swarm.SecretReference {
return o.values
}
diff --git a/vendor/github.com/docker/cli/templates/templates.go b/vendor/github.com/docker/cli/templates/templates.go
index da2354ca..f0726eec 100644
--- a/vendor/github.com/docker/cli/templates/templates.go
+++ b/vendor/github.com/docker/cli/templates/templates.go
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
-//go:build go1.22
+//go:build go1.23
package templates
diff --git a/vendor/github.com/docker/distribution/manifest/doc.go b/vendor/github.com/docker/distribution/manifest/doc.go
deleted file mode 100644
index 88367b0a..00000000
--- a/vendor/github.com/docker/distribution/manifest/doc.go
+++ /dev/null
@@ -1 +0,0 @@
-package manifest
diff --git a/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go
deleted file mode 100644
index bea2341c..00000000
--- a/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go
+++ /dev/null
@@ -1,239 +0,0 @@
-package manifestlist
-
-import (
- "encoding/json"
- "errors"
- "fmt"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/manifest"
- "github.com/opencontainers/go-digest"
- v1 "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-const (
- // MediaTypeManifestList specifies the mediaType for manifest lists.
- MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json"
-)
-
-// SchemaVersion provides a pre-initialized version structure for this
-// packages version of the manifest.
-var SchemaVersion = manifest.Versioned{
- SchemaVersion: 2,
- MediaType: MediaTypeManifestList,
-}
-
-// OCISchemaVersion provides a pre-initialized version structure for this
-// packages OCIschema version of the manifest.
-var OCISchemaVersion = manifest.Versioned{
- SchemaVersion: 2,
- MediaType: v1.MediaTypeImageIndex,
-}
-
-func init() {
- manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
- m := new(DeserializedManifestList)
- err := m.UnmarshalJSON(b)
- if err != nil {
- return nil, distribution.Descriptor{}, err
- }
-
- if m.MediaType != MediaTypeManifestList {
- err = fmt.Errorf("mediaType in manifest list should be '%s' not '%s'",
- MediaTypeManifestList, m.MediaType)
-
- return nil, distribution.Descriptor{}, err
- }
-
- dgst := digest.FromBytes(b)
- return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err
- }
- err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc)
- if err != nil {
- panic(fmt.Sprintf("Unable to register manifest: %s", err))
- }
-
- imageIndexFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
- if err := validateIndex(b); err != nil {
- return nil, distribution.Descriptor{}, err
- }
- m := new(DeserializedManifestList)
- err := m.UnmarshalJSON(b)
- if err != nil {
- return nil, distribution.Descriptor{}, err
- }
-
- if m.MediaType != "" && m.MediaType != v1.MediaTypeImageIndex {
- err = fmt.Errorf("if present, mediaType in image index should be '%s' not '%s'",
- v1.MediaTypeImageIndex, m.MediaType)
-
- return nil, distribution.Descriptor{}, err
- }
-
- dgst := digest.FromBytes(b)
- return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: v1.MediaTypeImageIndex}, err
- }
- err = distribution.RegisterManifestSchema(v1.MediaTypeImageIndex, imageIndexFunc)
- if err != nil {
- panic(fmt.Sprintf("Unable to register OCI Image Index: %s", err))
- }
-}
-
-// PlatformSpec specifies a platform where a particular image manifest is
-// applicable.
-type PlatformSpec struct {
- // Architecture field specifies the CPU architecture, for example
- // `amd64` or `ppc64`.
- Architecture string `json:"architecture"`
-
- // OS specifies the operating system, for example `linux` or `windows`.
- OS string `json:"os"`
-
- // OSVersion is an optional field specifying the operating system
- // version, for example `10.0.10586`.
- OSVersion string `json:"os.version,omitempty"`
-
- // OSFeatures is an optional field specifying an array of strings,
- // each listing a required OS feature (for example on Windows `win32k`).
- OSFeatures []string `json:"os.features,omitempty"`
-
- // Variant is an optional field specifying a variant of the CPU, for
- // example `ppc64le` to specify a little-endian version of a PowerPC CPU.
- Variant string `json:"variant,omitempty"`
-
- // Features is an optional field specifying an array of strings, each
- // listing a required CPU feature (for example `sse4` or `aes`).
- Features []string `json:"features,omitempty"`
-}
-
-// A ManifestDescriptor references a platform-specific manifest.
-type ManifestDescriptor struct {
- distribution.Descriptor
-
- // Platform specifies which platform the manifest pointed to by the
- // descriptor runs on.
- Platform PlatformSpec `json:"platform"`
-}
-
-// ManifestList references manifests for various platforms.
-type ManifestList struct {
- manifest.Versioned
-
- // Config references the image configuration as a blob.
- Manifests []ManifestDescriptor `json:"manifests"`
-}
-
-// References returns the distribution descriptors for the referenced image
-// manifests.
-func (m ManifestList) References() []distribution.Descriptor {
- dependencies := make([]distribution.Descriptor, len(m.Manifests))
- for i := range m.Manifests {
- dependencies[i] = m.Manifests[i].Descriptor
- }
-
- return dependencies
-}
-
-// DeserializedManifestList wraps ManifestList with a copy of the original
-// JSON.
-type DeserializedManifestList struct {
- ManifestList
-
- // canonical is the canonical byte representation of the Manifest.
- canonical []byte
-}
-
-// FromDescriptors takes a slice of descriptors, and returns a
-// DeserializedManifestList which contains the resulting manifest list
-// and its JSON representation.
-func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) {
- var mediaType string
- if len(descriptors) > 0 && descriptors[0].Descriptor.MediaType == v1.MediaTypeImageManifest {
- mediaType = v1.MediaTypeImageIndex
- } else {
- mediaType = MediaTypeManifestList
- }
-
- return FromDescriptorsWithMediaType(descriptors, mediaType)
-}
-
-// FromDescriptorsWithMediaType is for testing purposes, it's useful to be able to specify the media type explicitly
-func FromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType string) (*DeserializedManifestList, error) {
- m := ManifestList{
- Versioned: manifest.Versioned{
- SchemaVersion: 2,
- MediaType: mediaType,
- },
- }
-
- m.Manifests = make([]ManifestDescriptor, len(descriptors))
- copy(m.Manifests, descriptors)
-
- deserialized := DeserializedManifestList{
- ManifestList: m,
- }
-
- var err error
- deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
- return &deserialized, err
-}
-
-// UnmarshalJSON populates a new ManifestList struct from JSON data.
-func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error {
- m.canonical = make([]byte, len(b))
- // store manifest list in canonical
- copy(m.canonical, b)
-
- // Unmarshal canonical JSON into ManifestList object
- var manifestList ManifestList
- if err := json.Unmarshal(m.canonical, &manifestList); err != nil {
- return err
- }
-
- m.ManifestList = manifestList
-
- return nil
-}
-
-// MarshalJSON returns the contents of canonical. If canonical is empty,
-// marshals the inner contents.
-func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) {
- if len(m.canonical) > 0 {
- return m.canonical, nil
- }
-
- return nil, errors.New("JSON representation not initialized in DeserializedManifestList")
-}
-
-// Payload returns the raw content of the manifest list. The contents can be
-// used to calculate the content identifier.
-func (m DeserializedManifestList) Payload() (string, []byte, error) {
- var mediaType string
- if m.MediaType == "" {
- mediaType = v1.MediaTypeImageIndex
- } else {
- mediaType = m.MediaType
- }
-
- return mediaType, m.canonical, nil
-}
-
-// unknownDocument represents a manifest, manifest list, or index that has not
-// yet been validated
-type unknownDocument struct {
- Config interface{} `json:"config,omitempty"`
- Layers interface{} `json:"layers,omitempty"`
-}
-
-// validateIndex returns an error if the byte slice is invalid JSON or if it
-// contains fields that belong to a manifest
-func validateIndex(b []byte) error {
- var doc unknownDocument
- if err := json.Unmarshal(b, &doc); err != nil {
- return err
- }
- if doc.Config != nil || doc.Layers != nil {
- return errors.New("index: expected index but found manifest")
- }
- return nil
-}
diff --git a/vendor/github.com/docker/distribution/manifest/ocischema/builder.go b/vendor/github.com/docker/distribution/manifest/ocischema/builder.go
deleted file mode 100644
index b89bf5b7..00000000
--- a/vendor/github.com/docker/distribution/manifest/ocischema/builder.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package ocischema
-
-import (
- "context"
- "errors"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/manifest"
- "github.com/opencontainers/go-digest"
- v1 "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// Builder is a type for constructing manifests.
-type Builder struct {
- // bs is a BlobService used to publish the configuration blob.
- bs distribution.BlobService
-
-	// configJSON is the image configuration blob that Build publishes.
- configJSON []byte
-
- // layers is a list of layer descriptors that gets built by successive
- // calls to AppendReference.
- layers []distribution.Descriptor
-
- // Annotations contains arbitrary metadata relating to the targeted content.
- annotations map[string]string
-
- // For testing purposes
- mediaType string
-}
-
-// NewManifestBuilder is used to build new manifests for the current schema
-// version. It takes a BlobService so it can publish the configuration blob
-// as part of the Build process, and a set of annotations to attach to the manifest.
-func NewManifestBuilder(bs distribution.BlobService, configJSON []byte, annotations map[string]string) distribution.ManifestBuilder {
- mb := &Builder{
- bs: bs,
- configJSON: make([]byte, len(configJSON)),
- annotations: annotations,
- mediaType: v1.MediaTypeImageManifest,
- }
- copy(mb.configJSON, configJSON)
-
- return mb
-}
-
-// SetMediaType assigns the passed media type, or returns an error if it is not
-// a valid media type for OCI image manifests (currently "" or "application/vnd.oci.image.manifest.v1+json").
-func (mb *Builder) SetMediaType(mediaType string) error {
- if mediaType != "" && mediaType != v1.MediaTypeImageManifest {
- return errors.New("invalid media type for OCI image manifest")
- }
-
- mb.mediaType = mediaType
- return nil
-}
-
-// Build produces a final manifest from the given references.
-func (mb *Builder) Build(ctx context.Context) (distribution.Manifest, error) {
- m := Manifest{
- Versioned: manifest.Versioned{
- SchemaVersion: 2,
- MediaType: mb.mediaType,
- },
- Layers: make([]distribution.Descriptor, len(mb.layers)),
- Annotations: mb.annotations,
- }
- copy(m.Layers, mb.layers)
-
- configDigest := digest.FromBytes(mb.configJSON)
-
- var err error
- m.Config, err = mb.bs.Stat(ctx, configDigest)
- switch err {
- case nil:
- // Override MediaType, since Put always replaces the specified media
- // type with application/octet-stream in the descriptor it returns.
- m.Config.MediaType = v1.MediaTypeImageConfig
- return FromStruct(m)
- case distribution.ErrBlobUnknown:
- // nop
- default:
- return nil, err
- }
-
- // Add config to the blob store
- m.Config, err = mb.bs.Put(ctx, v1.MediaTypeImageConfig, mb.configJSON)
- // Override MediaType, since Put always replaces the specified media
- // type with application/octet-stream in the descriptor it returns.
- m.Config.MediaType = v1.MediaTypeImageConfig
- if err != nil {
- return nil, err
- }
-
- return FromStruct(m)
-}
-
-// AppendReference adds a reference to the current ManifestBuilder.
-func (mb *Builder) AppendReference(d distribution.Describable) error {
- mb.layers = append(mb.layers, d.Descriptor())
- return nil
-}
-
-// References returns the current references added to this builder.
-func (mb *Builder) References() []distribution.Descriptor {
- return mb.layers
-}
diff --git a/vendor/github.com/docker/distribution/manifest/ocischema/manifest.go b/vendor/github.com/docker/distribution/manifest/ocischema/manifest.go
deleted file mode 100644
index d51f8deb..00000000
--- a/vendor/github.com/docker/distribution/manifest/ocischema/manifest.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package ocischema
-
-import (
- "encoding/json"
- "errors"
- "fmt"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/manifest"
- "github.com/opencontainers/go-digest"
- v1 "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-var (
-	// SchemaVersion provides a pre-initialized version structure for this
-	// package's version of the manifest.
- SchemaVersion = manifest.Versioned{
-	SchemaVersion: 2, // historical value here; does not pertain to OCI or docker version
- MediaType: v1.MediaTypeImageManifest,
- }
-)
-
-func init() {
- ocischemaFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
- if err := validateManifest(b); err != nil {
- return nil, distribution.Descriptor{}, err
- }
- m := new(DeserializedManifest)
- err := m.UnmarshalJSON(b)
- if err != nil {
- return nil, distribution.Descriptor{}, err
- }
-
- dgst := digest.FromBytes(b)
- return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: v1.MediaTypeImageManifest}, err
- }
- err := distribution.RegisterManifestSchema(v1.MediaTypeImageManifest, ocischemaFunc)
- if err != nil {
- panic(fmt.Sprintf("Unable to register manifest: %s", err))
- }
-}
-
-// Manifest defines a ocischema manifest.
-type Manifest struct {
- manifest.Versioned
-
- // Config references the image configuration as a blob.
- Config distribution.Descriptor `json:"config"`
-
- // Layers lists descriptors for the layers referenced by the
- // configuration.
- Layers []distribution.Descriptor `json:"layers"`
-
- // Annotations contains arbitrary metadata for the image manifest.
- Annotations map[string]string `json:"annotations,omitempty"`
-}
-
-// References returns the descriptors of this manifest's references.
-func (m Manifest) References() []distribution.Descriptor {
- references := make([]distribution.Descriptor, 0, 1+len(m.Layers))
- references = append(references, m.Config)
- references = append(references, m.Layers...)
- return references
-}
-
-// Target returns the target of this manifest.
-func (m Manifest) Target() distribution.Descriptor {
- return m.Config
-}
-
-// DeserializedManifest wraps Manifest with a copy of the original JSON.
-// It satisfies the distribution.Manifest interface.
-type DeserializedManifest struct {
- Manifest
-
- // canonical is the canonical byte representation of the Manifest.
- canonical []byte
-}
-
-// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
-// DeserializedManifest which contains the manifest and its JSON representation.
-func FromStruct(m Manifest) (*DeserializedManifest, error) {
- var deserialized DeserializedManifest
- deserialized.Manifest = m
-
- var err error
- deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
- return &deserialized, err
-}
-
-// UnmarshalJSON populates a new Manifest struct from JSON data.
-func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
- m.canonical = make([]byte, len(b))
- // store manifest in canonical
- copy(m.canonical, b)
-
- // Unmarshal canonical JSON into Manifest object
- var manifest Manifest
- if err := json.Unmarshal(m.canonical, &manifest); err != nil {
- return err
- }
-
- if manifest.MediaType != "" && manifest.MediaType != v1.MediaTypeImageManifest {
- return fmt.Errorf("if present, mediaType in manifest should be '%s' not '%s'",
- v1.MediaTypeImageManifest, manifest.MediaType)
- }
-
- m.Manifest = manifest
-
- return nil
-}
-
-// MarshalJSON returns the contents of canonical. If canonical is empty, an
-// error is returned instead of marshalling the inner contents.
-func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
- if len(m.canonical) > 0 {
- return m.canonical, nil
- }
-
- return nil, errors.New("JSON representation not initialized in DeserializedManifest")
-}
-
-// Payload returns the raw content of the manifest. The contents can be used to
-// calculate the content identifier.
-func (m DeserializedManifest) Payload() (string, []byte, error) {
- return v1.MediaTypeImageManifest, m.canonical, nil
-}
-
-// unknownDocument represents a manifest, manifest list, or index that has not
-// yet been validated
-type unknownDocument struct {
- Manifests interface{} `json:"manifests,omitempty"`
-}
-
-// validateManifest returns an error if the byte slice is invalid JSON or if it
-// contains fields that belong to an index.
-func validateManifest(b []byte) error {
- var doc unknownDocument
- if err := json.Unmarshal(b, &doc); err != nil {
- return err
- }
- if doc.Manifests != nil {
- return errors.New("ocimanifest: expected manifest but found index")
- }
- return nil
-}
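
The removed ocischema package kept the canonical JSON next to the parsed struct so
that content digests stay stable. A small sketch of FromStruct and Payload, with
illustrative descriptor values:

package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/ocischema"
	"github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	m := ocischema.Manifest{
		Versioned: ocischema.SchemaVersion,
		Config: distribution.Descriptor{
			MediaType: v1.MediaTypeImageConfig,
			Size:      452,
			Digest:    digest.FromString("image config"),
		},
		Layers: []distribution.Descriptor{{
			MediaType: v1.MediaTypeImageLayerGzip,
			Size:      10240,
			Digest:    digest.FromString("layer 0"),
		}},
		Annotations: map[string]string{
			"org.opencontainers.image.created": "2024-01-01T00:00:00Z",
		},
	}

	// FromStruct stores the canonical JSON alongside the struct, so Payload
	// returns exactly the bytes whose digest identifies the manifest.
	dm, err := ocischema.FromStruct(m)
	if err != nil {
		panic(err)
	}

	mediaType, canonical, _ := dm.Payload()
	fmt.Println(mediaType, digest.FromBytes(canonical))
}
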
diff --git a/vendor/github.com/docker/distribution/manifest/schema2/builder.go b/vendor/github.com/docker/distribution/manifest/schema2/builder.go
deleted file mode 100644
index 3facaae6..00000000
--- a/vendor/github.com/docker/distribution/manifest/schema2/builder.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package schema2
-
-import (
- "context"
-
- "github.com/docker/distribution"
- "github.com/opencontainers/go-digest"
-)
-
-// builder is a type for constructing manifests.
-type builder struct {
- // bs is a BlobService used to publish the configuration blob.
- bs distribution.BlobService
-
-	// configMediaType is the media type used to describe the configuration blob.
- configMediaType string
-
-	// configJSON is the configuration blob that Build publishes.
- configJSON []byte
-
- // dependencies is a list of descriptors that gets built by successive
- // calls to AppendReference. In case of image configuration these are layers.
- dependencies []distribution.Descriptor
-}
-
-// NewManifestBuilder is used to build new manifests for the current schema
-// version. It takes a BlobService so it can publish the configuration blob
-// as part of the Build process.
-func NewManifestBuilder(bs distribution.BlobService, configMediaType string, configJSON []byte) distribution.ManifestBuilder {
- mb := &builder{
- bs: bs,
- configMediaType: configMediaType,
- configJSON: make([]byte, len(configJSON)),
- }
- copy(mb.configJSON, configJSON)
-
- return mb
-}
-
-// Build produces a final manifest from the given references.
-func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
- m := Manifest{
- Versioned: SchemaVersion,
- Layers: make([]distribution.Descriptor, len(mb.dependencies)),
- }
- copy(m.Layers, mb.dependencies)
-
- configDigest := digest.FromBytes(mb.configJSON)
-
- var err error
- m.Config, err = mb.bs.Stat(ctx, configDigest)
- switch err {
- case nil:
- // Override MediaType, since Put always replaces the specified media
- // type with application/octet-stream in the descriptor it returns.
- m.Config.MediaType = mb.configMediaType
- return FromStruct(m)
- case distribution.ErrBlobUnknown:
- // nop
- default:
- return nil, err
- }
-
- // Add config to the blob store
- m.Config, err = mb.bs.Put(ctx, mb.configMediaType, mb.configJSON)
- // Override MediaType, since Put always replaces the specified media
- // type with application/octet-stream in the descriptor it returns.
- m.Config.MediaType = mb.configMediaType
- if err != nil {
- return nil, err
- }
-
- return FromStruct(m)
-}
-
-// AppendReference adds a reference to the current ManifestBuilder.
-func (mb *builder) AppendReference(d distribution.Describable) error {
- mb.dependencies = append(mb.dependencies, d.Descriptor())
- return nil
-}
-
-// References returns the current references added to this builder.
-func (mb *builder) References() []distribution.Descriptor {
- return mb.dependencies
-}
diff --git a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go
deleted file mode 100644
index 41f48029..00000000
--- a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package schema2
-
-import (
- "encoding/json"
- "errors"
- "fmt"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/manifest"
- "github.com/opencontainers/go-digest"
-)
-
-const (
- // MediaTypeManifest specifies the mediaType for the current version.
- MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"
-
- // MediaTypeImageConfig specifies the mediaType for the image configuration.
- MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json"
-
- // MediaTypePluginConfig specifies the mediaType for plugin configuration.
- MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json"
-
- // MediaTypeLayer is the mediaType used for layers referenced by the
- // manifest.
- MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
-
- // MediaTypeForeignLayer is the mediaType used for layers that must be
- // downloaded from foreign URLs.
- MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
-
- // MediaTypeUncompressedLayer is the mediaType used for layers which
- // are not compressed.
- MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
-)
-
-var (
-	// SchemaVersion provides a pre-initialized version structure for this
-	// package's version of the manifest.
- SchemaVersion = manifest.Versioned{
- SchemaVersion: 2,
- MediaType: MediaTypeManifest,
- }
-)
-
-func init() {
- schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
- m := new(DeserializedManifest)
- err := m.UnmarshalJSON(b)
- if err != nil {
- return nil, distribution.Descriptor{}, err
- }
-
- dgst := digest.FromBytes(b)
- return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err
- }
- err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func)
- if err != nil {
- panic(fmt.Sprintf("Unable to register manifest: %s", err))
- }
-}
-
-// Manifest defines a schema2 manifest.
-type Manifest struct {
- manifest.Versioned
-
- // Config references the image configuration as a blob.
- Config distribution.Descriptor `json:"config"`
-
- // Layers lists descriptors for the layers referenced by the
- // configuration.
- Layers []distribution.Descriptor `json:"layers"`
-}
-
-// References returns the descriptors of this manifest's references.
-func (m Manifest) References() []distribution.Descriptor {
- references := make([]distribution.Descriptor, 0, 1+len(m.Layers))
- references = append(references, m.Config)
- references = append(references, m.Layers...)
- return references
-}
-
-// Target returns the target of this manifest.
-func (m Manifest) Target() distribution.Descriptor {
- return m.Config
-}
-
-// DeserializedManifest wraps Manifest with a copy of the original JSON.
-// It satisfies the distribution.Manifest interface.
-type DeserializedManifest struct {
- Manifest
-
- // canonical is the canonical byte representation of the Manifest.
- canonical []byte
-}
-
-// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
-// DeserializedManifest which contains the manifest and its JSON representation.
-func FromStruct(m Manifest) (*DeserializedManifest, error) {
- var deserialized DeserializedManifest
- deserialized.Manifest = m
-
- var err error
- deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
- return &deserialized, err
-}
-
-// UnmarshalJSON populates a new Manifest struct from JSON data.
-func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
- m.canonical = make([]byte, len(b))
- // store manifest in canonical
- copy(m.canonical, b)
-
- // Unmarshal canonical JSON into Manifest object
- var manifest Manifest
- if err := json.Unmarshal(m.canonical, &manifest); err != nil {
- return err
- }
-
- if manifest.MediaType != MediaTypeManifest {
- return fmt.Errorf("mediaType in manifest should be '%s' not '%s'",
- MediaTypeManifest, manifest.MediaType)
-
- }
-
- m.Manifest = manifest
-
- return nil
-}
-
-// MarshalJSON returns the contents of canonical. If canonical is empty, an
-// error is returned instead of marshalling the inner contents.
-func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
- if len(m.canonical) > 0 {
- return m.canonical, nil
- }
-
- return nil, errors.New("JSON representation not initialized in DeserializedManifest")
-}
-
-// Payload returns the raw content of the manifest. The contents can be used to
-// calculate the content identifier.
-func (m DeserializedManifest) Payload() (string, []byte, error) {
- return m.MediaType, m.canonical, nil
-}
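
Round-tripping a schema2 manifest through DeserializedManifest preserves the bytes
received from the registry, which is what makes digest verification possible. A
sketch with an illustrative (not resolvable) config digest:

package main

import (
	"fmt"

	"github.com/docker/distribution/manifest/schema2"
	"github.com/opencontainers/go-digest"
)

func main() {
	// body would normally be the response of GET /v2/<name>/manifests/<reference>
	// served with the schema2 media type.
	body := []byte(`{
	  "schemaVersion": 2,
	  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
	  "config": {
	    "mediaType": "application/vnd.docker.container.image.v1+json",
	    "size": 452,
	    "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
	  },
	  "layers": []
	}`)

	var m schema2.DeserializedManifest
	if err := m.UnmarshalJSON(body); err != nil {
		panic(err)
	}

	// Payload returns the stored canonical bytes, so the digest matches the
	// digest of the body exactly as it was received.
	_, canonical, _ := m.Payload()
	fmt.Println(digest.FromBytes(canonical) == digest.FromBytes(body)) // true
}
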
diff --git a/vendor/github.com/docker/distribution/manifest/versioned.go b/vendor/github.com/docker/distribution/manifest/versioned.go
deleted file mode 100644
index caa6b14e..00000000
--- a/vendor/github.com/docker/distribution/manifest/versioned.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package manifest
-
-// Versioned provides a struct with the manifest schemaVersion and mediaType.
-// Incoming content with unknown schema version can be decoded against this
-// struct to check the version.
-type Versioned struct {
- // SchemaVersion is the image manifest schema that this image follows
- SchemaVersion int `json:"schemaVersion"`
-
- // MediaType is the media type of this schema.
- MediaType string `json:"mediaType,omitempty"`
-}
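
The Versioned struct existed so that callers could sniff the schema version and
media type of an unknown payload before choosing a concrete manifest type to
decode it into, along these lines:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/manifest"
)

func main() {
	raw := []byte(`{"schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json"}`)

	// Decode only the version fields; the media type then selects the
	// concrete manifest type (schema2, ocischema, manifestlist, ...).
	var versioned manifest.Versioned
	if err := json.Unmarshal(raw, &versioned); err != nil {
		panic(err)
	}

	fmt.Println(versioned.SchemaVersion, versioned.MediaType)
}
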
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/api_version.go b/vendor/github.com/docker/distribution/registry/client/auth/api_version.go
deleted file mode 100644
index 7d8f1d95..00000000
--- a/vendor/github.com/docker/distribution/registry/client/auth/api_version.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package auth
-
-import (
- "net/http"
- "strings"
-)
-
-// APIVersion represents a version of an API including its
-// type and version number.
-type APIVersion struct {
- // Type refers to the name of a specific API specification
- // such as "registry"
- Type string
-
-	// Version is the version of the API specification implemented.
-	// This may omit the revision number and only include
-	// the major and minor version, such as "2.0".
- Version string
-}
-
-// String returns the string formatted API Version
-func (v APIVersion) String() string {
- return v.Type + "/" + v.Version
-}
-
-// APIVersions gets the API versions out of an HTTP response using the provided
-// version header as the key for the HTTP header.
-func APIVersions(resp *http.Response, versionHeader string) []APIVersion {
- versions := []APIVersion{}
- if versionHeader != "" {
- for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] {
- for _, version := range strings.Fields(supportedVersions) {
- versions = append(versions, ParseAPIVersion(version))
- }
- }
- }
- return versions
-}
-
-// ParseAPIVersion parses an API version string into an APIVersion
-// Format (Expected, not enforced):
-// API version string = <API type> '/' <API version>
-// API type = [a-z][a-z0-9]*
-// API version = [0-9]+(\.[0-9]+)?
-// TODO(dmcgowan): Enforce format, add error condition, remove unknown type
-func ParseAPIVersion(versionStr string) APIVersion {
- idx := strings.IndexRune(versionStr, '/')
- if idx == -1 {
- return APIVersion{
- Type: "unknown",
- Version: versionStr,
- }
- }
- return APIVersion{
- Type: strings.ToLower(versionStr[:idx]),
- Version: versionStr[idx+1:],
- }
-}
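
A short sketch of how the removed APIVersions and ParseAPIVersion helpers were
used against a registry ping response; the header name is the one registries
conventionally send, and the response here is constructed by hand:

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client/auth"
)

func main() {
	// A /v2/ ping normally advertises the supported API version in the
	// Docker-Distribution-API-Version header.
	resp := &http.Response{Header: http.Header{}}
	resp.Header.Add("Docker-Distribution-API-Version", "registry/2.0")

	for _, v := range auth.APIVersions(resp, "Docker-Distribution-API-Version") {
		fmt.Println(v.Type, v.Version) // registry 2.0
	}

	// Strings without a '/' separator are reported with the type "unknown".
	fmt.Println(auth.ParseAPIVersion("2.0")) // unknown/2.0
}
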
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go
deleted file mode 100644
index aad8a0e6..00000000
--- a/vendor/github.com/docker/distribution/registry/client/auth/session.go
+++ /dev/null
@@ -1,530 +0,0 @@
-package auth
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "strings"
- "sync"
- "time"
-
- "github.com/docker/distribution/registry/client"
- "github.com/docker/distribution/registry/client/auth/challenge"
- "github.com/docker/distribution/registry/client/transport"
-)
-
-var (
- // ErrNoBasicAuthCredentials is returned if a request can't be authorized with
- // basic auth due to lack of credentials.
- ErrNoBasicAuthCredentials = errors.New("no basic auth credentials")
-
- // ErrNoToken is returned if a request is successful but the body does not
- // contain an authorization token.
- ErrNoToken = errors.New("authorization server did not include a token in the response")
-)
-
-const defaultClientID = "registry-client"
-
-// AuthenticationHandler is an interface for authorizing a request using the
-// parameters from a "WWW-Authenticate" header for a single scheme.
-type AuthenticationHandler interface {
-	// Scheme returns the scheme as expected from the "WWW-Authenticate" header.
- Scheme() string
-
- // AuthorizeRequest adds the authorization header to a request (if needed)
- // using the parameters from "WWW-Authenticate" method. The parameters
- // values depend on the scheme.
- AuthorizeRequest(req *http.Request, params map[string]string) error
-}
-
-// CredentialStore is an interface for getting credentials for
-// a given URL
-type CredentialStore interface {
- // Basic returns basic auth for the given URL
- Basic(*url.URL) (string, string)
-
- // RefreshToken returns a refresh token for the
- // given URL and service
- RefreshToken(*url.URL, string) string
-
- // SetRefreshToken sets the refresh token if none
- // is provided for the given url and service
- SetRefreshToken(realm *url.URL, service, token string)
-}
-
-// NewAuthorizer creates an authorizer which can handle multiple authentication
-// schemes. The handlers are tried in order; higher priority authentication
-// methods should come first. The challenge manager holds a list of challenges for
-// a given root API endpoint (for example "https://registry-1.docker.io/v2/").
-func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier {
- return &endpointAuthorizer{
- challenges: manager,
- handlers: handlers,
- }
-}
-
-type endpointAuthorizer struct {
- challenges challenge.Manager
- handlers []AuthenticationHandler
-}
-
-func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error {
- pingPath := req.URL.Path
- if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 {
- pingPath = pingPath[:v2Root+4]
- } else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 {
- pingPath = pingPath[:v1Root] + "/v2/"
- } else {
- return nil
- }
-
- ping := url.URL{
- Host: req.URL.Host,
- Scheme: req.URL.Scheme,
- Path: pingPath,
- }
-
- challenges, err := ea.challenges.GetChallenges(ping)
- if err != nil {
- return err
- }
-
- if len(challenges) > 0 {
- for _, handler := range ea.handlers {
- for _, c := range challenges {
- if c.Scheme != handler.Scheme() {
- continue
- }
- if err := handler.AuthorizeRequest(req, c.Parameters); err != nil {
- return err
- }
- }
- }
- }
-
- return nil
-}
-
-// This is the minimum duration a token can last (in seconds).
-// A token must not live less than 60 seconds because older versions
-// of the Docker client didn't read their expiration from the token
-// response and assumed 60 seconds. So to remain compatible with
-// those implementations, a token must live at least this long.
-const minimumTokenLifetimeSeconds = 60
-
-// Private interface for time used by this package to enable tests to provide their own implementation.
-type clock interface {
- Now() time.Time
-}
-
-type tokenHandler struct {
- creds CredentialStore
- transport http.RoundTripper
- clock clock
-
- offlineAccess bool
- forceOAuth bool
- clientID string
- scopes []Scope
-
- tokenLock sync.Mutex
- tokenCache string
- tokenExpiration time.Time
-
- logger Logger
-}
-
-// Scope is a type which is serializable to a string
-// using the allow scope grammar.
-type Scope interface {
- String() string
-}
-
-// RepositoryScope represents a token scope for access
-// to a repository.
-type RepositoryScope struct {
- Repository string
- Class string
- Actions []string
-}
-
-// String returns the string representation of the repository
-// using the scope grammar
-func (rs RepositoryScope) String() string {
- repoType := "repository"
- // Keep existing format for image class to maintain backwards compatibility
- // with authorization servers which do not support the expanded grammar.
- if rs.Class != "" && rs.Class != "image" {
- repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class)
- }
- return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ","))
-}
-
-// RegistryScope represents a token scope for access
-// to resources in the registry.
-type RegistryScope struct {
- Name string
- Actions []string
-}
-
-// String returns the string representation of the registry scope
-// using the scope grammar
-func (rs RegistryScope) String() string {
- return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ","))
-}
-
-// Logger defines the injectable logging interface, used on TokenHandlers.
-type Logger interface {
- Debugf(format string, args ...interface{})
-}
-
-func logDebugf(logger Logger, format string, args ...interface{}) {
- if logger == nil {
- return
- }
- logger.Debugf(format, args...)
-}
-
-// TokenHandlerOptions is used to configure a new token handler
-type TokenHandlerOptions struct {
- Transport http.RoundTripper
- Credentials CredentialStore
-
- OfflineAccess bool
- ForceOAuth bool
- ClientID string
- Scopes []Scope
- Logger Logger
-}
-
-// An implementation of clock for providing real time data.
-type realClock struct{}
-
-// Now implements clock
-func (realClock) Now() time.Time { return time.Now() }
-
-// NewTokenHandler creates a new AuthenticationHandler which supports
-// fetching tokens from a remote token server.
-func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler {
- // Create options...
- return NewTokenHandlerWithOptions(TokenHandlerOptions{
- Transport: transport,
- Credentials: creds,
- Scopes: []Scope{
- RepositoryScope{
- Repository: scope,
- Actions: actions,
- },
- },
- })
-}
-
-// NewTokenHandlerWithOptions creates a new token handler using the provided
-// options structure.
-func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler {
- handler := &tokenHandler{
- transport: options.Transport,
- creds: options.Credentials,
- offlineAccess: options.OfflineAccess,
- forceOAuth: options.ForceOAuth,
- clientID: options.ClientID,
- scopes: options.Scopes,
- clock: realClock{},
- logger: options.Logger,
- }
-
- return handler
-}
-
-func (th *tokenHandler) client() *http.Client {
- return &http.Client{
- Transport: th.transport,
- Timeout: 15 * time.Second,
- }
-}
-
-func (th *tokenHandler) Scheme() string {
- return "bearer"
-}
-
-func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
- var additionalScopes []string
- if fromParam := req.URL.Query().Get("from"); fromParam != "" {
- additionalScopes = append(additionalScopes, RepositoryScope{
- Repository: fromParam,
- Actions: []string{"pull"},
- }.String())
- }
-
- token, err := th.getToken(params, additionalScopes...)
- if err != nil {
- return err
- }
-
- req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
-
- return nil
-}
-
-func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) {
- th.tokenLock.Lock()
- defer th.tokenLock.Unlock()
- scopes := make([]string, 0, len(th.scopes)+len(additionalScopes))
- for _, scope := range th.scopes {
- scopes = append(scopes, scope.String())
- }
- var addedScopes bool
- for _, scope := range additionalScopes {
- if hasScope(scopes, scope) {
- continue
- }
- scopes = append(scopes, scope)
- addedScopes = true
- }
-
- now := th.clock.Now()
- if now.After(th.tokenExpiration) || addedScopes {
- token, expiration, err := th.fetchToken(params, scopes)
- if err != nil {
- return "", err
- }
-
- // do not update cache for added scope tokens
- if !addedScopes {
- th.tokenCache = token
- th.tokenExpiration = expiration
- }
-
- return token, nil
- }
-
- return th.tokenCache, nil
-}
-
-func hasScope(scopes []string, scope string) bool {
- for _, s := range scopes {
- if s == scope {
- return true
- }
- }
- return false
-}
-
-type postTokenResponse struct {
- AccessToken string `json:"access_token"`
- RefreshToken string `json:"refresh_token"`
- ExpiresIn int `json:"expires_in"`
- IssuedAt time.Time `json:"issued_at"`
- Scope string `json:"scope"`
-}
-
-func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) {
- form := url.Values{}
- form.Set("scope", strings.Join(scopes, " "))
- form.Set("service", service)
-
- clientID := th.clientID
- if clientID == "" {
-		// Use the default client ID; client_id is a required field.
- clientID = defaultClientID
- }
- form.Set("client_id", clientID)
-
- if refreshToken != "" {
- form.Set("grant_type", "refresh_token")
- form.Set("refresh_token", refreshToken)
- } else if th.creds != nil {
- form.Set("grant_type", "password")
- username, password := th.creds.Basic(realm)
- form.Set("username", username)
- form.Set("password", password)
-
- // attempt to get a refresh token
- form.Set("access_type", "offline")
- } else {
- // refuse to do oauth without a grant type
- return "", time.Time{}, fmt.Errorf("no supported grant type")
- }
-
- resp, err := th.client().PostForm(realm.String(), form)
- if err != nil {
- return "", time.Time{}, err
- }
- defer resp.Body.Close()
-
- if !client.SuccessStatus(resp.StatusCode) {
- err := client.HandleErrorResponse(resp)
- return "", time.Time{}, err
- }
-
- decoder := json.NewDecoder(resp.Body)
-
- var tr postTokenResponse
- if err = decoder.Decode(&tr); err != nil {
- return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
- }
-
- if tr.RefreshToken != "" && tr.RefreshToken != refreshToken {
- th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
- }
-
- if tr.ExpiresIn < minimumTokenLifetimeSeconds {
- // The default/minimum lifetime.
- tr.ExpiresIn = minimumTokenLifetimeSeconds
- logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn)
- }
-
- if tr.IssuedAt.IsZero() {
- // issued_at is optional in the token response.
- tr.IssuedAt = th.clock.Now().UTC()
- }
-
- return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
-}
-
-type getTokenResponse struct {
- Token string `json:"token"`
- AccessToken string `json:"access_token"`
- ExpiresIn int `json:"expires_in"`
- IssuedAt time.Time `json:"issued_at"`
- RefreshToken string `json:"refresh_token"`
-}
-
-func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) {
-
- req, err := http.NewRequest("GET", realm.String(), nil)
- if err != nil {
- return "", time.Time{}, err
- }
-
- reqParams := req.URL.Query()
-
- if service != "" {
- reqParams.Add("service", service)
- }
-
- for _, scope := range scopes {
- reqParams.Add("scope", scope)
- }
-
- if th.offlineAccess {
- reqParams.Add("offline_token", "true")
- clientID := th.clientID
- if clientID == "" {
- clientID = defaultClientID
- }
- reqParams.Add("client_id", clientID)
- }
-
- if th.creds != nil {
- username, password := th.creds.Basic(realm)
- if username != "" && password != "" {
- reqParams.Add("account", username)
- req.SetBasicAuth(username, password)
- }
- }
-
- req.URL.RawQuery = reqParams.Encode()
-
- resp, err := th.client().Do(req)
- if err != nil {
- return "", time.Time{}, err
- }
- defer resp.Body.Close()
-
- if !client.SuccessStatus(resp.StatusCode) {
- err := client.HandleErrorResponse(resp)
- return "", time.Time{}, err
- }
-
- decoder := json.NewDecoder(resp.Body)
-
- var tr getTokenResponse
- if err = decoder.Decode(&tr); err != nil {
- return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
- }
-
- if tr.RefreshToken != "" && th.creds != nil {
- th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
- }
-
- // `access_token` is equivalent to `token` and if both are specified
- // the choice is undefined. Canonicalize `access_token` by sticking
- // things in `token`.
- if tr.AccessToken != "" {
- tr.Token = tr.AccessToken
- }
-
- if tr.Token == "" {
- return "", time.Time{}, ErrNoToken
- }
-
- if tr.ExpiresIn < minimumTokenLifetimeSeconds {
- // The default/minimum lifetime.
- tr.ExpiresIn = minimumTokenLifetimeSeconds
- logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn)
- }
-
- if tr.IssuedAt.IsZero() {
- // issued_at is optional in the token response.
- tr.IssuedAt = th.clock.Now().UTC()
- }
-
- return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
-}
-
-func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) {
- realm, ok := params["realm"]
- if !ok {
- return "", time.Time{}, errors.New("no realm specified for token auth challenge")
- }
-
- // TODO(dmcgowan): Handle empty scheme and relative realm
- realmURL, err := url.Parse(realm)
- if err != nil {
- return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err)
- }
-
- service := params["service"]
-
- var refreshToken string
-
- if th.creds != nil {
- refreshToken = th.creds.RefreshToken(realmURL, service)
- }
-
- if refreshToken != "" || th.forceOAuth {
- return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes)
- }
-
- return th.fetchTokenWithBasicAuth(realmURL, service, scopes)
-}
-
-type basicHandler struct {
- creds CredentialStore
-}
-
-// NewBasicHandler creates a new authentication handler which adds
-// basic authentication credentials to a request.
-func NewBasicHandler(creds CredentialStore) AuthenticationHandler {
- return &basicHandler{
- creds: creds,
- }
-}
-
-func (*basicHandler) Scheme() string {
- return "basic"
-}
-
-func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
- if bh.creds != nil {
- username, password := bh.creds.Basic(req.URL)
- if username != "" && password != "" {
- req.SetBasicAuth(username, password)
- return nil
- }
- }
- return ErrNoBasicAuthCredentials
-}
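
How the pieces of the removed auth package fitted together, as a hedged sketch:
the challenge manager and transport helpers (challenge.NewSimpleManager,
transport.NewTransport) come from sibling packages not shown in this diff, and
registry.example.com plus the credentials are placeholders:

package main

import (
	"net/http"
	"net/url"

	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/auth/challenge"
	"github.com/docker/distribution/registry/client/transport"
)

// staticCredentials is a minimal CredentialStore backed by a fixed
// username/password pair; refresh tokens are cached in memory per service.
type staticCredentials struct {
	username, password string
	refreshTokens      map[string]string
}

func (c *staticCredentials) Basic(*url.URL) (string, string) { return c.username, c.password }

func (c *staticCredentials) RefreshToken(_ *url.URL, service string) string {
	return c.refreshTokens[service]
}

func (c *staticCredentials) SetRefreshToken(_ *url.URL, service, token string) {
	c.refreshTokens[service] = token
}

func main() {
	creds := &staticCredentials{
		username:      "example-user",
		password:      "example-pass",
		refreshTokens: map[string]string{},
	}

	// The challenge manager caches WWW-Authenticate challenges per endpoint;
	// it is primed with the response of an unauthenticated /v2/ ping.
	manager := challenge.NewSimpleManager()
	if resp, err := http.Get("https://registry.example.com/v2/"); err == nil {
		_ = manager.AddResponse(resp)
		resp.Body.Close()
	}

	// Bearer tokens are tried first; basic auth is the fallback.
	tokenHandler := auth.NewTokenHandler(http.DefaultTransport, creds, "library/alpine", "pull")
	basicHandler := auth.NewBasicHandler(creds)
	authorizer := auth.NewAuthorizer(manager, tokenHandler, basicHandler)

	// Every request sent through this client passes through ModifyRequest
	// before it goes out on the wire.
	httpClient := &http.Client{
		Transport: transport.NewTransport(http.DefaultTransport, authorizer),
	}
	_ = httpClient
}
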
diff --git a/vendor/github.com/docker/distribution/uuid/uuid.go b/vendor/github.com/docker/distribution/uuid/uuid.go
deleted file mode 100644
index d433ccaf..00000000
--- a/vendor/github.com/docker/distribution/uuid/uuid.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Package uuid provides simple UUID generation. Only version 4 style UUIDs
-// can be generated.
-//
-// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs.
-package uuid
-
-import (
- "crypto/rand"
- "fmt"
- "io"
- "os"
- "syscall"
- "time"
-)
-
-const (
- // Bits is the number of bits in a UUID
- Bits = 128
-
- // Size is the number of bytes in a UUID
- Size = Bits / 8
-
- format = "%08x-%04x-%04x-%04x-%012x"
-)
-
-var (
- // ErrUUIDInvalid indicates a parsed string is not a valid uuid.
- ErrUUIDInvalid = fmt.Errorf("invalid uuid")
-
- // Loggerf can be used to override the default logging destination. Such
- // log messages in this library should be logged at warning or higher.
- Loggerf = func(format string, args ...interface{}) {}
-)
-
-// UUID represents a UUID value. UUIDs can be compared and set to other values
-// and accessed by byte.
-type UUID [Size]byte
-
-// Generate creates a new, version 4 uuid.
-func Generate() (u UUID) {
- const (
- // ensures we backoff for less than 450ms total. Use the following to
- // select new value, in units of 10ms:
- // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2
- maxretries = 9
- backoff = time.Millisecond * 10
- )
-
- var (
- totalBackoff time.Duration
- count int
- retries int
- )
-
- for {
- // This should never block but the read may fail. Because of this,
- // we just try to read the random number generator until we get
- // something. This is a very rare condition but may happen.
- b := time.Duration(retries) * backoff
- time.Sleep(b)
- totalBackoff += b
-
- n, err := io.ReadFull(rand.Reader, u[count:])
- if err != nil {
- if retryOnError(err) && retries < maxretries {
- count += n
- retries++
- Loggerf("error generating version 4 uuid, retrying: %v", err)
- continue
- }
-
- // Any other errors represent a system problem. What did someone
- // do to /dev/urandom?
- panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
- }
-
- break
- }
-
- u[6] = (u[6] & 0x0f) | 0x40 // set version byte
- u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b}
-
- return u
-}
-
-// Parse attempts to extract a uuid from the string or returns an error.
-func Parse(s string) (u UUID, err error) {
- if len(s) != 36 {
- return UUID{}, ErrUUIDInvalid
- }
-
- // create stack addresses for each section of the uuid.
- p := make([][]byte, 5)
-
- if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil {
- return u, err
- }
-
- copy(u[0:4], p[0])
- copy(u[4:6], p[1])
- copy(u[6:8], p[2])
- copy(u[8:10], p[3])
- copy(u[10:16], p[4])
-
- return
-}
-
-func (u UUID) String() string {
- return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:])
-}
-
-// retryOnError tries to detect whether or not retrying would be fruitful.
-func retryOnError(err error) bool {
- switch err := err.(type) {
- case *os.PathError:
- return retryOnError(err.Err) // unpack the target error
- case syscall.Errno:
- if err == syscall.EPERM {
- // EPERM represents an entropy pool exhaustion, a condition under
- // which we backoff and retry.
- return true
- }
- }
-
- return false
-}
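
A quick sketch of the removed uuid helpers; Generate only ever produces random
(version 4) UUIDs, and Parse round-trips the canonical 36-character form:

package main

import (
	"fmt"

	"github.com/docker/distribution/uuid"
)

func main() {
	u := uuid.Generate()
	fmt.Println(u.String()) // e.g. 9fccb273-5f0e-4d87-a5d0-4b4c0f2f7c86

	parsed, err := uuid.Parse(u.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == u) // true, UUID is a comparable [16]byte array
}
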
diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS
index 88032def..c7c64947 100644
--- a/vendor/github.com/docker/docker/AUTHORS
+++ b/vendor/github.com/docker/docker/AUTHORS
@@ -2,6 +2,7 @@
# This file lists all contributors to the repository.
# See hack/generate-authors.sh to make modifications.
+17neverends
7sunarni <710720732@qq.com>
Aanand Prasad
Aarni Koskela
@@ -189,6 +190,7 @@ Anes Hasicic
Angel Velazquez
Anil Belur
Anil Madhavapeddy
+Anirudh Aithal
Ankit Jain
Ankush Agarwal
Anonmily
@@ -227,7 +229,7 @@ Arun Gupta
Asad Saeeduddin
Asbjørn Enge
Ashly Mathew
-Austin Vazquez
+Austin Vazquez
averagehuman
Avi Das
Avi Kivity
@@ -293,6 +295,7 @@ Brandon Liu