diff --git a/go.mod b/go.mod index 8af87fcce..c66ce07df 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,6 @@ module coopcloud.tech/abra -go 1.24.0 - -toolchain go1.24.1 +go 1.24.2 require ( coopcloud.tech/tagcmp v0.0.0-20250818180036-0ec1b205b5ca @@ -12,17 +10,18 @@ require ( github.com/charmbracelet/lipgloss v1.1.0 github.com/charmbracelet/log v0.4.2 github.com/distribution/reference v0.6.0 - github.com/docker/cli v28.4.0+incompatible - github.com/docker/docker v28.4.0+incompatible + github.com/docker/cli v29.1.5+incompatible + github.com/docker/docker v28.5.2+incompatible github.com/docker/go-units v0.5.0 - github.com/go-git/go-git/v5 v5.16.2 + github.com/go-git/go-git/v5 v5.16.4 github.com/google/go-cmp v0.7.0 github.com/leonelquinteros/gotext v1.7.2 + github.com/moby/moby/client v0.2.1 github.com/moby/sys/signal v0.7.1 github.com/moby/term v0.5.2 github.com/pkg/errors v0.9.1 - github.com/schollz/progressbar/v3 v3.18.0 - golang.org/x/term v0.35.0 + github.com/schollz/progressbar/v3 v3.19.0 + golang.org/x/term v0.39.0 gopkg.in/yaml.v3 v3.0.1 gotest.tools/v3 v3.5.2 ) @@ -30,7 +29,7 @@ require ( require ( dario.cat/mergo v1.0.2 // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect - github.com/BurntSushi/toml v1.5.0 // indirect + github.com/BurntSushi/toml v1.6.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProtonMail/go-crypto v1.3.0 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect @@ -38,19 +37,21 @@ require ( github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/charmbracelet/colorprofile v0.3.2 // indirect - github.com/charmbracelet/x/ansi v0.10.2 // indirect - github.com/charmbracelet/x/cellbuf v0.0.13 // indirect + github.com/charmbracelet/colorprofile v0.4.1 // indirect + github.com/charmbracelet/x/ansi v0.11.4 // indirect + github.com/charmbracelet/x/cellbuf v0.0.14 // indirect github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 // indirect - github.com/charmbracelet/x/term v0.2.1 // indirect - github.com/clipperhouse/uax29/v2 v2.2.0 // indirect - github.com/cloudflare/circl v1.6.1 // indirect + github.com/charmbracelet/x/term v0.2.2 // indirect + github.com/clipperhouse/displaywidth v0.7.0 // indirect + github.com/clipperhouse/stringish v0.1.1 // indirect + github.com/clipperhouse/uax29/v2 v2.3.0 // indirect + github.com/cloudflare/circl v1.6.2 // indirect github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect - github.com/cyphar/filepath-securejoin v0.5.0 // indirect + github.com/cyphar/filepath-securejoin v0.6.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/go-connections v0.6.0 // indirect @@ -61,20 +62,20 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.6.2 // indirect - github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-git/go-billy/v5 v5.7.0 // indirect + github.com/go-logfmt/logfmt v0.6.1 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-viper/mapstructure/v2 
v2.4.0 // indirect + github.com/go-viper/mapstructure/v2 v2.5.0 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kevinburke/ssh_config v1.4.0 // indirect - github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/compress v1.18.3 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/lucasb-eyer/go-colorful v1.3.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect @@ -82,14 +83,14 @@ require ( github.com/mattn/go-localereader v0.0.1 // indirect github.com/mattn/go-runewidth v0.0.19 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect - github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/go-archive v0.1.0 // indirect + github.com/moby/go-archive v0.2.0 // indirect + github.com/moby/moby/api v1.53.0-rc.1 // indirect github.com/moby/sys/atomicwriter v0.1.0 // indirect github.com/moby/sys/user v0.4.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect - github.com/morikuni/aec v1.0.0 // indirect + github.com/morikuni/aec v1.1.0 // indirect github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect github.com/muesli/cancelreader v0.2.2 // indirect github.com/muesli/termenv v0.16.0 // indirect @@ -100,39 +101,40 @@ require ( github.com/pjbgf/sha1cd v0.5.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.1 // indirect - github.com/prometheus/procfs v0.17.0 // indirect + github.com/prometheus/common v0.67.5 // indirect + github.com/prometheus/procfs v0.19.2 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect - github.com/skeema/knownhosts v1.3.1 // indirect + github.com/sirupsen/logrus v1.9.4 // indirect + github.com/skeema/knownhosts v1.3.2 // indirect github.com/spf13/pflag v1.0.10 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect - go.opentelemetry.io/otel v1.38.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect + go.opentelemetry.io/otel v1.39.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 // indirect + 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect - go.opentelemetry.io/otel/metric v1.38.0 // indirect - go.opentelemetry.io/otel/sdk v1.38.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect - go.opentelemetry.io/otel/trace v1.38.0 // indirect - go.opentelemetry.io/proto/otlp v1.8.0 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect + go.opentelemetry.io/otel/sdk v1.39.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.39.0 // indirect + go.opentelemetry.io/otel/trace v1.39.0 // indirect + go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect - golang.org/x/crypto v0.42.0 // indirect - golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect - golang.org/x/net v0.44.0 // indirect - golang.org/x/text v0.29.0 // indirect - golang.org/x/time v0.13.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect - google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.9 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.47.0 // indirect + golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect + golang.org/x/net v0.49.0 // indirect + golang.org/x/text v0.33.0 // indirect + golang.org/x/time v0.14.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260114163908-3f89685c29c3 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3 // indirect + google.golang.org/grpc v1.78.0 // indirect + google.golang.org/protobuf v1.36.11 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) @@ -141,7 +143,7 @@ require ( github.com/containers/image v3.0.2+incompatible github.com/containers/storage v1.38.2 // indirect github.com/decentral1se/passgen v1.0.1 - github.com/docker/docker-credential-helpers v0.9.3 // indirect + github.com/docker/docker-credential-helpers v0.9.5 // indirect github.com/fvbommel/sortorder v1.1.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/gorilla/mux v1.8.1 // indirect @@ -151,9 +153,8 @@ require ( github.com/opencontainers/image-spec v1.1.1 // indirect github.com/prometheus/client_golang v1.23.2 // indirect github.com/sergi/go-diff v1.4.0 // indirect - github.com/spf13/cobra v1.10.1 + github.com/spf13/cobra v1.10.2 github.com/stretchr/testify v1.11.1 - github.com/theupdateframework/notary v0.7.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect - golang.org/x/sys v0.36.0 + golang.org/x/sys v0.40.0 ) diff --git a/go.sum b/go.sum index 13d10638b..245af2cf3 100644 --- a/go.sum +++ b/go.sum @@ -49,8 +49,8 @@ github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= -github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk= 
+github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -85,7 +85,6 @@ github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -104,14 +103,12 @@ github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiE github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8= github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA= -github.com/beorn7/perks v0.0.0-20150223135152-b965b613227f/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= @@ -120,7 +117,6 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod 
h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= @@ -129,26 +125,29 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw= github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4= -github.com/charmbracelet/colorprofile v0.3.2 h1:9J27WdztfJQVAQKX2WOlSSRB+5gaKqqITmrvb1uTIiI= -github.com/charmbracelet/colorprofile v0.3.2/go.mod h1:mTD5XzNeWHj8oqHb+S1bssQb7vIHbepiebQ2kPKVKbI= +github.com/charmbracelet/colorprofile v0.4.1 h1:a1lO03qTrSIRaK8c3JRxJDZOvhvIeSco3ej+ngLk1kk= +github.com/charmbracelet/colorprofile v0.4.1/go.mod h1:U1d9Dljmdf9DLegaJ0nGZNJvoXAhayhmidOdcBwAvKk= github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= github.com/charmbracelet/log v0.4.2 h1:hYt8Qj6a8yLnvR+h7MwsJv/XvmBJXiueUcI3cIxsyig= github.com/charmbracelet/log v0.4.2/go.mod h1:qifHGX/tc7eluv2R6pWIpyHDDrrb/AG71Pf2ysQu5nw= -github.com/charmbracelet/x/ansi v0.10.2 h1:ith2ArZS0CJG30cIUfID1LXN7ZFXRCww6RUvAPA+Pzw= -github.com/charmbracelet/x/ansi v0.10.2/go.mod h1:HbLdJjQH4UH4AqA2HpRWuWNluRE6zxJH/yteYEYCFa8= -github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k= -github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= +github.com/charmbracelet/x/ansi v0.11.3 h1:6DcVaqWI82BBVM/atTyq6yBoRLZFBsnoDoX9GCu2YOI= +github.com/charmbracelet/x/ansi v0.11.3/go.mod h1:yI7Zslym9tCJcedxz5+WBq+eUGMJT0bM06Fqy1/Y4dI= +github.com/charmbracelet/x/ansi v0.11.4 h1:6G65PLu6HjmE858CnTUQY1LXT3ZUWwfvqEROLF8vqHI= +github.com/charmbracelet/x/ansi v0.11.4/go.mod h1:/5AZ+UfWExW3int5H5ugnsG/PWjNcSQcwYsHBlPFQN4= +github.com/charmbracelet/x/cellbuf v0.0.14 h1:iUEMryGyFTelKW3THW4+FfPgi4fkmKnnaLOXuc+/Kj4= +github.com/charmbracelet/x/cellbuf v0.0.14/go.mod h1:P447lJl49ywBbil/KjCk2HexGh4tEY9LH0/1QrZZ9rA= github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ= github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= -github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= -github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= +github.com/charmbracelet/x/term v0.2.2 h1:xVRT/S2ZcKdhhOuSP4t5cLi5o+JxklsoEObBSgfgZRk= +github.com/charmbracelet/x/term v0.2.2/go.mod h1:kF8CY5RddLWrsgVwpw4kAa6TESp6EB5y3uxGLeCqzAI= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod 
h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= @@ -164,11 +163,16 @@ github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/clipperhouse/uax29/v2 v2.2.0 h1:ChwIKnQN3kcZteTXMgb1wztSgaU+ZemkgWdohwgs8tY= -github.com/clipperhouse/uax29/v2 v2.2.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM= -github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= -github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= -github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= +github.com/clipperhouse/displaywidth v0.6.2 h1:ZDpTkFfpHOKte4RG5O/BOyf3ysnvFswpyYrV7z2uAKo= +github.com/clipperhouse/displaywidth v0.6.2/go.mod h1:R+kHuzaYWFkTm7xoMmK1lFydbci4X2CicfbGstSGg0o= +github.com/clipperhouse/displaywidth v0.7.0 h1:QNv1GYsnLX9QBrcWUtMlogpTXuM5FVnBwKWp1O5NwmE= +github.com/clipperhouse/displaywidth v0.7.0/go.mod h1:R+kHuzaYWFkTm7xoMmK1lFydbci4X2CicfbGstSGg0o= +github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs= +github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA= +github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4= +github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= +github.com/cloudflare/circl v1.6.2 h1:hL7VBpHHKzrV5WTfHCaBsgx/HGbBYlgrwvNXEVDYYsQ= +github.com/cloudflare/circl v1.6.2/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -294,12 +298,12 @@ github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6N github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= +github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/cyphar/filepath-securejoin v0.5.0 h1:hIAhkRBMQ8nIeuVwcAoymp7MY4oherZdAxD+m0u9zaw= -github.com/cyphar/filepath-securejoin v0.5.0/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE= 
+github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= @@ -309,7 +313,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/decentral1se/passgen v1.0.1 h1:j2AxK/kHKxDHWZZfkJj8Wgae9+O+DYEqR5sjKthIYKA= github.com/decentral1se/passgen v1.0.1/go.mod h1:530V+lNoPhKtkrX2fIVsIfLhkl47CuiOM7HRgi7C+SU= -github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -318,21 +321,25 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY= -github.com/docker/cli v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v29.1.3+incompatible h1:+kz9uDWgs+mAaIZojWfFt4d53/jv0ZUOOoSh5ZnH36c= +github.com/docker/cli v29.1.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v29.1.4+incompatible h1:AI8fwZhqsAsrqZnVv9h6lbexeW/LzNTasf6A4vcNN8M= +github.com/docker/cli v29.1.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v29.1.5+incompatible h1:GckbANUt3j+lsnQ6eCcQd70mNSOismSHWt8vk2AX8ao= +github.com/docker/cli v29.1.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v28.4.0+incompatible h1:KVC7bz5zJY/4AZe/78BIvCnPsLaC9T/zh72xnlrTTOk= -github.com/docker/docker v28.4.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= +github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod 
h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= -github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= -github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= -github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0= -github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q= +github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI= +github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= +github.com/docker/docker-credential-helpers v0.9.5 h1:EFNN8DHvaiK8zVqFA2DT6BjXE0GzfLOZ38ggPTKePkY= +github.com/docker/docker-credential-helpers v0.9.5/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= @@ -351,7 +358,6 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= @@ -367,7 +373,6 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= -github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= @@ -389,12 +394,12 @@ github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= -github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= +github.com/go-git/go-billy/v5 v5.7.0 h1:83lBUJhGWhYp0ngzCMSgllhUSuoHP1iEWYjsPl9nwqM= 
+github.com/go-git/go-billy/v5 v5.7.0/go.mod h1:/1IUejTKH8xipsAcdfcSAlUlo2J7lkYV8GTKxAT/L3E= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM= -github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= +github.com/go-git/go-git/v5 v5.16.4 h1:7ajIEZHZJULcyJebDLo99bGgS0jRrOxzZG4uCk2Yb2Y= +github.com/go-git/go-git/v5 v5.16.4/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -403,8 +408,8 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= -github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.1 h1:4hvbpePJKnIzH1B+8OR/JPbTx37NktoI9LE2QZBBkvE= +github.com/go-logfmt/logfmt v0.6.1/go.mod h1:EV2pOAQoZaT1ZXZbqDl5hrymndi4SY9ED9/z6CO0XAk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -423,10 +428,11 @@ github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8 github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro= +github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= @@ -435,14 +441,12 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis 
v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= -github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -477,7 +481,6 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -515,7 +518,6 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= @@ -529,10 +531,10 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= 
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= -github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 h1:kEISI/Gx67NzH3nJxAmY/dGac80kKZgZt134u7Y/k1s= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4/go.mod h1:6Nz966r3vQYCqIzWsuEl9d7cf7mRhtDmm++sOxlnfxI= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -562,9 +564,6 @@ github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLf github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo= -github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= @@ -576,7 +575,6 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= @@ -589,8 +587,10 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= +github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw= 
+github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -612,11 +612,9 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leonelquinteros/gotext v1.7.2 h1:bDPndU8nt+/kRo1m4l/1OXiiy2v7Z7dfPQ9+YP7G1Mc= github.com/leonelquinteros/gotext v1.7.2/go.mod h1:9/haCkm5P7Jay1sxKDGJ5WIg4zkz8oZKw4ekNpALob8= -github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag= github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= -github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -639,29 +637,28 @@ github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhg github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= -github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 
-github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ= -github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo= +github.com/moby/go-archive v0.2.0 h1:zg5QDUM2mi0JIM9fdQZWC7U8+2ZfixfTYoHL7rWUcP8= +github.com/moby/go-archive v0.2.0/go.mod h1:mNeivT14o8xU+5q1YnNrkQVpK+dnNe/K6fHqnTg4qPU= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/moby/api v1.53.0-rc.1 h1:M5SUwRbTrNy+plCTiV6gn4ZiN/Csynk0imIsUmOgHGI= +github.com/moby/moby/api v1.53.0-rc.1/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc= +github.com/moby/moby/client v0.2.1 h1:1Grh1552mvv6i+sYOdY+xKKVTvzJegcVMhuXocyDz/k= +github.com/moby/moby/client v0.2.1/go.mod h1:O+/tw5d4a1Ha/ZA/tPxIZJapJRUS6LNZ1wiVRxYHyUE= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= @@ -688,8 +685,9 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ= +github.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI= github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo= @@ -757,7 +755,6 @@ github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqi github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -772,7 +769,6 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -793,8 +789,10 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= +github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= +github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= +github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -806,8 +804,8 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -816,13 +814,15 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= 
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA= -github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= +github.com/schollz/progressbar/v3 v3.19.0 h1:Ea18xuIRQXLAUidVDox3AbwfUhD0/1IvohyTutOIFoc= +github.com/schollz/progressbar/v3 v3.19.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= @@ -839,26 +839,24 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= -github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= +github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= +github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= +github.com/skeema/knownhosts v1.3.2 h1:EDL9mgf4NzwMXCTfaxSD/o/a5fxDw/xL9nkU28JjdBg= +github.com/skeema/knownhosts v1.3.2/go.mod h1:bEg3iQAuw+jyiw+484wwFJoKSLwcfd7fqRy+N0QTiow= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= -github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= -github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod 
h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -866,7 +864,6 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -888,8 +885,6 @@ github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtse github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c= -github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= @@ -941,29 +936,29 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod 
h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0 h1:cEf8jF6WbuGQWUVcqgyWtTR0kOOAWY1DYZ+UhvdmQPw= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0/go.mod h1:k1lzV5n5U3HkGvTCJHraTAGJ7MqsgL1wrGwTj1Isfiw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 h1:in9O8ESIOlwJAEGTkkf34DesGRAc/Pn8qJ7k3r/42LM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0/go.mod h1:Rp0EXBm5tfnv0WL+ARyO/PHBEaEAT8UUHQ6AGJcSq6c= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v1.8.0 
h1:fRAZQDcAFHySxpJ1TwlA1cJ4tvcrw7nXl9xWWC8N5CE= -go.opentelemetry.io/proto/otlp v1.8.0/go.mod h1:tIeYOeNBU4cvmPqpaji1P+KbB4Oloai8wN4rWzRrFF0= +go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= +go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -972,26 +967,27 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= -golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1002,8 +998,10 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU= -golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk= +golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93 h1:fQsdNF2N+/YewlRZiricy4P1iimyPKZ/xwniHj8Q2a0= +golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93/go.mod h1:EPRbTFwzwjXj9NpYyyrvenVh9Y+GFeEvMNh7Xuz7xgU= +golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU= +golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1067,8 +1065,10 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= -golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1164,13 +1164,16 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod 
h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= -golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1180,16 +1183,18 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= -golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1285,12 +1290,15 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG 
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU= -google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 h1:i8QOKZfYg6AbGVZzUAY3LrNWCKF8O6zFisU9Wl9RER4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ= +google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b h1:uA40e2M6fYRBf0+8uN5mLlqUtV192iiksiICIBkYJ1E= +google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:Xa7le7qx2vmqB/SzWUBa7KdMjpdpAHlh5QCSnjessQk= +google.golang.org/genproto/googleapis/api v0.0.0-20260114163908-3f89685c29c3 h1:X9z6obt+cWRX8XjDVOn+SZWhWe5kZHm46TThU9j+jss= +google.golang.org/genproto/googleapis/api v0.0.0-20260114163908-3f89685c29c3/go.mod h1:dd646eSK+Dk9kxVBl1nChEOhJPtMXriCcVb4x3o6J+E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3 h1:C4WAdL+FbjnGlpp2S+HMVhBeCq2Lcib4xZqfPNF6OoQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1308,8 +1316,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= -google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1323,11 +1331,10 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf 
v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= -google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/cenkalti/backoff.v2 v2.2.1/go.mod h1:S0QdOvT2AlerfSBkp0O+dk+bbIMaNbEmVk876gPCthU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1342,7 +1349,6 @@ gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKW gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -1362,6 +1368,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= @@ -1403,6 +1410,8 @@ k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= +pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/pkg/context/context.go b/pkg/context/context.go index d36a76b2c..4acdf73f6 100644 --- a/pkg/context/context.go +++ b/pkg/context/context.go @@ 
-2,13 +2,18 @@ package context import ( "errors" + "os" "coopcloud.tech/abra/pkg/i18n" "github.com/docker/cli/cli/command" dConfig "github.com/docker/cli/cli/config" "github.com/docker/cli/cli/context" + "github.com/docker/cli/cli/context/docker" + "github.com/docker/cli/cli/context/store" contextStore "github.com/docker/cli/cli/context/store" cliflags "github.com/docker/cli/cli/flags" + dopts "github.com/docker/cli/opts" + "github.com/moby/moby/client" ) func NewDefaultDockerContextStore() *command.ContextStoreWithDefault { @@ -21,13 +26,107 @@ func NewDefaultDockerContextStore() *command.ContextStoreWithDefault { dockerContextStore := &command.ContextStoreWithDefault{ Store: store, Resolver: func() (*command.DefaultContext, error) { - return command.ResolveDefaultContext(opts, storeConfig) + return resolveDefaultContext(opts, storeConfig) }, } return dockerContextStore } +// resolveDefaultContext creates the default context Metadata for the current CLI invocation parameters. +func resolveDefaultContext(opts *cliflags.ClientOptions, config store.Config) (*command.DefaultContext, error) { + contextTLSData := store.ContextTLSData{ + Endpoints: make(map[string]store.EndpointTLSData), + } + + contextMetadata := store.Metadata{ + Endpoints: make(map[string]any), + Metadata: command.DockerContext{ + Description: "", + }, + Name: "default", + } + + dockerEP, err := resolveDefaultDockerEndpoint(opts) + if err != nil { + return nil, err + } + + contextMetadata.Endpoints[docker.DockerEndpoint] = dockerEP.EndpointMeta + if dockerEP.TLSData != nil { + contextTLSData.Endpoints[docker.DockerEndpoint] = *dockerEP.TLSData.ToStoreTLSData() + } + + if err := config.ForeachEndpointType(func(n string, get store.TypeGetter) error { + if n == docker.DockerEndpoint { // handled above + return nil + } + ep := get() + if i, ok := ep.(command.EndpointDefaultResolver); ok { + meta, tls, err := i.ResolveDefault() + if err != nil { + return err + } + if meta == nil { + return nil + } + contextMetadata.Endpoints[n] = meta + if tls != nil { + contextTLSData.Endpoints[n] = *tls + } + } + // Nothing to be done + return nil + }); err != nil { + return nil, err + } + + return &command.DefaultContext{Meta: contextMetadata, TLS: contextTLSData}, nil +} + +// resolveDefaultDockerEndpoint resolves the Docker endpoint for the default context (based on config, env vars, and CLI flags). +func resolveDefaultDockerEndpoint(opts *cliflags.ClientOptions) (docker.Endpoint, error) { + // defaultToTLS determines whether we should use a TLS host as default + // if nothing was configured by the user.
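+ // opts.TLSOptions is non-nil only when the user explicitly enabled TLS
+ // (e.g. via --tlsverify or the DOCKER_TLS_VERIFY environment variable),
+ // so an unconfigured host then defaults to the TLS TCP port rather than
+ // the local socket; see dopts.ParseHost, called from getServerHost below.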
+ defaultToTLS := opts.TLSOptions != nil + host, err := getServerHost(opts.Hosts, defaultToTLS) + if err != nil { + return docker.Endpoint{}, err + } + + var ( + skipTLSVerify bool + tlsData *context.TLSData + ) + + if opts.TLSOptions != nil { + skipTLSVerify = opts.TLSOptions.InsecureSkipVerify + tlsData, err = context.TLSDataFromFiles(opts.TLSOptions.CAFile, opts.TLSOptions.CertFile, opts.TLSOptions.KeyFile) + if err != nil { + return docker.Endpoint{}, err + } + } + + return docker.Endpoint{ + EndpointMeta: docker.EndpointMeta{ + Host: host, + SkipTLSVerify: skipTLSVerify, + }, + TLSData: tlsData, + }, nil +} + +func getServerHost(hosts []string, defaultToTLS bool) (string, error) { + switch len(hosts) { + case 0: + return dopts.ParseHost(defaultToTLS, os.Getenv(client.EnvOverrideHost)) + case 1: + return dopts.ParseHost(defaultToTLS, hosts[0]) + default: + return "", errors.New("specify only one -H") + } +} + func GetContextEndpoint(ctx contextStore.Metadata) (string, error) { endpointmeta, ok := ctx.Endpoints["docker"].(context.EndpointMetaBase) if !ok { diff --git a/pkg/upstream/commandconn/connection.go b/pkg/upstream/commandconn/connection.go index 93e7a1423..ab00f7bb5 100644 --- a/pkg/upstream/commandconn/connection.go +++ b/pkg/upstream/commandconn/connection.go @@ -11,7 +11,7 @@ import ( "github.com/docker/cli/cli/connhelper/ssh" "github.com/docker/cli/cli/context/docker" dCliContextStore "github.com/docker/cli/cli/context/store" - dClient "github.com/docker/docker/client" + dClient "github.com/moby/moby/client" "github.com/pkg/errors" ) @@ -73,7 +73,7 @@ func getDockerEndpoint(host string) (docker.Endpoint, error) { if err != nil { return docker.Endpoint{}, err } - if _, err := dClient.NewClientWithOpts(opts...); err != nil { + if _, err := dClient.New(opts...); err != nil { return docker.Endpoint{}, err } return ep, nil diff --git a/pkg/upstream/stack/stack.go b/pkg/upstream/stack/stack.go index 2e19c1382..694a9d5d7 100644 --- a/pkg/upstream/stack/stack.go +++ b/pkg/upstream/stack/stack.go @@ -19,7 +19,7 @@ import ( "coopcloud.tech/abra/pkg/ui" "coopcloud.tech/abra/pkg/upstream/convert" "github.com/docker/cli/cli/command" - "github.com/docker/cli/cli/command/stack/formatter" + "github.com/docker/cli/cli/command/formatter" composetypes "github.com/docker/cli/cli/compose/types" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md index 235496eeb..1101d206d 100644 --- a/vendor/github.com/BurntSushi/toml/README.md +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -1,7 +1,7 @@ TOML stands for Tom's Obvious, Minimal Language. This Go package provides a reflection interface similar to Go's standard library `json` and `xml` packages. -Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). +Compatible with TOML version [v1.1.0](https://toml.io/en/v1.1.0). 
Documentation: https://pkg.go.dev/github.com/BurntSushi/toml diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index 3fa516caa..ed884840f 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -206,6 +206,13 @@ func markDecodedRecursive(md *MetaData, tmap map[string]any) { markDecodedRecursive(md, tmap) md.context = md.context[0 : len(md.context)-1] } + if tarr, ok := tmap[key].([]map[string]any); ok { + for _, elm := range tarr { + md.context = append(md.context, key) + markDecodedRecursive(md, elm) + md.context = md.context[0 : len(md.context)-1] + } + } } } @@ -423,7 +430,7 @@ func (md *MetaData) unifyString(data any, rv reflect.Value) error { if i, ok := data.(int64); ok { rv.SetString(strconv.FormatInt(i, 10)) } else if f, ok := data.(float64); ok { - rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) + rv.SetString(strconv.FormatFloat(f, 'g', -1, 64)) } else { return md.badtype("string", data) } diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index ac196e7df..bd7aa1865 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -228,9 +228,9 @@ func (enc *Encoder) eElement(rv reflect.Value) { } switch v.Location() { default: - enc.wf(v.Format(format)) + enc.write(v.Format(format)) case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: - enc.wf(v.In(time.UTC).Format(format)) + enc.write(v.In(time.UTC).Format(format)) } return case Marshaler: @@ -279,40 +279,40 @@ func (enc *Encoder) eElement(rv reflect.Value) { case reflect.String: enc.writeQuoted(rv.String()) case reflect.Bool: - enc.wf(strconv.FormatBool(rv.Bool())) + enc.write(strconv.FormatBool(rv.Bool())) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - enc.wf(strconv.FormatInt(rv.Int(), 10)) + enc.write(strconv.FormatInt(rv.Int(), 10)) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - enc.wf(strconv.FormatUint(rv.Uint(), 10)) + enc.write(strconv.FormatUint(rv.Uint(), 10)) case reflect.Float32: f := rv.Float() if math.IsNaN(f) { if math.Signbit(f) { - enc.wf("-") + enc.write("-") } - enc.wf("nan") + enc.write("nan") } else if math.IsInf(f, 0) { if math.Signbit(f) { - enc.wf("-") + enc.write("-") } - enc.wf("inf") + enc.write("inf") } else { - enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + enc.write(floatAddDecimal(strconv.FormatFloat(f, 'g', -1, 32))) } case reflect.Float64: f := rv.Float() if math.IsNaN(f) { if math.Signbit(f) { - enc.wf("-") + enc.write("-") } - enc.wf("nan") + enc.write("nan") } else if math.IsInf(f, 0) { if math.Signbit(f) { - enc.wf("-") + enc.write("-") } - enc.wf("inf") + enc.write("inf") } else { - enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + enc.write(floatAddDecimal(strconv.FormatFloat(f, 'g', -1, 64))) } case reflect.Array, reflect.Slice: enc.eArrayOrSliceElement(rv) @@ -330,27 +330,32 @@ func (enc *Encoder) eElement(rv reflect.Value) { // By the TOML spec, all floats must have a decimal with at least one number on // either side. func floatAddDecimal(fstr string) string { - if !strings.Contains(fstr, ".") { - return fstr + ".0" + for _, c := range fstr { + if c == 'e' { // Exponent syntax + return fstr + } + if c == '.' 
{ + return fstr + } } - return fstr + return fstr + ".0" } func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) + enc.write(`"` + dblQuotedReplacer.Replace(s) + `"`) } func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { length := rv.Len() - enc.wf("[") + enc.write("[") for i := 0; i < length; i++ { elem := eindirect(rv.Index(i)) enc.eElement(elem) if i != length-1 { - enc.wf(", ") + enc.write(", ") } } - enc.wf("]") + enc.write("]") } func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { @@ -363,7 +368,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { continue } enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key) + enc.writef("%s[[%s]]", enc.indentStr(key), key) enc.newline() enc.eMapOrStruct(key, trv, false) } @@ -376,7 +381,7 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) { enc.newline() } if len(key) > 0 { - enc.wf("%s[%s]", enc.indentStr(key), key) + enc.writef("%s[%s]", enc.indentStr(key), key) enc.newline() } enc.eMapOrStruct(key, rv, false) @@ -422,7 +427,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { if inline { enc.writeKeyValue(Key{mapKey.String()}, val, true) if trailC || i != len(mapKeys)-1 { - enc.wf(", ") + enc.write(", ") } } else { enc.encode(key.add(mapKey.String()), val) @@ -431,12 +436,12 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { } if inline { - enc.wf("{") + enc.write("{") } writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) writeMapKeys(mapKeysSub, false) if inline { - enc.wf("}") + enc.write("}") } } @@ -534,7 +539,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.writeKeyValue(Key{keyName}, fieldVal, true) if fieldIndex[0] != totalFields-1 { - enc.wf(", ") + enc.write(", ") } } else { enc.encode(key.add(keyName), fieldVal) @@ -543,14 +548,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { } if inline { - enc.wf("{") + enc.write("{") } l := len(fieldsDirect) + len(fieldsSub) writeFields(fieldsDirect, l) writeFields(fieldsSub, l) if inline { - enc.wf("}") + enc.write("}") } } @@ -700,7 +705,7 @@ func isEmpty(rv reflect.Value) bool { func (enc *Encoder) newline() { if enc.hasWritten { - enc.wf("\n") + enc.write("\n") } } @@ -722,14 +727,22 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { enc.eElement(val) return } - enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.writef("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) enc.eElement(val) if !inline { enc.newline() } } -func (enc *Encoder) wf(format string, v ...any) { +func (enc *Encoder) write(s string) { + _, err := enc.w.WriteString(s) + if err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) writef(format string, v ...any) { _, err := fmt.Fprintf(enc.w, format, v...) 
if err != nil { encPanic(err) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index 1c3b47702..9f4396a0f 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -13,7 +13,6 @@ type itemType int const ( itemError itemType = iota - itemNIL // used in the parser to indicate no type itemEOF itemText itemString @@ -47,14 +46,13 @@ func (p Position) String() string { } type lexer struct { - input string - start int - pos int - line int - state stateFn - items chan item - tomlNext bool - esc bool + input string + start int + pos int + line int + state stateFn + items chan item + esc bool // Allow for backing up up to 4 runes. This is necessary because TOML // contains 3-rune tokens (""" and '''). @@ -90,14 +88,13 @@ func (lx *lexer) nextItem() item { } } -func lex(input string, tomlNext bool) *lexer { +func lex(input string) *lexer { lx := &lexer{ - input: input, - state: lexTop, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - line: 1, - tomlNext: tomlNext, + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, } return lx } @@ -108,7 +105,7 @@ func (lx *lexer) push(state stateFn) { func (lx *lexer) pop() stateFn { if len(lx.stack) == 0 { - return lx.errorf("BUG in lexer: no states to pop") + panic("BUG in lexer: no states to pop") } last := lx.stack[len(lx.stack)-1] lx.stack = lx.stack[0 : len(lx.stack)-1] @@ -305,6 +302,8 @@ func lexTop(lx *lexer) stateFn { return lexTableStart case eof: if lx.pos > lx.start { + // TODO: never reached? I think this can only occur on a bug in the + // lexer(?) return lx.errorf("unexpected EOF") } lx.emit(itemEOF) @@ -392,8 +391,6 @@ func lexTableNameStart(lx *lexer) stateFn { func lexTableNameEnd(lx *lexer) stateFn { lx.skip(isWhitespace) switch r := lx.next(); { - case isWhitespace(r): - return lexTableNameEnd case r == '.': lx.ignore() return lexTableNameStart @@ -412,7 +409,7 @@ func lexTableNameEnd(lx *lexer) stateFn { // Lexes only one part, e.g. only 'a' inside 'a.b'. func lexBareName(lx *lexer) stateFn { r := lx.next() - if isBareKeyChar(r, lx.tomlNext) { + if isBareKeyChar(r) { return lexBareName } lx.backup() @@ -420,23 +417,23 @@ func lexBareName(lx *lexer) stateFn { return lx.pop() } -// lexBareName lexes one part of a key or table. -// -// It assumes that at least one valid character for the table has already been -// read. +// lexQuotedName lexes one part of a quoted key or table name. It assumes that +// it starts lexing at the quote itself (" or '). // // Lexes only one part, e.g. only '"a"' inside '"a".b'. func lexQuotedName(lx *lexer) stateFn { r := lx.next() switch { - case isWhitespace(r): - return lexSkip(lx, lexValue) case r == '"': lx.ignore() // ignore the '"' return lexString case r == '\'': lx.ignore() // ignore the "'" return lexRawString + + // TODO: I don't think any of the below conditions can ever be reached? 
+ case isWhitespace(r): + return lexSkip(lx, lexValue) case r == eof: return lx.errorf("unexpected EOF; expected value") default: @@ -464,17 +461,19 @@ func lexKeyStart(lx *lexer) stateFn { func lexKeyNameStart(lx *lexer) stateFn { lx.skip(isWhitespace) switch r := lx.peek(); { - case r == '=' || r == eof: - return lx.errorf("unexpected '='") - case r == '.': - return lx.errorf("unexpected '.'") + default: + lx.push(lexKeyEnd) + return lexBareName case r == '"' || r == '\'': lx.ignore() lx.push(lexKeyEnd) return lexQuotedName - default: - lx.push(lexKeyEnd) - return lexBareName + + // TODO: I think these can never be reached? + case r == '=' || r == eof: + return lx.errorf("unexpected '='") + case r == '.': + return lx.errorf("unexpected '.'") } } @@ -485,7 +484,7 @@ func lexKeyEnd(lx *lexer) stateFn { switch r := lx.next(); { case isWhitespace(r): return lexSkip(lx, lexKeyEnd) - case r == eof: + case r == eof: // TODO: never reached return lx.errorf("unexpected EOF; expected key separator '='") case r == '.': lx.ignore() @@ -628,10 +627,7 @@ func lexInlineTableValue(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValue) case isNL(r): - if lx.tomlNext { - return lexSkip(lx, lexInlineTableValue) - } - return lx.errorPrevLine(errLexInlineTableNL{}) + return lexSkip(lx, lexInlineTableValue) case r == '#': lx.push(lexInlineTableValue) return lexCommentStart @@ -653,10 +649,7 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValueEnd) case isNL(r): - if lx.tomlNext { - return lexSkip(lx, lexInlineTableValueEnd) - } - return lx.errorPrevLine(errLexInlineTableNL{}) + return lexSkip(lx, lexInlineTableValueEnd) case r == '#': lx.push(lexInlineTableValueEnd) return lexCommentStart @@ -664,10 +657,7 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { lx.ignore() lx.skip(isWhitespace) if lx.peek() == '}' { - if lx.tomlNext { - return lexInlineTableValueEnd - } - return lx.errorf("trailing comma not allowed in inline tables") + return lexInlineTableValueEnd } return lexInlineTableValue case r == '}': @@ -855,9 +845,6 @@ func lexStringEscape(lx *lexer) stateFn { r := lx.next() switch r { case 'e': - if !lx.tomlNext { - return lx.error(errLexEscape{r}) - } fallthrough case 'b': fallthrough @@ -878,9 +865,6 @@ func lexStringEscape(lx *lexer) stateFn { case '\\': return lx.pop() case 'x': - if !lx.tomlNext { - return lx.error(errLexEscape{r}) - } return lexHexEscape case 'u': return lexShortUnicodeEscape @@ -928,19 +912,9 @@ func lexLongUnicodeEscape(lx *lexer) stateFn { // lexBaseNumberOrDate can differentiate base prefixed integers from other // types. func lexNumberOrDateStart(lx *lexer) stateFn { - r := lx.next() - switch r { - case '0': + if lx.next() == '0' { return lexBaseNumberOrDate } - - if !isDigit(r) { - // The only way to reach this state is if the value starts - // with a digit, so specifically treat anything else as an - // error. 
- return lx.errorf("expected a digit but got %q", r) - } - return lexNumberOrDate } @@ -1196,13 +1170,13 @@ func lexSkip(lx *lexer, nextState stateFn) stateFn { } func (s stateFn) String() string { + if s == nil { + return "" + } name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() if i := strings.LastIndexByte(name, '.'); i > -1 { name = name[i+1:] } - if s == nil { - name = "" - } return name + "()" } @@ -1210,8 +1184,6 @@ func (itype itemType) String() string { switch itype { case itemError: return "Error" - case itemNIL: - return "NIL" case itemEOF: return "EOF" case itemText: @@ -1226,18 +1198,22 @@ func (itype itemType) String() string { return "Float" case itemDatetime: return "DateTime" - case itemTableStart: - return "TableStart" - case itemTableEnd: - return "TableEnd" - case itemKeyStart: - return "KeyStart" - case itemKeyEnd: - return "KeyEnd" case itemArray: return "Array" case itemArrayEnd: return "ArrayEnd" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemArrayTableStart: + return "ArrayTableStart" + case itemArrayTableEnd: + return "ArrayTableEnd" + case itemKeyStart: + return "KeyStart" + case itemKeyEnd: + return "KeyEnd" case itemCommentStart: return "CommentStart" case itemInlineTableStart: @@ -1266,7 +1242,7 @@ func isDigit(r rune) bool { return r >= '0' && r <= '9' } func isBinary(r rune) bool { return r == '0' || r == '1' } func isOctal(r rune) bool { return r >= '0' && r <= '7' } func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } -func isBareKeyChar(r rune, tomlNext bool) bool { +func isBareKeyChar(r rune) bool { return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' } diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index e3ea8a9a2..b474247ae 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -3,7 +3,6 @@ package toml import ( "fmt" "math" - "os" "strconv" "strings" "time" @@ -17,7 +16,6 @@ type parser struct { context Key // Full key for the current hash in scope. currentKey string // Base key name for everything except hashes. pos Position // Current position in the TOML file. - tomlNext bool ordered []Key // List of keys in the order that they appear in the TOML data. 
@@ -32,8 +30,6 @@ type keyInfo struct { } func parse(data string) (p *parser, err error) { - _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110") - defer func() { if r := recover(); r != nil { if pErr, ok := r.(ParseError); ok { @@ -73,10 +69,9 @@ func parse(data string) (p *parser, err error) { p = &parser{ keyInfo: make(map[string]keyInfo), mapping: make(map[string]any), - lx: lex(data, tomlNext), + lx: lex(data), ordered: make([]Key, 0), implicits: make(map[string]struct{}), - tomlNext: tomlNext, } for { item := p.next() @@ -350,17 +345,14 @@ func (p *parser) valueFloat(it item) (any, tomlType) { var dtTypes = []struct { fmt string zone *time.Location - next bool }{ - {time.RFC3339Nano, time.Local, false}, - {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false}, - {"2006-01-02", internal.LocalDate, false}, - {"15:04:05.999999999", internal.LocalTime, false}, - - // tomlNext - {"2006-01-02T15:04Z07:00", time.Local, true}, - {"2006-01-02T15:04", internal.LocalDatetime, true}, - {"15:04", internal.LocalTime, true}, + {time.RFC3339Nano, time.Local}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, + {"2006-01-02", internal.LocalDate}, + {"15:04:05.999999999", internal.LocalTime}, + {"2006-01-02T15:04Z07:00", time.Local}, + {"2006-01-02T15:04", internal.LocalDatetime}, + {"15:04", internal.LocalTime}, } func (p *parser) valueDatetime(it item) (any, tomlType) { @@ -371,9 +363,6 @@ func (p *parser) valueDatetime(it item) (any, tomlType) { err error ) for _, dt := range dtTypes { - if dt.next && !p.tomlNext { - continue - } t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) if err == nil { if missingLeadingZero(it.val, dt.fmt) { @@ -644,6 +633,11 @@ func (p *parser) setValue(key string, value any) { // Note that since it has already been defined (as a hash), we don't // want to overwrite it. So our business is done. if p.isArray(keyContext) { + if !p.isImplicit(keyContext) { + if _, ok := hash[key]; ok { + p.panicf("Key '%s' has already been defined.", keyContext) + } + } p.removeImplicit(keyContext) hash[key] = value return @@ -802,10 +796,8 @@ func (p *parser) replaceEscapes(it item, str string) string { b.WriteByte(0x0d) skip = 1 case 'e': - if p.tomlNext { - b.WriteByte(0x1b) - skip = 1 - } + b.WriteByte(0x1b) + skip = 1 case '"': b.WriteByte(0x22) skip = 1 @@ -815,11 +807,9 @@ func (p *parser) replaceEscapes(it item, str string) string { // The lexer guarantees the correct number of characters are present; // don't need to check here. 
case 'x': - if p.tomlNext { - escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4]) - b.WriteRune(escaped) - skip = 3 - } + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4]) + b.WriteRune(escaped) + skip = 3 case 'u': escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6]) b.WriteRune(escaped) diff --git a/vendor/github.com/charmbracelet/colorprofile/.golangci.yml b/vendor/github.com/charmbracelet/colorprofile/.golangci.yml index 929cb0ac9..c90f03161 100644 --- a/vendor/github.com/charmbracelet/colorprofile/.golangci.yml +++ b/vendor/github.com/charmbracelet/colorprofile/.golangci.yml @@ -33,6 +33,9 @@ linters: generated: lax presets: - common-false-positives + settings: + exhaustive: + default-signifies-exhaustive: true issues: max-issues-per-linter: 0 max-same-issues: 0 diff --git a/vendor/github.com/charmbracelet/colorprofile/env.go b/vendor/github.com/charmbracelet/colorprofile/env.go index 749f989e2..3547324a8 100644 --- a/vendor/github.com/charmbracelet/colorprofile/env.go +++ b/vendor/github.com/charmbracelet/colorprofile/env.go @@ -83,8 +83,8 @@ func colorProfile(isatty bool, env environ) (p Profile) { } if envNoColor(env) && isatty { - if p > Ascii { - p = Ascii + if p > ASCII { + p = ASCII } return //nolint:nakedret } @@ -153,29 +153,24 @@ func envColorProfile(env environ) (p Profile) { p = ANSI } - parts := strings.Split(term, "-") - switch parts[0] { - case "alacritty", - "contour", - "foot", - "ghostty", - "kitty", - "rio", - "st", - "wezterm": + switch { + case strings.Contains(term, "alacritty"), + strings.Contains(term, "contour"), + strings.Contains(term, "foot"), + strings.Contains(term, "ghostty"), + strings.Contains(term, "kitty"), + strings.Contains(term, "rio"), + strings.Contains(term, "st"), + strings.Contains(term, "wezterm"): return TrueColor - case "xterm": - if len(parts) > 1 { - switch parts[1] { - case "ghostty", "kitty": - // These terminals can be defined as xterm-TERMNAME - return TrueColor - } - } - case "tmux", "screen": + case strings.HasPrefix(term, "tmux"), strings.HasPrefix(term, "screen"): if p < ANSI256 { p = ANSI256 } + case strings.HasPrefix(term, "xterm"): + if p < ANSI { + p = ANSI + } } if isCloudShell, _ := strconv.ParseBool(env.get("GOOGLE_CLOUD_SHELL")); isCloudShell { diff --git a/vendor/github.com/charmbracelet/colorprofile/profile.go b/vendor/github.com/charmbracelet/colorprofile/profile.go index 4eb77ca43..4ff7f6897 100644 --- a/vendor/github.com/charmbracelet/colorprofile/profile.go +++ b/vendor/github.com/charmbracelet/colorprofile/profile.go @@ -11,10 +11,12 @@ import ( type Profile byte const ( + // Unknown is a profile that represents the absence of a profile. + Unknown Profile = iota // NoTTY is a profile with no terminal support. - NoTTY Profile = iota - // Ascii is a profile with no color support. - Ascii //nolint:revive + NoTTY + // ASCII is a profile with no color support. + ASCII // ANSI is a profile with 16 colors (4-bit). ANSI // ANSI256 is a profile with 256 colors (8-bit). @@ -23,6 +25,9 @@ const ( TrueColor ) +// Ascii is an alias for the [ASCII] profile for backwards compatibility. +const Ascii = ASCII //nolint:revive + // String returns the string representation of a Profile. 
func (p Profile) String() string { switch p { @@ -32,12 +37,13 @@ func (p Profile) String() string { return "ANSI256" case ANSI: return "ANSI" - case Ascii: + case ASCII: return "Ascii" case NoTTY: return "NoTTY" + default: + return "Unknown" } - return "Unknown" } var ( @@ -50,7 +56,7 @@ var ( // Convert transforms a given Color to a Color supported within the Profile. func (p Profile) Convert(c color.Color) (cc color.Color) { - if p <= Ascii { + if p <= ASCII { return nil } if p == TrueColor { @@ -90,11 +96,13 @@ func (p Profile) Convert(c color.Color) (cc color.Color) { return c default: - if p == ANSI256 { + switch p { + case ANSI256: return ansi.Convert256(c) - } else if p == ANSI { + case ANSI: return ansi.Convert16(c) + default: + return c } - return c } } diff --git a/vendor/github.com/charmbracelet/colorprofile/writer.go b/vendor/github.com/charmbracelet/colorprofile/writer.go index 47f0c6eb9..1a88e2b7b 100644 --- a/vendor/github.com/charmbracelet/colorprofile/writer.go +++ b/vendor/github.com/charmbracelet/colorprofile/writer.go @@ -36,12 +36,12 @@ type Writer struct { // Write writes the given text to the underlying writer. func (w *Writer) Write(p []byte) (int, error) { - switch w.Profile { - case TrueColor: + switch { + case w.Profile == TrueColor: return w.Forward.Write(p) //nolint:wrapcheck - case NoTTY: + case w.Profile <= NoTTY: return io.WriteString(w.Forward, ansi.Strip(string(p))) //nolint:wrapcheck - case Ascii, ANSI, ANSI256: + case w.Profile == ASCII, w.Profile == ANSI, w.Profile == ANSI256: return w.downsample(p) default: return 0, fmt.Errorf("invalid profile: %v", w.Profile) @@ -112,7 +112,7 @@ func handleSgr(w *Writer, p *ansi.Parser, buf *bytes.Buffer) { if w.Profile < ANSI { continue } - style = style.DefaultForegroundColor() + style = style.ForegroundColor(nil) case 40, 41, 42, 43, 44, 45, 46, 47: // 8-bit background color if w.Profile < ANSI { continue @@ -132,7 +132,7 @@ func handleSgr(w *Writer, p *ansi.Parser, buf *bytes.Buffer) { if w.Profile < ANSI { continue } - style = style.DefaultBackgroundColor() + style = style.BackgroundColor(nil) case 58: // 16 or 24-bit underline color var c color.Color if n := ansi.ReadStyleColor(params[i:], &c); n > 0 { @@ -146,7 +146,7 @@ func handleSgr(w *Writer, p *ansi.Parser, buf *bytes.Buffer) { if w.Profile < ANSI { continue } - style = style.DefaultUnderlineColor() + style = style.UnderlineColor(nil) case 90, 91, 92, 93, 94, 95, 96, 97: // 8-bit bright foreground color if w.Profile < ANSI { continue diff --git a/vendor/github.com/charmbracelet/x/ansi/cursor.go b/vendor/github.com/charmbracelet/x/ansi/cursor.go index 4adf68964..3c0f8912e 100644 --- a/vendor/github.com/charmbracelet/x/ansi/cursor.go +++ b/vendor/github.com/charmbracelet/x/ansi/cursor.go @@ -261,7 +261,7 @@ func CHA(col int) string { // // See: https://vt100.net/docs/vt510-rm/CUP.html func CursorPosition(col, row int) string { - if row <= 0 && col <= 0 { + if row <= 1 && col <= 1 { return CursorHomePosition } @@ -281,7 +281,9 @@ func CUP(col, row int) string { } // CursorHomePosition is a sequence for moving the cursor to the upper left -// corner of the scrolling region. This is equivalent to `CursorPosition(1, 1)`. +// corner of the scrolling region. +// +// This is equivalent to [CursorPosition](1, 1). 
const CursorHomePosition = "\x1b[H" // SetCursorPosition (CUP) returns a sequence for setting the cursor to the diff --git a/vendor/github.com/charmbracelet/x/ansi/mode.go b/vendor/github.com/charmbracelet/x/ansi/mode.go index 03c91108c..1c21a61fb 100644 --- a/vendor/github.com/charmbracelet/x/ansi/mode.go +++ b/vendor/github.com/charmbracelet/x/ansi/mode.go @@ -108,7 +108,7 @@ func DECRST(modes ...Mode) string { func setMode(reset bool, modes ...Mode) (s string) { if len(modes) == 0 { - return //nolint:nakedret + return s } cmd := "h" @@ -142,7 +142,7 @@ func setMode(reset bool, modes ...Mode) (s string) { if len(dec) > 0 { s += seq + "?" + strings.Join(dec, ";") + cmd } - return //nolint:nakedret + return s } // RequestMode (DECRQM) returns a sequence to request a mode from the terminal. @@ -228,12 +228,12 @@ func (m DECMode) Mode() int { // // See: https://vt100.net/docs/vt510-rm/KAM.html const ( - KeyboardActionMode = ANSIMode(2) - KAM = KeyboardActionMode + ModeKeyboardAction = ANSIMode(2) + KAM = ModeKeyboardAction - SetKeyboardActionMode = "\x1b[2h" - ResetKeyboardActionMode = "\x1b[2l" - RequestKeyboardActionMode = "\x1b[2$p" + SetModeKeyboardAction = "\x1b[2h" + ResetModeKeyboardAction = "\x1b[2l" + RequestModeKeyboardAction = "\x1b[2$p" ) // Insert/Replace Mode (IRM) is a mode that determines whether characters are @@ -245,12 +245,12 @@ const ( // // See: https://vt100.net/docs/vt510-rm/IRM.html const ( - InsertReplaceMode = ANSIMode(4) - IRM = InsertReplaceMode + ModeInsertReplace = ANSIMode(4) + IRM = ModeInsertReplace - SetInsertReplaceMode = "\x1b[4h" - ResetInsertReplaceMode = "\x1b[4l" - RequestInsertReplaceMode = "\x1b[4$p" + SetModeInsertReplace = "\x1b[4h" + ResetModeInsertReplace = "\x1b[4l" + RequestModeInsertReplace = "\x1b[4$p" ) // BiDirectional Support Mode (BDSM) is a mode that determines whether the @@ -260,12 +260,12 @@ const ( // // See ECMA-48 7.2.1. 
const ( - BiDirectionalSupportMode = ANSIMode(8) - BDSM = BiDirectionalSupportMode + ModeBiDirectionalSupport = ANSIMode(8) + BDSM = ModeBiDirectionalSupport - SetBiDirectionalSupportMode = "\x1b[8h" - ResetBiDirectionalSupportMode = "\x1b[8l" - RequestBiDirectionalSupportMode = "\x1b[8$p" + SetModeBiDirectionalSupport = "\x1b[8h" + ResetModeBiDirectionalSupport = "\x1b[8l" + RequestModeBiDirectionalSupport = "\x1b[8$p" ) // Send Receive Mode (SRM) or Local Echo Mode is a mode that determines whether @@ -274,17 +274,17 @@ const ( // // See: https://vt100.net/docs/vt510-rm/SRM.html const ( - SendReceiveMode = ANSIMode(12) - LocalEchoMode = SendReceiveMode - SRM = SendReceiveMode + ModeSendReceive = ANSIMode(12) + ModeLocalEcho = ModeSendReceive + SRM = ModeSendReceive - SetSendReceiveMode = "\x1b[12h" - ResetSendReceiveMode = "\x1b[12l" - RequestSendReceiveMode = "\x1b[12$p" + SetModeSendReceive = "\x1b[12h" + ResetModeSendReceive = "\x1b[12l" + RequestModeSendReceive = "\x1b[12$p" - SetLocalEchoMode = "\x1b[12h" - ResetLocalEchoMode = "\x1b[12l" - RequestLocalEchoMode = "\x1b[12$p" + SetModeLocalEcho = "\x1b[12h" + ResetModeLocalEcho = "\x1b[12l" + RequestModeLocalEcho = "\x1b[12$p" ) // Line Feed/New Line Mode (LNM) is a mode that determines whether the terminal @@ -299,12 +299,12 @@ const ( // // See: https://vt100.net/docs/vt510-rm/LNM.html const ( - LineFeedNewLineMode = ANSIMode(20) - LNM = LineFeedNewLineMode + ModeLineFeedNewLine = ANSIMode(20) + LNM = ModeLineFeedNewLine - SetLineFeedNewLineMode = "\x1b[20h" - ResetLineFeedNewLineMode = "\x1b[20l" - RequestLineFeedNewLineMode = "\x1b[20$p" + SetModeLineFeedNewLine = "\x1b[20h" + ResetModeLineFeedNewLine = "\x1b[20l" + RequestModeLineFeedNewLine = "\x1b[20$p" ) // Cursor Keys Mode (DECCKM) is a mode that determines whether the cursor keys @@ -312,18 +312,12 @@ const ( // // See: https://vt100.net/docs/vt510-rm/DECCKM.html const ( - CursorKeysMode = DECMode(1) - DECCKM = CursorKeysMode + ModeCursorKeys = DECMode(1) + DECCKM = ModeCursorKeys - SetCursorKeysMode = "\x1b[?1h" - ResetCursorKeysMode = "\x1b[?1l" - RequestCursorKeysMode = "\x1b[?1$p" -) - -// Deprecated: use [SetCursorKeysMode] and [ResetCursorKeysMode] instead. 
-const ( - EnableCursorKeys = "\x1b[?1h" //nolint:revive // grouped constants - DisableCursorKeys = "\x1b[?1l" + SetModeCursorKeys = "\x1b[?1h" + ResetModeCursorKeys = "\x1b[?1l" + RequestModeCursorKeys = "\x1b[?1$p" ) // Origin Mode (DECOM) is a mode that determines whether the cursor moves to the @@ -331,12 +325,12 @@ const ( // // See: https://vt100.net/docs/vt510-rm/DECOM.html const ( - OriginMode = DECMode(6) - DECOM = OriginMode + ModeOrigin = DECMode(6) + DECOM = ModeOrigin - SetOriginMode = "\x1b[?6h" - ResetOriginMode = "\x1b[?6l" - RequestOriginMode = "\x1b[?6$p" + SetModeOrigin = "\x1b[?6h" + ResetModeOrigin = "\x1b[?6l" + RequestModeOrigin = "\x1b[?6$p" ) // Auto Wrap Mode (DECAWM) is a mode that determines whether the cursor wraps @@ -344,12 +338,12 @@ const ( // // See: https://vt100.net/docs/vt510-rm/DECAWM.html const ( - AutoWrapMode = DECMode(7) - DECAWM = AutoWrapMode + ModeAutoWrap = DECMode(7) + DECAWM = ModeAutoWrap - SetAutoWrapMode = "\x1b[?7h" - ResetAutoWrapMode = "\x1b[?7l" - RequestAutoWrapMode = "\x1b[?7$p" + SetModeAutoWrap = "\x1b[?7h" + ResetModeAutoWrap = "\x1b[?7l" + RequestModeAutoWrap = "\x1b[?7$p" ) // X10 Mouse Mode is a mode that determines whether the mouse reports on button @@ -364,39 +358,29 @@ const ( // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking const ( - X10MouseMode = DECMode(9) + ModeMouseX10 = DECMode(9) - SetX10MouseMode = "\x1b[?9h" - ResetX10MouseMode = "\x1b[?9l" - RequestX10MouseMode = "\x1b[?9$p" + SetModeMouseX10 = "\x1b[?9h" + ResetModeMouseX10 = "\x1b[?9l" + RequestModeMouseX10 = "\x1b[?9$p" ) // Text Cursor Enable Mode (DECTCEM) is a mode that shows/hides the cursor. // // See: https://vt100.net/docs/vt510-rm/DECTCEM.html const ( - TextCursorEnableMode = DECMode(25) - DECTCEM = TextCursorEnableMode + ModeTextCursorEnable = DECMode(25) + DECTCEM = ModeTextCursorEnable - SetTextCursorEnableMode = "\x1b[?25h" - ResetTextCursorEnableMode = "\x1b[?25l" - RequestTextCursorEnableMode = "\x1b[?25$p" + SetModeTextCursorEnable = "\x1b[?25h" + ResetModeTextCursorEnable = "\x1b[?25l" + RequestModeTextCursorEnable = "\x1b[?25$p" ) -// These are aliases for [SetTextCursorEnableMode] and [ResetTextCursorEnableMode]. +// These are aliases for [SetModeTextCursorEnable] and [ResetModeTextCursorEnable]. const ( - ShowCursor = SetTextCursorEnableMode - HideCursor = ResetTextCursorEnableMode -) - -// Text Cursor Enable Mode (DECTCEM) is a mode that shows/hides the cursor. -// -// See: https://vt100.net/docs/vt510-rm/DECTCEM.html -// -// Deprecated: use [SetTextCursorEnableMode] and [ResetTextCursorEnableMode] instead. 
-const ( - CursorEnableMode = DECMode(25) - RequestCursorVisibility = "\x1b[?25$p" + ShowCursor = SetModeTextCursorEnable + HideCursor = ResetModeTextCursorEnable ) // Numeric Keypad Mode (DECNKM) is a mode that determines whether the keypad @@ -406,12 +390,12 @@ const ( // // See: https://vt100.net/docs/vt510-rm/DECNKM.html const ( - NumericKeypadMode = DECMode(66) - DECNKM = NumericKeypadMode + ModeNumericKeypad = DECMode(66) + DECNKM = ModeNumericKeypad - SetNumericKeypadMode = "\x1b[?66h" - ResetNumericKeypadMode = "\x1b[?66l" - RequestNumericKeypadMode = "\x1b[?66$p" + SetModeNumericKeypad = "\x1b[?66h" + ResetModeNumericKeypad = "\x1b[?66l" + RequestModeNumericKeypad = "\x1b[?66$p" ) // Backarrow Key Mode (DECBKM) is a mode that determines whether the backspace @@ -419,12 +403,12 @@ const ( // // See: https://vt100.net/docs/vt510-rm/DECBKM.html const ( - BackarrowKeyMode = DECMode(67) - DECBKM = BackarrowKeyMode + ModeBackarrowKey = DECMode(67) + DECBKM = ModeBackarrowKey - SetBackarrowKeyMode = "\x1b[?67h" - ResetBackarrowKeyMode = "\x1b[?67l" - RequestBackarrowKeyMode = "\x1b[?67$p" + SetModeBackarrowKey = "\x1b[?67h" + ResetModeBackarrowKey = "\x1b[?67l" + RequestModeBackarrowKey = "\x1b[?67$p" ) // Left Right Margin Mode (DECLRMM) is a mode that determines whether the left @@ -432,47 +416,33 @@ const ( // // See: https://vt100.net/docs/vt510-rm/DECLRMM.html const ( - LeftRightMarginMode = DECMode(69) - DECLRMM = LeftRightMarginMode + ModeLeftRightMargin = DECMode(69) + DECLRMM = ModeLeftRightMargin - SetLeftRightMarginMode = "\x1b[?69h" - ResetLeftRightMarginMode = "\x1b[?69l" - RequestLeftRightMarginMode = "\x1b[?69$p" + SetModeLeftRightMargin = "\x1b[?69h" + ResetModeLeftRightMargin = "\x1b[?69l" + RequestModeLeftRightMargin = "\x1b[?69$p" ) // Normal Mouse Mode is a mode that determines whether the mouse reports on // button presses and releases. It will also report modifier keys, wheel // events, and extra buttons. // -// It uses the same encoding as [X10MouseMode] with a few differences: +// It uses the same encoding as [ModeMouseX10] with a few differences: // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking const ( - NormalMouseMode = DECMode(1000) + ModeMouseNormal = DECMode(1000) - SetNormalMouseMode = "\x1b[?1000h" - ResetNormalMouseMode = "\x1b[?1000l" - RequestNormalMouseMode = "\x1b[?1000$p" -) - -// VT Mouse Tracking is a mode that determines whether the mouse reports on -// button press and release. -// -// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking -// -// Deprecated: use [NormalMouseMode] instead. -const ( - MouseMode = DECMode(1000) - - EnableMouse = "\x1b[?1000h" - DisableMouse = "\x1b[?1000l" - RequestMouse = "\x1b[?1000$p" + SetModeMouseNormal = "\x1b[?1000h" + ResetModeMouseNormal = "\x1b[?1000l" + RequestModeMouseNormal = "\x1b[?1000$p" ) // Highlight Mouse Tracking is a mode that determines whether the mouse reports // on button presses, releases, and highlighted cells. // -// It uses the same encoding as [NormalMouseMode] with a few differences: +// It uses the same encoding as [ModeMouseNormal] with a few differences: // // On highlight events, the terminal responds with the following encoding: // @@ -481,11 +451,11 @@ const ( // // Where the parameters are startx, starty, endx, endy, mousex, and mousey. 
const ( - HighlightMouseMode = DECMode(1001) + ModeMouseHighlight = DECMode(1001) - SetHighlightMouseMode = "\x1b[?1001h" - ResetHighlightMouseMode = "\x1b[?1001l" - RequestHighlightMouseMode = "\x1b[?1001$p" + SetModeMouseHighlight = "\x1b[?1001h" + ResetModeMouseHighlight = "\x1b[?1001l" + RequestModeMouseHighlight = "\x1b[?1001$p" ) // VT Hilite Mouse Tracking is a mode that determines whether the mouse reports on @@ -493,65 +463,29 @@ const ( // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking // -// Deprecated: use [HighlightMouseMode] instead. -const ( - MouseHiliteMode = DECMode(1001) - EnableMouseHilite = "\x1b[?1001h" - DisableMouseHilite = "\x1b[?1001l" - RequestMouseHilite = "\x1b[?1001$p" -) - -// Button Event Mouse Tracking is essentially the same as [NormalMouseMode], +// Button Event Mouse Tracking is essentially the same as [ModeMouseNormal], // but it also reports button-motion events when a button is pressed. // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking const ( - ButtonEventMouseMode = DECMode(1002) + ModeMouseButtonEvent = DECMode(1002) - SetButtonEventMouseMode = "\x1b[?1002h" - ResetButtonEventMouseMode = "\x1b[?1002l" - RequestButtonEventMouseMode = "\x1b[?1002$p" + SetModeMouseButtonEvent = "\x1b[?1002h" + ResetModeMouseButtonEvent = "\x1b[?1002l" + RequestModeMouseButtonEvent = "\x1b[?1002$p" ) -// Cell Motion Mouse Tracking is a mode that determines whether the mouse -// reports on button press, release, and motion events. -// -// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking -// -// Deprecated: use [ButtonEventMouseMode] instead. -const ( - MouseCellMotionMode = DECMode(1002) - - EnableMouseCellMotion = "\x1b[?1002h" - DisableMouseCellMotion = "\x1b[?1002l" - RequestMouseCellMotion = "\x1b[?1002$p" -) - -// Any Event Mouse Tracking is the same as [ButtonEventMouseMode], except that +// Any Event Mouse Tracking is the same as [ModeMouseButtonEvent], except that // all motion events are reported even if no mouse buttons are pressed. // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking const ( - AnyEventMouseMode = DECMode(1003) + ModeMouseAnyEvent = DECMode(1003) - SetAnyEventMouseMode = "\x1b[?1003h" - ResetAnyEventMouseMode = "\x1b[?1003l" - RequestAnyEventMouseMode = "\x1b[?1003$p" -) - -// All Mouse Tracking is a mode that determines whether the mouse reports on -// button press, release, motion, and highlight events. -// -// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking -// -// Deprecated: use [AnyEventMouseMode] instead. -const ( - MouseAllMotionMode = DECMode(1003) - - EnableMouseAllMotion = "\x1b[?1003h" - DisableMouseAllMotion = "\x1b[?1003l" - RequestMouseAllMotion = "\x1b[?1003$p" + SetModeMouseAnyEvent = "\x1b[?1003h" + ResetModeMouseAnyEvent = "\x1b[?1003l" + RequestModeMouseAnyEvent = "\x1b[?1003$p" ) // Focus Event Mode is a mode that determines whether the terminal reports focus @@ -564,22 +498,11 @@ const ( // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Focus-Tracking const ( - FocusEventMode = DECMode(1004) + ModeFocusEvent = DECMode(1004) - SetFocusEventMode = "\x1b[?1004h" - ResetFocusEventMode = "\x1b[?1004l" - RequestFocusEventMode = "\x1b[?1004$p" -) - -// Deprecated: use [SetFocusEventMode], [ResetFocusEventMode], and -// [RequestFocusEventMode] instead. -// Focus reporting mode constants. 
-const ( - ReportFocusMode = DECMode(1004) //nolint:revive // grouped constants - - EnableReportFocus = "\x1b[?1004h" - DisableReportFocus = "\x1b[?1004l" - RequestReportFocus = "\x1b[?1004$p" + SetModeFocusEvent = "\x1b[?1004h" + ResetModeFocusEvent = "\x1b[?1004l" + RequestModeFocusEvent = "\x1b[?1004$p" ) // SGR Extended Mouse Mode is a mode that changes the mouse tracking encoding @@ -589,24 +512,15 @@ const ( // // CSI < Cb ; Cx ; Cy M // -// Where Cb is the same as [NormalMouseMode], and Cx and Cy are the x and y. +// Where Cb is the same as [ModeMouseNormal], and Cx and Cy are the x and y. // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking const ( - SgrExtMouseMode = DECMode(1006) + ModeMouseExtSgr = DECMode(1006) - SetSgrExtMouseMode = "\x1b[?1006h" - ResetSgrExtMouseMode = "\x1b[?1006l" - RequestSgrExtMouseMode = "\x1b[?1006$p" -) - -// Deprecated: use [SgrExtMouseMode] [SetSgrExtMouseMode], -// [ResetSgrExtMouseMode], and [RequestSgrExtMouseMode] instead. -const ( - MouseSgrExtMode = DECMode(1006) //nolint:revive // grouped constants - EnableMouseSgrExt = "\x1b[?1006h" - DisableMouseSgrExt = "\x1b[?1006l" - RequestMouseSgrExt = "\x1b[?1006$p" + SetModeMouseExtSgr = "\x1b[?1006h" + ResetModeMouseExtSgr = "\x1b[?1006l" + RequestModeMouseExtSgr = "\x1b[?1006$p" ) // UTF-8 Extended Mouse Mode is a mode that changes the mouse tracking encoding @@ -614,11 +528,11 @@ const ( // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking const ( - Utf8ExtMouseMode = DECMode(1005) + ModeMouseExtUtf8 = DECMode(1005) - SetUtf8ExtMouseMode = "\x1b[?1005h" - ResetUtf8ExtMouseMode = "\x1b[?1005l" - RequestUtf8ExtMouseMode = "\x1b[?1005$p" + SetModeMouseExtUtf8 = "\x1b[?1005h" + ResetModeMouseExtUtf8 = "\x1b[?1005l" + RequestModeMouseExtUtf8 = "\x1b[?1005$p" ) // URXVT Extended Mouse Mode is a mode that changes the mouse tracking encoding @@ -626,25 +540,25 @@ const ( // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking const ( - UrxvtExtMouseMode = DECMode(1015) + ModeMouseExtUrxvt = DECMode(1015) - SetUrxvtExtMouseMode = "\x1b[?1015h" - ResetUrxvtExtMouseMode = "\x1b[?1015l" - RequestUrxvtExtMouseMode = "\x1b[?1015$p" + SetModeMouseExtUrxvt = "\x1b[?1015h" + ResetModeMouseExtUrxvt = "\x1b[?1015l" + RequestModeMouseExtUrxvt = "\x1b[?1015$p" ) // SGR Pixel Extended Mouse Mode is a mode that changes the mouse tracking // encoding to use SGR parameters with pixel coordinates. // -// This is similar to [SgrExtMouseMode], but also reports pixel coordinates. +// This is similar to [ModeMouseExtSgr], but also reports pixel coordinates. 
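+//
+// For example (hypothetical coordinates), a left-button press at pixel
+// (100, 200) is encoded as:
+//
+//	CSI < 0 ; 100 ; 200 M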
// // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking const ( - SgrPixelExtMouseMode = DECMode(1016) + ModeMouseExtSgrPixel = DECMode(1016) - SetSgrPixelExtMouseMode = "\x1b[?1016h" - ResetSgrPixelExtMouseMode = "\x1b[?1016l" - RequestSgrPixelExtMouseMode = "\x1b[?1016$p" + SetModeMouseExtSgrPixel = "\x1b[?1016h" + ResetModeMouseExtSgrPixel = "\x1b[?1016l" + RequestModeMouseExtSgrPixel = "\x1b[?1016$p" ) // Alternate Screen Mode is a mode that determines whether the alternate screen @@ -653,11 +567,11 @@ const ( // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-The-Alternate-Screen-Buffer const ( - AltScreenMode = DECMode(1047) + ModeAltScreen = DECMode(1047) - SetAltScreenMode = "\x1b[?1047h" - ResetAltScreenMode = "\x1b[?1047l" - RequestAltScreenMode = "\x1b[?1047$p" + SetModeAltScreen = "\x1b[?1047h" + ResetModeAltScreen = "\x1b[?1047l" + RequestModeAltScreen = "\x1b[?1047$p" ) // Save Cursor Mode is a mode that saves the cursor position. @@ -665,42 +579,24 @@ const ( // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-The-Alternate-Screen-Buffer const ( - SaveCursorMode = DECMode(1048) + ModeSaveCursor = DECMode(1048) - SetSaveCursorMode = "\x1b[?1048h" - ResetSaveCursorMode = "\x1b[?1048l" - RequestSaveCursorMode = "\x1b[?1048$p" + SetModeSaveCursor = "\x1b[?1048h" + ResetModeSaveCursor = "\x1b[?1048l" + RequestModeSaveCursor = "\x1b[?1048$p" ) // Alternate Screen Save Cursor Mode is a mode that saves the cursor position as in -// [SaveCursorMode], switches to the alternate screen buffer as in [AltScreenMode], +// [ModeSaveCursor], switches to the alternate screen buffer as in [ModeAltScreen], // and clears the screen on switch. // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-The-Alternate-Screen-Buffer const ( - AltScreenSaveCursorMode = DECMode(1049) + ModeAltScreenSaveCursor = DECMode(1049) - SetAltScreenSaveCursorMode = "\x1b[?1049h" - ResetAltScreenSaveCursorMode = "\x1b[?1049l" - RequestAltScreenSaveCursorMode = "\x1b[?1049$p" -) - -// Alternate Screen Buffer is a mode that determines whether the alternate screen -// buffer is active. -// -// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-The-Alternate-Screen-Buffer -// -// Deprecated: use [AltScreenSaveCursorMode] instead. -const ( - AltScreenBufferMode = DECMode(1049) - - SetAltScreenBufferMode = "\x1b[?1049h" - ResetAltScreenBufferMode = "\x1b[?1049l" - RequestAltScreenBufferMode = "\x1b[?1049$p" - - EnableAltScreenBuffer = "\x1b[?1049h" - DisableAltScreenBuffer = "\x1b[?1049l" - RequestAltScreenBuffer = "\x1b[?1049$p" + SetModeAltScreenSaveCursor = "\x1b[?1049h" + ResetModeAltScreenSaveCursor = "\x1b[?1049l" + RequestModeAltScreenSaveCursor = "\x1b[?1049$p" ) // Bracketed Paste Mode is a mode that determines whether pasted text is @@ -709,19 +605,11 @@ const ( // See: https://cirw.in/blog/bracketed-paste // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Bracketed-Paste-Mode const ( - BracketedPasteMode = DECMode(2004) + ModeBracketedPaste = DECMode(2004) - SetBracketedPasteMode = "\x1b[?2004h" - ResetBracketedPasteMode = "\x1b[?2004l" - RequestBracketedPasteMode = "\x1b[?2004$p" -) - -// Deprecated: use [SetBracketedPasteMode], [ResetBracketedPasteMode], and -// [RequestBracketedPasteMode] instead. 
-const ( - EnableBracketedPaste = "\x1b[?2004h" //nolint:revive // grouped constants - DisableBracketedPaste = "\x1b[?2004l" - RequestBracketedPaste = "\x1b[?2004$p" + SetModeBracketedPaste = "\x1b[?2004h" + ResetModeBracketedPaste = "\x1b[?2004l" + RequestModeBracketedPaste = "\x1b[?2004$p" ) // Synchronized Output Mode is a mode that determines whether output is @@ -729,23 +617,11 @@ const ( // // See: https://gist.github.com/christianparpart/d8a62cc1ab659194337d73e399004036 const ( - SynchronizedOutputMode = DECMode(2026) + ModeSynchronizedOutput = DECMode(2026) - SetSynchronizedOutputMode = "\x1b[?2026h" - ResetSynchronizedOutputMode = "\x1b[?2026l" - RequestSynchronizedOutputMode = "\x1b[?2026$p" -) - -// Synchronized Output Mode. See [SynchronizedOutputMode]. -// -// Deprecated: use [SynchronizedOutputMode], [SetSynchronizedOutputMode], and -// [ResetSynchronizedOutputMode], and [RequestSynchronizedOutputMode] instead. -const ( - SyncdOutputMode = DECMode(2026) - - EnableSyncdOutput = "\x1b[?2026h" - DisableSyncdOutput = "\x1b[?2026l" - RequestSyncdOutput = "\x1b[?2026$p" + SetModeSynchronizedOutput = "\x1b[?2026h" + ResetModeSynchronizedOutput = "\x1b[?2026l" + RequestModeSynchronizedOutput = "\x1b[?2026$p" ) // Unicode Core Mode is a mode that determines whether the terminal should use @@ -754,41 +630,16 @@ const ( // // See: https://github.com/contour-terminal/terminal-unicode-core const ( - UnicodeCoreMode = DECMode(2027) + ModeUnicodeCore = DECMode(2027) - SetUnicodeCoreMode = "\x1b[?2027h" - ResetUnicodeCoreMode = "\x1b[?2027l" - RequestUnicodeCoreMode = "\x1b[?2027$p" + SetModeUnicodeCore = "\x1b[?2027h" + ResetModeUnicodeCore = "\x1b[?2027l" + RequestModeUnicodeCore = "\x1b[?2027$p" ) -// Grapheme Clustering Mode is a mode that determines whether the terminal -// should look for grapheme clusters instead of single runes in the rendered -// text. This makes the terminal properly render combining characters such as -// emojis. // -// See: https://github.com/contour-terminal/terminal-unicode-core -// -// Deprecated: use [GraphemeClusteringMode], [SetUnicodeCoreMode], -// [ResetUnicodeCoreMode], and [RequestUnicodeCoreMode] instead. -const ( - GraphemeClusteringMode = DECMode(2027) - SetGraphemeClusteringMode = "\x1b[?2027h" - ResetGraphemeClusteringMode = "\x1b[?2027l" - RequestGraphemeClusteringMode = "\x1b[?2027$p" -) - -// Grapheme Clustering Mode. See [GraphemeClusteringMode]. -// -// Deprecated: use [SetUnicodeCoreMode], [ResetUnicodeCoreMode], and -// [RequestUnicodeCoreMode] instead. -const ( - EnableGraphemeClustering = "\x1b[?2027h" - DisableGraphemeClustering = "\x1b[?2027l" - RequestGraphemeClustering = "\x1b[?2027$p" -) - -// LightDarkMode is a mode that enables reporting the operating system's color +// ModeLightDark is a mode that enables reporting the operating system's color // scheme (light or dark) preference. 
It reports the color scheme as a [DSR] // and [LightDarkReport] escape sequences encoded as follows: // @@ -802,14 +653,14 @@ const ( // // See: https://contour-terminal.org/vt-extensions/color-palette-update-notifications/ const ( - LightDarkMode = DECMode(2031) + ModeLightDark = DECMode(2031) - SetLightDarkMode = "\x1b[?2031h" - ResetLightDarkMode = "\x1b[?2031l" - RequestLightDarkMode = "\x1b[?2031$p" + SetModeLightDark = "\x1b[?2031h" + ResetModeLightDark = "\x1b[?2031l" + RequestModeLightDark = "\x1b[?2031$p" ) -// InBandResizeMode is a mode that reports terminal resize events as escape +// ModeInBandResize is a mode that reports terminal resize events as escape // sequences. This is useful for systems that do not support [SIGWINCH] like // Windows. // @@ -819,11 +670,11 @@ const ( // // See: https://gist.github.com/rockorager/e695fb2924d36b2bcf1fff4a3704bd83 const ( - InBandResizeMode = DECMode(2048) + ModeInBandResize = DECMode(2048) - SetInBandResizeMode = "\x1b[?2048h" - ResetInBandResizeMode = "\x1b[?2048l" - RequestInBandResizeMode = "\x1b[?2048$p" + SetModeInBandResize = "\x1b[?2048h" + ResetModeInBandResize = "\x1b[?2048l" + RequestModeInBandResize = "\x1b[?2048$p" ) // Win32Input is a mode that determines whether input is processed by the @@ -831,17 +682,9 @@ const ( // // See: https://github.com/microsoft/terminal/blob/main/doc/specs/%234999%20-%20Improved%20keyboard%20handling%20in%20Conpty.md const ( - Win32InputMode = DECMode(9001) + ModeWin32Input = DECMode(9001) - SetWin32InputMode = "\x1b[?9001h" - ResetWin32InputMode = "\x1b[?9001l" - RequestWin32InputMode = "\x1b[?9001$p" -) - -// Deprecated: use [SetWin32InputMode], [ResetWin32InputMode], and -// [RequestWin32InputMode] instead. -const ( - EnableWin32Input = "\x1b[?9001h" //nolint:revive // grouped constants - DisableWin32Input = "\x1b[?9001l" - RequestWin32Input = "\x1b[?9001$p" + SetModeWin32Input = "\x1b[?9001h" + ResetModeWin32Input = "\x1b[?9001l" + RequestModeWin32Input = "\x1b[?9001$p" ) diff --git a/vendor/github.com/charmbracelet/x/ansi/mode_deprecated.go b/vendor/github.com/charmbracelet/x/ansi/mode_deprecated.go new file mode 100644 index 000000000..323d5fb53 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/mode_deprecated.go @@ -0,0 +1,495 @@ +package ansi + +// Keyboard Action Mode (KAM) controls locking of the keyboard. +// +// Deprecated: use [ModeKeyboardAction] instead. +const ( + KeyboardActionMode = ANSIMode(2) + + SetKeyboardActionMode = "\x1b[2h" + ResetKeyboardActionMode = "\x1b[2l" + RequestKeyboardActionMode = "\x1b[2$p" +) + +// Insert/Replace Mode (IRM) determines whether characters are inserted or replaced. +// +// Deprecated: use [ModeInsertReplace] instead. +const ( + InsertReplaceMode = ANSIMode(4) + + SetInsertReplaceMode = "\x1b[4h" + ResetInsertReplaceMode = "\x1b[4l" + RequestInsertReplaceMode = "\x1b[4$p" +) + +// BiDirectional Support Mode (BDSM) determines whether the terminal supports bidirectional text. +// +// Deprecated: use [ModeBiDirectionalSupport] instead. +const ( + BiDirectionalSupportMode = ANSIMode(8) + + SetBiDirectionalSupportMode = "\x1b[8h" + ResetBiDirectionalSupportMode = "\x1b[8l" + RequestBiDirectionalSupportMode = "\x1b[8$p" +) + +// Send Receive Mode (SRM) or Local Echo Mode determines whether the terminal echoes characters. +// +// Deprecated: use [ModeSendReceive] instead. 
+const ( + SendReceiveMode = ANSIMode(12) + LocalEchoMode = SendReceiveMode + + SetSendReceiveMode = "\x1b[12h" + ResetSendReceiveMode = "\x1b[12l" + RequestSendReceiveMode = "\x1b[12$p" + + SetLocalEchoMode = "\x1b[12h" + ResetLocalEchoMode = "\x1b[12l" + RequestLocalEchoMode = "\x1b[12$p" +) + +// Line Feed/New Line Mode (LNM) determines whether the terminal interprets line feed as new line. +// +// Deprecated: use [ModeLineFeedNewLine] instead. +const ( + LineFeedNewLineMode = ANSIMode(20) + + SetLineFeedNewLineMode = "\x1b[20h" + ResetLineFeedNewLineMode = "\x1b[20l" + RequestLineFeedNewLineMode = "\x1b[20$p" +) + +// Cursor Keys Mode (DECCKM) determines whether cursor keys send ANSI or application sequences. +// +// Deprecated: use [ModeCursorKeys] instead. +const ( + CursorKeysMode = DECMode(1) + + SetCursorKeysMode = "\x1b[?1h" + ResetCursorKeysMode = "\x1b[?1l" + RequestCursorKeysMode = "\x1b[?1$p" +) + +// Cursor Keys mode. +// +// Deprecated: use [SetModeCursorKeys] and [ResetModeCursorKeys] instead. +const ( + EnableCursorKeys = "\x1b[?1h" + DisableCursorKeys = "\x1b[?1l" +) + +// Origin Mode (DECOM) determines whether the cursor moves to home or margin position. +// +// Deprecated: use [ModeOrigin] instead. +const ( + OriginMode = DECMode(6) + + SetOriginMode = "\x1b[?6h" + ResetOriginMode = "\x1b[?6l" + RequestOriginMode = "\x1b[?6$p" +) + +// Auto Wrap Mode (DECAWM) determines whether the cursor wraps to the next line. +// +// Deprecated: use [ModeAutoWrap] instead. +const ( + AutoWrapMode = DECMode(7) + + SetAutoWrapMode = "\x1b[?7h" + ResetAutoWrapMode = "\x1b[?7l" + RequestAutoWrapMode = "\x1b[?7$p" +) + +// X10 Mouse Mode determines whether the mouse reports on button presses. +// +// Deprecated: use [ModeMouseX10] instead. +const ( + X10MouseMode = DECMode(9) + + SetX10MouseMode = "\x1b[?9h" + ResetX10MouseMode = "\x1b[?9l" + RequestX10MouseMode = "\x1b[?9$p" +) + +// Text Cursor Enable Mode (DECTCEM) shows/hides the cursor. +// +// Deprecated: use [ModeTextCursorEnable] instead. +const ( + TextCursorEnableMode = DECMode(25) + + SetTextCursorEnableMode = "\x1b[?25h" + ResetTextCursorEnableMode = "\x1b[?25l" + RequestTextCursorEnableMode = "\x1b[?25$p" +) + +// Text Cursor Enable mode. +// +// Deprecated: use [SetModeTextCursorEnable] and [ResetModeTextCursorEnable] instead. +const ( + CursorEnableMode = DECMode(25) + RequestCursorVisibility = "\x1b[?25$p" +) + +// Numeric Keypad Mode (DECNKM) determines whether the keypad sends application or numeric sequences. +// +// Deprecated: use [ModeNumericKeypad] instead. +const ( + NumericKeypadMode = DECMode(66) + + SetNumericKeypadMode = "\x1b[?66h" + ResetNumericKeypadMode = "\x1b[?66l" + RequestNumericKeypadMode = "\x1b[?66$p" +) + +// Backarrow Key Mode (DECBKM) determines whether the backspace key sends backspace or delete. +// +// Deprecated: use [ModeBackarrowKey] instead. +const ( + BackarrowKeyMode = DECMode(67) + + SetBackarrowKeyMode = "\x1b[?67h" + ResetBackarrowKeyMode = "\x1b[?67l" + RequestBackarrowKeyMode = "\x1b[?67$p" +) + +// Left Right Margin Mode (DECLRMM) determines whether left and right margins can be set. +// +// Deprecated: use [ModeLeftRightMargin] instead. +const ( + LeftRightMarginMode = DECMode(69) + + SetLeftRightMarginMode = "\x1b[?69h" + ResetLeftRightMarginMode = "\x1b[?69l" + RequestLeftRightMarginMode = "\x1b[?69$p" +) + +// Normal Mouse Mode determines whether the mouse reports on button presses and releases. +// +// Deprecated: use [ModeMouseNormal] instead. 
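+//
+// In this legacy encoding, events arrive as "CSI M Cb Cx Cy" with each value
+// offset by 32; for example (hypothetical event), a left press at cell (1, 1)
+// is the byte sequence "\x1b[M\x20\x21\x21".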
+const ( + NormalMouseMode = DECMode(1000) + + SetNormalMouseMode = "\x1b[?1000h" + ResetNormalMouseMode = "\x1b[?1000l" + RequestNormalMouseMode = "\x1b[?1000$p" +) + +// VT Mouse Tracking mode. +// +// Deprecated: use [ModeMouseNormal] instead. +const ( + MouseMode = DECMode(1000) + + EnableMouse = "\x1b[?1000h" + DisableMouse = "\x1b[?1000l" + RequestMouse = "\x1b[?1000$p" +) + +// Highlight Mouse Tracking determines whether the mouse reports on button presses and highlighted cells. +// +// Deprecated: use [ModeMouseHighlight] instead. +const ( + HighlightMouseMode = DECMode(1001) + + SetHighlightMouseMode = "\x1b[?1001h" + ResetHighlightMouseMode = "\x1b[?1001l" + RequestHighlightMouseMode = "\x1b[?1001$p" +) + +// VT Hilite Mouse Tracking mode. +// +// Deprecated: use [ModeMouseHighlight] instead. +const ( + MouseHiliteMode = DECMode(1001) + + EnableMouseHilite = "\x1b[?1001h" + DisableMouseHilite = "\x1b[?1001l" + RequestMouseHilite = "\x1b[?1001$p" +) + +// Button Event Mouse Tracking reports button-motion events when a button is pressed. +// +// Deprecated: use [ModeMouseButtonEvent] instead. +const ( + ButtonEventMouseMode = DECMode(1002) + + SetButtonEventMouseMode = "\x1b[?1002h" + ResetButtonEventMouseMode = "\x1b[?1002l" + RequestButtonEventMouseMode = "\x1b[?1002$p" +) + +// Cell Motion Mouse Tracking mode. +// +// Deprecated: use [ModeMouseButtonEvent] instead. +const ( + MouseCellMotionMode = DECMode(1002) + + EnableMouseCellMotion = "\x1b[?1002h" + DisableMouseCellMotion = "\x1b[?1002l" + RequestMouseCellMotion = "\x1b[?1002$p" +) + +// Any Event Mouse Tracking reports all motion events. +// +// Deprecated: use [ModeMouseAnyEvent] instead. +const ( + AnyEventMouseMode = DECMode(1003) + + SetAnyEventMouseMode = "\x1b[?1003h" + ResetAnyEventMouseMode = "\x1b[?1003l" + RequestAnyEventMouseMode = "\x1b[?1003$p" +) + +// All Mouse Tracking mode. +// +// Deprecated: use [ModeMouseAnyEvent] instead. +const ( + MouseAllMotionMode = DECMode(1003) + + EnableMouseAllMotion = "\x1b[?1003h" + DisableMouseAllMotion = "\x1b[?1003l" + RequestMouseAllMotion = "\x1b[?1003$p" +) + +// Focus Event Mode determines whether the terminal reports focus and blur events. +// +// Deprecated: use [ModeFocusEvent] instead. +const ( + FocusEventMode = DECMode(1004) + + SetFocusEventMode = "\x1b[?1004h" + ResetFocusEventMode = "\x1b[?1004l" + RequestFocusEventMode = "\x1b[?1004$p" +) + +// Focus reporting mode. +// +// Deprecated: use [SetModeFocusEvent], [ResetModeFocusEvent], and +// [RequestModeFocusEvent] instead. +const ( + ReportFocusMode = DECMode(1004) + + EnableReportFocus = "\x1b[?1004h" + DisableReportFocus = "\x1b[?1004l" + RequestReportFocus = "\x1b[?1004$p" +) + +// UTF-8 Extended Mouse Mode changes the mouse tracking encoding to use UTF-8 parameters. +// +// Deprecated: use [ModeMouseExtUtf8] instead. +const ( + Utf8ExtMouseMode = DECMode(1005) + + SetUtf8ExtMouseMode = "\x1b[?1005h" + ResetUtf8ExtMouseMode = "\x1b[?1005l" + RequestUtf8ExtMouseMode = "\x1b[?1005$p" +) + +// SGR Extended Mouse Mode changes the mouse tracking encoding to use SGR parameters. +// +// Deprecated: use [ModeMouseExtSgr] instead. +const ( + SgrExtMouseMode = DECMode(1006) + + SetSgrExtMouseMode = "\x1b[?1006h" + ResetSgrExtMouseMode = "\x1b[?1006l" + RequestSgrExtMouseMode = "\x1b[?1006$p" +) + +// Mouse SGR Extended mode. +// +// Deprecated: use [ModeMouseExtSgr], [SetModeMouseExtSgr], +// [ResetModeMouseExtSgr], and [RequestModeMouseExtSgr] instead. 
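+//
+// With SGR encoding, for example (hypothetical event), a left-button press at
+// cell (5, 3) is reported as "\x1b[<0;5;3M" and its release as "\x1b[<0;5;3m".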
+const (
+	MouseSgrExtMode    = DECMode(1006)
+	EnableMouseSgrExt  = "\x1b[?1006h"
+	DisableMouseSgrExt = "\x1b[?1006l"
+	RequestMouseSgrExt = "\x1b[?1006$p"
+)
+
+// URXVT Extended Mouse Mode changes the mouse tracking encoding to use an alternate encoding.
+//
+// Deprecated: use [ModeMouseExtUrxvt] instead.
+const (
+	UrxvtExtMouseMode = DECMode(1015)
+
+	SetUrxvtExtMouseMode     = "\x1b[?1015h"
+	ResetUrxvtExtMouseMode   = "\x1b[?1015l"
+	RequestUrxvtExtMouseMode = "\x1b[?1015$p"
+)
+
+// SGR Pixel Extended Mouse Mode changes the mouse tracking encoding to use SGR parameters with pixel coordinates.
+//
+// Deprecated: use [ModeMouseExtSgrPixel] instead.
+const (
+	SgrPixelExtMouseMode = DECMode(1016)
+
+	SetSgrPixelExtMouseMode     = "\x1b[?1016h"
+	ResetSgrPixelExtMouseMode   = "\x1b[?1016l"
+	RequestSgrPixelExtMouseMode = "\x1b[?1016$p"
+)
+
+// Alternate Screen Mode determines whether the alternate screen buffer is active.
+//
+// Deprecated: use [ModeAltScreen] instead.
+const (
+	AltScreenMode = DECMode(1047)
+
+	SetAltScreenMode     = "\x1b[?1047h"
+	ResetAltScreenMode   = "\x1b[?1047l"
+	RequestAltScreenMode = "\x1b[?1047$p"
+)
+
+// Save Cursor Mode saves the cursor position.
+//
+// Deprecated: use [ModeSaveCursor] instead.
+const (
+	SaveCursorMode = DECMode(1048)
+
+	SetSaveCursorMode     = "\x1b[?1048h"
+	ResetSaveCursorMode   = "\x1b[?1048l"
+	RequestSaveCursorMode = "\x1b[?1048$p"
+)
+
+// Alternate Screen Save Cursor Mode saves the cursor position and switches to alternate screen.
+//
+// Deprecated: use [ModeAltScreenSaveCursor] instead.
+const (
+	AltScreenSaveCursorMode = DECMode(1049)
+
+	SetAltScreenSaveCursorMode     = "\x1b[?1049h"
+	ResetAltScreenSaveCursorMode   = "\x1b[?1049l"
+	RequestAltScreenSaveCursorMode = "\x1b[?1049$p"
+)
+
+// Alternate Screen Buffer mode.
+//
+// Deprecated: use [ModeAltScreenSaveCursor] instead.
+const (
+	AltScreenBufferMode = DECMode(1049)
+
+	SetAltScreenBufferMode     = "\x1b[?1049h"
+	ResetAltScreenBufferMode   = "\x1b[?1049l"
+	RequestAltScreenBufferMode = "\x1b[?1049$p"
+
+	EnableAltScreenBuffer  = "\x1b[?1049h"
+	DisableAltScreenBuffer = "\x1b[?1049l"
+	RequestAltScreenBuffer = "\x1b[?1049$p"
+)
+
+// Bracketed Paste Mode determines whether pasted text is bracketed with escape sequences.
+//
+// Deprecated: use [ModeBracketedPaste] instead.
+const (
+	BracketedPasteMode = DECMode(2004)
+
+	SetBracketedPasteMode     = "\x1b[?2004h"
+	ResetBracketedPasteMode   = "\x1b[?2004l"
+	RequestBracketedPasteMode = "\x1b[?2004$p"
+)
+
+// Deprecated: use [SetModeBracketedPaste], [ResetModeBracketedPaste], and
+// [RequestModeBracketedPaste] instead.
+const (
+	EnableBracketedPaste  = "\x1b[?2004h" //nolint:revive
+	DisableBracketedPaste = "\x1b[?2004l"
+	RequestBracketedPaste = "\x1b[?2004$p"
+)
+
+// Synchronized Output Mode determines whether output is synchronized with the terminal.
+//
+// Deprecated: use [ModeSynchronizedOutput] instead.
+const (
+	SynchronizedOutputMode = DECMode(2026)
+
+	SetSynchronizedOutputMode     = "\x1b[?2026h"
+	ResetSynchronizedOutputMode   = "\x1b[?2026l"
+	RequestSynchronizedOutputMode = "\x1b[?2026$p"
+)
+
+// Synchronized output mode.
+//
+// Deprecated: use [ModeSynchronizedOutput], [SetModeSynchronizedOutput],
+// [ResetModeSynchronizedOutput], and [RequestModeSynchronizedOutput] instead.
+const (
+	SyncdOutputMode = DECMode(2026)
+
+	EnableSyncdOutput  = "\x1b[?2026h"
+	DisableSyncdOutput = "\x1b[?2026l"
+	RequestSyncdOutput = "\x1b[?2026$p"
+)
+
+// Unicode Core Mode determines whether the terminal uses Unicode grapheme clustering.
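+// For instance, with this mode set, a flag emoji such as "🇺🇳" (two regional
+// indicator runes) is treated as a single two-cell grapheme rather than two
+// independent characters.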
+// +// Deprecated: use [ModeUnicodeCore] instead. +const ( + UnicodeCoreMode = DECMode(2027) + + SetUnicodeCoreMode = "\x1b[?2027h" + ResetUnicodeCoreMode = "\x1b[?2027l" + RequestUnicodeCoreMode = "\x1b[?2027$p" +) + +// Grapheme Clustering Mode determines whether the terminal looks for grapheme clusters. +// +// Deprecated: use [ModeUnicodeCore], [SetModeUnicodeCore], +// [ResetModeUnicodeCore], and [RequestModeUnicodeCore] instead. +const ( + GraphemeClusteringMode = DECMode(2027) + + SetGraphemeClusteringMode = "\x1b[?2027h" + ResetGraphemeClusteringMode = "\x1b[?2027l" + RequestGraphemeClusteringMode = "\x1b[?2027$p" +) + +// Unicode Core mode. +// +// Deprecated: use [SetModeUnicodeCore], [ResetModeUnicodeCore], and +// [RequestModeUnicodeCore] instead. +const ( + EnableGraphemeClustering = "\x1b[?2027h" + DisableGraphemeClustering = "\x1b[?2027l" + RequestGraphemeClustering = "\x1b[?2027$p" +) + +// Light Dark Mode enables reporting the operating system's color scheme preference. +// +// Deprecated: use [ModeLightDark] instead. +const ( + LightDarkMode = DECMode(2031) + + SetLightDarkMode = "\x1b[?2031h" + ResetLightDarkMode = "\x1b[?2031l" + RequestLightDarkMode = "\x1b[?2031$p" +) + +// In Band Resize Mode reports terminal resize events as escape sequences. +// +// Deprecated: use [ModeInBandResize] instead. +const ( + InBandResizeMode = DECMode(2048) + + SetInBandResizeMode = "\x1b[?2048h" + ResetInBandResizeMode = "\x1b[?2048l" + RequestInBandResizeMode = "\x1b[?2048$p" +) + +// Win32Input determines whether input is processed by the Win32 console and Conpty. +// +// Deprecated: use [ModeWin32Input] instead. +const ( + Win32InputMode = DECMode(9001) + + SetWin32InputMode = "\x1b[?9001h" + ResetWin32InputMode = "\x1b[?9001l" + RequestWin32InputMode = "\x1b[?9001$p" +) + +// Deprecated: use [SetModeWin32Input], [ResetModeWin32Input], and +// [RequestModeWin32Input] instead. +const ( + EnableWin32Input = "\x1b[?9001h" //nolint:revive + DisableWin32Input = "\x1b[?9001l" + RequestWin32Input = "\x1b[?9001$p" +) diff --git a/vendor/github.com/charmbracelet/x/ansi/mouse.go b/vendor/github.com/charmbracelet/x/ansi/mouse.go index 0e4776bb2..94b8b4858 100644 --- a/vendor/github.com/charmbracelet/x/ansi/mouse.go +++ b/vendor/github.com/charmbracelet/x/ansi/mouse.go @@ -134,7 +134,7 @@ func EncodeMouseButton(b MouseButton, motion, shift, alt, ctrl bool) (m byte) { m |= bitMotion } - return //nolint:nakedret + return m } // x10Offset is the offset for X10 mouse events. diff --git a/vendor/github.com/charmbracelet/x/ansi/notification.go b/vendor/github.com/charmbracelet/x/ansi/notification.go index c712f3405..fd6221d24 100644 --- a/vendor/github.com/charmbracelet/x/ansi/notification.go +++ b/vendor/github.com/charmbracelet/x/ansi/notification.go @@ -1,5 +1,10 @@ package ansi +import ( + "fmt" + "strings" +) + // Notify sends a desktop notification using iTerm's OSC 9. // // OSC 9 ; Mc ST @@ -11,3 +16,17 @@ package ansi func Notify(s string) string { return "\x1b]9;" + s + "\x07" } + +// DesktopNotification sends a desktop notification based on the extensible OSC +// 99 escape code. +// +// OSC 99 ; ; ST +// OSC 99 ; ; BEL +// +// Where is a colon-separated list of key-value pairs, and +// is the notification body. 
+// +// See: https://sw.kovidgoyal.net/kitty/desktop-notifications/ +func DesktopNotification(payload string, metadata ...string) string { + return fmt.Sprintf("\x1b]99;%s;%s\x07", strings.Join(metadata, ":"), payload) +} diff --git a/vendor/github.com/charmbracelet/x/ansi/parser_decode.go b/vendor/github.com/charmbracelet/x/ansi/parser_decode.go index dfd2dc761..096c86354 100644 --- a/vendor/github.com/charmbracelet/x/ansi/parser_decode.go +++ b/vendor/github.com/charmbracelet/x/ansi/parser_decode.go @@ -4,8 +4,9 @@ import ( "unicode/utf8" "github.com/charmbracelet/x/ansi/parser" + "github.com/clipperhouse/displaywidth" + "github.com/clipperhouse/uax29/v2/graphemes" "github.com/mattn/go-runewidth" - "github.com/rivo/uniseg" ) // State represents the state of the ANSI escape sequence parser used by @@ -176,10 +177,7 @@ func decodeSequence[T string | []byte](m Method, b T, state State, p *Parser) (s } if utf8.RuneStart(c) { - seq, _, width, _ = FirstGraphemeCluster(b, -1) - if m == WcWidth { - width = runewidth.StringWidth(string(seq)) - } + seq, width = FirstGraphemeCluster(b, m) i += len(seq) return b[:i], width, i, NormalState } @@ -434,17 +432,22 @@ func HasEscPrefix[T string | []byte](b T) bool { return len(b) > 0 && b[0] == ESC } -// FirstGraphemeCluster returns the first grapheme cluster in the given string or byte slice. -// This is a syntactic sugar function that wraps -// uniseg.FirstGraphemeClusterInString and uniseg.FirstGraphemeCluster. -func FirstGraphemeCluster[T string | []byte](b T, state int) (T, T, int, int) { +// FirstGraphemeCluster returns the first grapheme cluster in the given string +// or byte slice, and its monospace display width. +func FirstGraphemeCluster[T string | []byte](b T, m Method) (T, int) { switch b := any(b).(type) { case string: - cluster, rest, width, newState := uniseg.FirstGraphemeClusterInString(b, state) - return T(cluster), T(rest), width, newState + cluster := graphemes.FromString(b).First() + if m == WcWidth { + return T(cluster), runewidth.StringWidth(cluster) + } + return T(cluster), displaywidth.String(cluster) case []byte: - cluster, rest, width, newState := uniseg.FirstGraphemeCluster(b, state) - return T(cluster), T(rest), width, newState + cluster := graphemes.FromBytes(b).First() + if m == WcWidth { + return T(cluster), runewidth.StringWidth(string(cluster)) + } + return T(cluster), displaywidth.Bytes(cluster) } panic("unreachable") } @@ -490,7 +493,7 @@ func Command(prefix, inter, final byte) (c int) { c = int(final) c |= int(prefix) << parser.PrefixShift c |= int(inter) << parser.IntermedShift - return + return c } // Param represents a sequence parameter. 
Sequence parameters with @@ -520,5 +523,5 @@ func Parameter(p int, hasMore bool) (s int) { if hasMore { s |= parser.HasMoreFlag } - return + return s } diff --git a/vendor/github.com/charmbracelet/x/ansi/sgr.go b/vendor/github.com/charmbracelet/x/ansi/sgr.go index 5e6d05dfe..b3481a9aa 100644 --- a/vendor/github.com/charmbracelet/x/ansi/sgr.go +++ b/vendor/github.com/charmbracelet/x/ansi/sgr.go @@ -21,59 +21,59 @@ func SGR(ps ...Attr) string { } var attrStrings = map[int]string{ - ResetAttr: resetAttr, - BoldAttr: boldAttr, - FaintAttr: faintAttr, - ItalicAttr: italicAttr, - UnderlineAttr: underlineAttr, - SlowBlinkAttr: slowBlinkAttr, - RapidBlinkAttr: rapidBlinkAttr, - ReverseAttr: reverseAttr, - ConcealAttr: concealAttr, - StrikethroughAttr: strikethroughAttr, - NormalIntensityAttr: normalIntensityAttr, - NoItalicAttr: noItalicAttr, - NoUnderlineAttr: noUnderlineAttr, - NoBlinkAttr: noBlinkAttr, - NoReverseAttr: noReverseAttr, - NoConcealAttr: noConcealAttr, - NoStrikethroughAttr: noStrikethroughAttr, - BlackForegroundColorAttr: blackForegroundColorAttr, - RedForegroundColorAttr: redForegroundColorAttr, - GreenForegroundColorAttr: greenForegroundColorAttr, - YellowForegroundColorAttr: yellowForegroundColorAttr, - BlueForegroundColorAttr: blueForegroundColorAttr, - MagentaForegroundColorAttr: magentaForegroundColorAttr, - CyanForegroundColorAttr: cyanForegroundColorAttr, - WhiteForegroundColorAttr: whiteForegroundColorAttr, - ExtendedForegroundColorAttr: extendedForegroundColorAttr, - DefaultForegroundColorAttr: defaultForegroundColorAttr, - BlackBackgroundColorAttr: blackBackgroundColorAttr, - RedBackgroundColorAttr: redBackgroundColorAttr, - GreenBackgroundColorAttr: greenBackgroundColorAttr, - YellowBackgroundColorAttr: yellowBackgroundColorAttr, - BlueBackgroundColorAttr: blueBackgroundColorAttr, - MagentaBackgroundColorAttr: magentaBackgroundColorAttr, - CyanBackgroundColorAttr: cyanBackgroundColorAttr, - WhiteBackgroundColorAttr: whiteBackgroundColorAttr, - ExtendedBackgroundColorAttr: extendedBackgroundColorAttr, - DefaultBackgroundColorAttr: defaultBackgroundColorAttr, - ExtendedUnderlineColorAttr: extendedUnderlineColorAttr, - DefaultUnderlineColorAttr: defaultUnderlineColorAttr, - BrightBlackForegroundColorAttr: brightBlackForegroundColorAttr, - BrightRedForegroundColorAttr: brightRedForegroundColorAttr, - BrightGreenForegroundColorAttr: brightGreenForegroundColorAttr, - BrightYellowForegroundColorAttr: brightYellowForegroundColorAttr, - BrightBlueForegroundColorAttr: brightBlueForegroundColorAttr, - BrightMagentaForegroundColorAttr: brightMagentaForegroundColorAttr, - BrightCyanForegroundColorAttr: brightCyanForegroundColorAttr, - BrightWhiteForegroundColorAttr: brightWhiteForegroundColorAttr, - BrightBlackBackgroundColorAttr: brightBlackBackgroundColorAttr, - BrightRedBackgroundColorAttr: brightRedBackgroundColorAttr, - BrightGreenBackgroundColorAttr: brightGreenBackgroundColorAttr, - BrightYellowBackgroundColorAttr: brightYellowBackgroundColorAttr, - BrightBlueBackgroundColorAttr: brightBlueBackgroundColorAttr, - BrightMagentaBackgroundColorAttr: brightMagentaBackgroundColorAttr, - BrightCyanBackgroundColorAttr: brightCyanBackgroundColorAttr, - BrightWhiteBackgroundColorAttr: brightWhiteBackgroundColorAttr, + AttrReset: attrReset, + AttrBold: attrBold, + AttrFaint: attrFaint, + AttrItalic: attrItalic, + AttrUnderline: attrUnderline, + AttrBlink: attrBlink, + AttrRapidBlink: attrRapidBlink, + AttrReverse: attrReverse, + AttrConceal: attrConceal, + AttrStrikethrough: 
attrStrikethrough, + AttrNormalIntensity: attrNormalIntensity, + AttrNoItalic: attrNoItalic, + AttrNoUnderline: attrNoUnderline, + AttrNoBlink: attrNoBlink, + AttrNoReverse: attrNoReverse, + AttrNoConceal: attrNoConceal, + AttrNoStrikethrough: attrNoStrikethrough, + AttrBlackForegroundColor: attrBlackForegroundColor, + AttrRedForegroundColor: attrRedForegroundColor, + AttrGreenForegroundColor: attrGreenForegroundColor, + AttrYellowForegroundColor: attrYellowForegroundColor, + AttrBlueForegroundColor: attrBlueForegroundColor, + AttrMagentaForegroundColor: attrMagentaForegroundColor, + AttrCyanForegroundColor: attrCyanForegroundColor, + AttrWhiteForegroundColor: attrWhiteForegroundColor, + AttrExtendedForegroundColor: attrExtendedForegroundColor, + AttrDefaultForegroundColor: attrDefaultForegroundColor, + AttrBlackBackgroundColor: attrBlackBackgroundColor, + AttrRedBackgroundColor: attrRedBackgroundColor, + AttrGreenBackgroundColor: attrGreenBackgroundColor, + AttrYellowBackgroundColor: attrYellowBackgroundColor, + AttrBlueBackgroundColor: attrBlueBackgroundColor, + AttrMagentaBackgroundColor: attrMagentaBackgroundColor, + AttrCyanBackgroundColor: attrCyanBackgroundColor, + AttrWhiteBackgroundColor: attrWhiteBackgroundColor, + AttrExtendedBackgroundColor: attrExtendedBackgroundColor, + AttrDefaultBackgroundColor: attrDefaultBackgroundColor, + AttrExtendedUnderlineColor: attrExtendedUnderlineColor, + AttrDefaultUnderlineColor: attrDefaultUnderlineColor, + AttrBrightBlackForegroundColor: attrBrightBlackForegroundColor, + AttrBrightRedForegroundColor: attrBrightRedForegroundColor, + AttrBrightGreenForegroundColor: attrBrightGreenForegroundColor, + AttrBrightYellowForegroundColor: attrBrightYellowForegroundColor, + AttrBrightBlueForegroundColor: attrBrightBlueForegroundColor, + AttrBrightMagentaForegroundColor: attrBrightMagentaForegroundColor, + AttrBrightCyanForegroundColor: attrBrightCyanForegroundColor, + AttrBrightWhiteForegroundColor: attrBrightWhiteForegroundColor, + AttrBrightBlackBackgroundColor: attrBrightBlackBackgroundColor, + AttrBrightRedBackgroundColor: attrBrightRedBackgroundColor, + AttrBrightGreenBackgroundColor: attrBrightGreenBackgroundColor, + AttrBrightYellowBackgroundColor: attrBrightYellowBackgroundColor, + AttrBrightBlueBackgroundColor: attrBrightBlueBackgroundColor, + AttrBrightMagentaBackgroundColor: attrBrightMagentaBackgroundColor, + AttrBrightCyanBackgroundColor: attrBrightCyanBackgroundColor, + AttrBrightWhiteBackgroundColor: attrBrightWhiteBackgroundColor, } diff --git a/vendor/github.com/charmbracelet/x/ansi/style.go b/vendor/github.com/charmbracelet/x/ansi/style.go index d8a7efaec..44649b42a 100644 --- a/vendor/github.com/charmbracelet/x/ansi/style.go +++ b/vendor/github.com/charmbracelet/x/ansi/style.go @@ -17,7 +17,9 @@ type Attr = int // Style represents an ANSI SGR (Select Graphic Rendition) style. type Style []string -// NewStyle returns a new style with the given attributes. +// NewStyle returns a new style with the given attributes. Attributes are SGR +// (Select Graphic Rendition) codes that control text formatting like bold, +// italic, colors, etc. func NewStyle(attrs ...Attr) Style { if len(attrs) == 0 { return Style{} @@ -46,7 +48,8 @@ func (s Style) String() string { return "\x1b[" + strings.Join(s, ";") + "m" } -// Styled returns a styled string with the given style applied. +// Styled returns a styled string with the given style applied. The style is +// applied at the beginning and reset at the end of the string. 
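+//
+// A minimal usage sketch:
+//
+//	warn := NewStyle(AttrBold, AttrRedForegroundColor)
+//	_ = warn.Styled("error") // "\x1b[1;31m" + "error" + reset sequence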
func (s Style) Styled(str string) string { if len(s) == 0 { return str @@ -54,309 +57,446 @@ func (s Style) Styled(str string) string { return s.String() + str + ResetStyle } -// Reset appends the reset style attribute to the style. +// Reset appends the reset style attribute to the style. This resets all +// formatting attributes to their defaults. func (s Style) Reset() Style { - return append(s, resetAttr) + return append(s, attrReset) } -// Bold appends the bold style attribute to the style. +// Bold appends the bold or normal intensity style attribute to the style. +// You can use [Style.Normal] to reset to normal intensity. func (s Style) Bold() Style { - return append(s, boldAttr) + return append(s, attrBold) } -// Faint appends the faint style attribute to the style. +// Faint appends the faint or normal intensity style attribute to the style. +// You can use [Style.Normal] to reset to normal intensity. func (s Style) Faint() Style { - return append(s, faintAttr) + return append(s, attrFaint) } -// Italic appends the italic style attribute to the style. -func (s Style) Italic() Style { - return append(s, italicAttr) +// Italic appends the italic or no italic style attribute to the style. +// When v is true, text is rendered in italic. When false, italic is disabled. +func (s Style) Italic(v bool) Style { + if v { + return append(s, attrItalic) + } + return append(s, attrNoItalic) } -// Underline appends the underline style attribute to the style. -func (s Style) Underline() Style { - return append(s, underlineAttr) +// Underline appends the underline or no underline style attribute to the style. +// When v is true, text is underlined. When false, underline is disabled. +func (s Style) Underline(v bool) Style { + if v { + return append(s, attrUnderline) + } + return append(s, attrNoUnderline) } // UnderlineStyle appends the underline style attribute to the style. -func (s Style) UnderlineStyle(u UnderlineStyle) Style { +// Supports various underline styles including single, double, curly, dotted, +// and dashed. +func (s Style) UnderlineStyle(u Underline) Style { switch u { - case NoUnderlineStyle: - return s.NoUnderline() - case SingleUnderlineStyle: - return s.Underline() - case DoubleUnderlineStyle: - return append(s, doubleUnderlineStyle) - case CurlyUnderlineStyle: - return append(s, curlyUnderlineStyle) - case DottedUnderlineStyle: - return append(s, dottedUnderlineStyle) - case DashedUnderlineStyle: - return append(s, dashedUnderlineStyle) + case UnderlineNone: + return s.Underline(false) + case UnderlineSingle: + return s.Underline(true) + case UnderlineDouble: + return append(s, underlineDouble) + case UnderlineCurly: + return append(s, underlineCurly) + case UnderlineDotted: + return append(s, underlineDotted) + case UnderlineDashed: + return append(s, underlineDashed) } return s } -// DoubleUnderline appends the double underline style attribute to the style. -// This is a convenience method for UnderlineStyle(DoubleUnderlineStyle). -func (s Style) DoubleUnderline() Style { - return s.UnderlineStyle(DoubleUnderlineStyle) +// Blink appends the slow blink or no blink style attribute to the style. +// When v is true, text blinks slowly (less than 150 per minute). When false, +// blinking is disabled. +func (s Style) Blink(v bool) Style { + if v { + return append(s, attrBlink) + } + return append(s, attrNoBlink) } -// CurlyUnderline appends the curly underline style attribute to the style. -// This is a convenience method for UnderlineStyle(CurlyUnderlineStyle). 
-func (s Style) CurlyUnderline() Style { - return s.UnderlineStyle(CurlyUnderlineStyle) +// RapidBlink appends the rapid blink or no blink style attribute to the style. +// When v is true, text blinks rapidly (150+ per minute). When false, blinking +// is disabled. +// +// Note that this is not widely supported in terminal emulators. +func (s Style) RapidBlink(v bool) Style { + if v { + return append(s, attrRapidBlink) + } + return append(s, attrNoBlink) } -// DottedUnderline appends the dotted underline style attribute to the style. -// This is a convenience method for UnderlineStyle(DottedUnderlineStyle). -func (s Style) DottedUnderline() Style { - return s.UnderlineStyle(DottedUnderlineStyle) +// Reverse appends the reverse or no reverse style attribute to the style. +// When v is true, foreground and background colors are swapped. When false, +// reverse video is disabled. +func (s Style) Reverse(v bool) Style { + if v { + return append(s, attrReverse) + } + return append(s, attrNoReverse) } -// DashedUnderline appends the dashed underline style attribute to the style. -// This is a convenience method for UnderlineStyle(DashedUnderlineStyle). -func (s Style) DashedUnderline() Style { - return s.UnderlineStyle(DashedUnderlineStyle) +// Conceal appends the conceal or no conceal style attribute to the style. +// When v is true, text is hidden/concealed. When false, concealment is +// disabled. +func (s Style) Conceal(v bool) Style { + if v { + return append(s, attrConceal) + } + return append(s, attrNoConceal) } -// SlowBlink appends the slow blink style attribute to the style. -func (s Style) SlowBlink() Style { - return append(s, slowBlinkAttr) +// Strikethrough appends the strikethrough or no strikethrough style attribute +// to the style. When v is true, text is rendered with a horizontal line through +// it. When false, strikethrough is disabled. +func (s Style) Strikethrough(v bool) Style { + if v { + return append(s, attrStrikethrough) + } + return append(s, attrNoStrikethrough) } -// RapidBlink appends the rapid blink style attribute to the style. -func (s Style) RapidBlink() Style { - return append(s, rapidBlinkAttr) -} - -// Reverse appends the reverse style attribute to the style. -func (s Style) Reverse() Style { - return append(s, reverseAttr) -} - -// Conceal appends the conceal style attribute to the style. -func (s Style) Conceal() Style { - return append(s, concealAttr) -} - -// Strikethrough appends the strikethrough style attribute to the style. -func (s Style) Strikethrough() Style { - return append(s, strikethroughAttr) -} - -// NormalIntensity appends the normal intensity style attribute to the style. -func (s Style) NormalIntensity() Style { - return append(s, normalIntensityAttr) +// Normal appends the normal intensity style attribute to the style. This +// resets [Style.Bold] and [Style.Faint] attributes. +func (s Style) Normal() Style { + return append(s, attrNormalIntensity) } // NoItalic appends the no italic style attribute to the style. +// +// Deprecated: use [Style.Italic](false) instead. func (s Style) NoItalic() Style { - return append(s, noItalicAttr) + return append(s, attrNoItalic) } // NoUnderline appends the no underline style attribute to the style. +// +// Deprecated: use [Style.Underline](false) instead. func (s Style) NoUnderline() Style { - return append(s, noUnderlineAttr) + return append(s, attrNoUnderline) } // NoBlink appends the no blink style attribute to the style. 
+// +// Deprecated: use [Style.Blink](false) or [Style.RapidBlink](false) instead. func (s Style) NoBlink() Style { - return append(s, noBlinkAttr) + return append(s, attrNoBlink) } // NoReverse appends the no reverse style attribute to the style. +// +// Deprecated: use [Style.Reverse](false) instead. func (s Style) NoReverse() Style { - return append(s, noReverseAttr) + return append(s, attrNoReverse) } // NoConceal appends the no conceal style attribute to the style. +// +// Deprecated: use [Style.Conceal](false) instead. func (s Style) NoConceal() Style { - return append(s, noConcealAttr) + return append(s, attrNoConceal) } // NoStrikethrough appends the no strikethrough style attribute to the style. +// +// Deprecated: use [Style.Strikethrough](false) instead. func (s Style) NoStrikethrough() Style { - return append(s, noStrikethroughAttr) + return append(s, attrNoStrikethrough) } // DefaultForegroundColor appends the default foreground color style attribute to the style. +// +// Deprecated: use [Style.ForegroundColor](nil) instead. func (s Style) DefaultForegroundColor() Style { - return append(s, defaultForegroundColorAttr) + return append(s, attrDefaultForegroundColor) } // DefaultBackgroundColor appends the default background color style attribute to the style. +// +// Deprecated: use [Style.BackgroundColor](nil) instead. func (s Style) DefaultBackgroundColor() Style { - return append(s, defaultBackgroundColorAttr) + return append(s, attrDefaultBackgroundColor) } // DefaultUnderlineColor appends the default underline color style attribute to the style. +// +// Deprecated: use [Style.UnderlineColor](nil) instead. func (s Style) DefaultUnderlineColor() Style { - return append(s, defaultUnderlineColorAttr) + return append(s, attrDefaultUnderlineColor) } // ForegroundColor appends the foreground color style attribute to the style. +// If c is nil, the default foreground color is used. Supports [BasicColor], +// [IndexedColor] (256-color), and [color.Color] (24-bit RGB). func (s Style) ForegroundColor(c Color) Style { + if c == nil { + return append(s, attrDefaultForegroundColor) + } return append(s, foregroundColorString(c)) } // BackgroundColor appends the background color style attribute to the style. +// If c is nil, the default background color is used. Supports [BasicColor], +// [IndexedColor] (256-color), and [color.Color] (24-bit RGB). func (s Style) BackgroundColor(c Color) Style { + if c == nil { + return append(s, attrDefaultBackgroundColor) + } return append(s, backgroundColorString(c)) } // UnderlineColor appends the underline color style attribute to the style. +// If c is nil, the default underline color is used. Supports [BasicColor], +// [IndexedColor] (256-color), and [color.Color] (24-bit RGB). func (s Style) UnderlineColor(c Color) Style { + if c == nil { + return append(s, attrDefaultUnderlineColor) + } return append(s, underlineColorString(c)) } +// Underline represents an ANSI SGR (Select Graphic Rendition) underline style. +type Underline = byte + // UnderlineStyle represents an ANSI SGR (Select Graphic Rendition) underline // style. +// +// Deprecated: use [Underline] instead. type UnderlineStyle = byte const ( - doubleUnderlineStyle = "4:2" - curlyUnderlineStyle = "4:3" - dottedUnderlineStyle = "4:4" - dashedUnderlineStyle = "4:5" + underlineDouble = "4:2" + underlineCurly = "4:3" + underlineDotted = "4:4" + underlineDashed = "4:5" ) +// Underline styles constants. const ( - // NoUnderlineStyle is the default underline style. 
- NoUnderlineStyle UnderlineStyle = iota - // SingleUnderlineStyle is a single underline style. + UnderlineNone Underline = iota + UnderlineSingle + UnderlineDouble + UnderlineCurly + UnderlineDotted + UnderlineDashed +) + +// Underline styles constants. +// +// Deprecated: use [UnderlineNone], [UnderlineSingle], etc. instead. +const ( + NoUnderlineStyle Underline = iota SingleUnderlineStyle - // DoubleUnderlineStyle is a double underline style. DoubleUnderlineStyle - // CurlyUnderlineStyle is a curly underline style. CurlyUnderlineStyle - // DottedUnderlineStyle is a dotted underline style. DottedUnderlineStyle - // DashedUnderlineStyle is a dashed underline style. DashedUnderlineStyle ) +// Underline styles constants. +// +// Deprecated: use [UnderlineNone], [UnderlineSingle], etc. instead. +const ( + UnderlineStyleNone Underline = iota + UnderlineStyleSingle + UnderlineStyleDouble + UnderlineStyleCurly + UnderlineStyleDotted + UnderlineStyleDashed +) + // SGR (Select Graphic Rendition) style attributes. // See: https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_(Select_Graphic_Rendition)_parameters const ( - ResetAttr Attr = 0 - BoldAttr Attr = 1 - FaintAttr Attr = 2 - ItalicAttr Attr = 3 - UnderlineAttr Attr = 4 - SlowBlinkAttr Attr = 5 - RapidBlinkAttr Attr = 6 - ReverseAttr Attr = 7 - ConcealAttr Attr = 8 - StrikethroughAttr Attr = 9 - NormalIntensityAttr Attr = 22 - NoItalicAttr Attr = 23 - NoUnderlineAttr Attr = 24 - NoBlinkAttr Attr = 25 - NoReverseAttr Attr = 27 - NoConcealAttr Attr = 28 - NoStrikethroughAttr Attr = 29 - BlackForegroundColorAttr Attr = 30 - RedForegroundColorAttr Attr = 31 - GreenForegroundColorAttr Attr = 32 - YellowForegroundColorAttr Attr = 33 - BlueForegroundColorAttr Attr = 34 - MagentaForegroundColorAttr Attr = 35 - CyanForegroundColorAttr Attr = 36 - WhiteForegroundColorAttr Attr = 37 - ExtendedForegroundColorAttr Attr = 38 - DefaultForegroundColorAttr Attr = 39 - BlackBackgroundColorAttr Attr = 40 - RedBackgroundColorAttr Attr = 41 - GreenBackgroundColorAttr Attr = 42 - YellowBackgroundColorAttr Attr = 43 - BlueBackgroundColorAttr Attr = 44 - MagentaBackgroundColorAttr Attr = 45 - CyanBackgroundColorAttr Attr = 46 - WhiteBackgroundColorAttr Attr = 47 - ExtendedBackgroundColorAttr Attr = 48 - DefaultBackgroundColorAttr Attr = 49 - ExtendedUnderlineColorAttr Attr = 58 - DefaultUnderlineColorAttr Attr = 59 - BrightBlackForegroundColorAttr Attr = 90 - BrightRedForegroundColorAttr Attr = 91 - BrightGreenForegroundColorAttr Attr = 92 - BrightYellowForegroundColorAttr Attr = 93 - BrightBlueForegroundColorAttr Attr = 94 - BrightMagentaForegroundColorAttr Attr = 95 - BrightCyanForegroundColorAttr Attr = 96 - BrightWhiteForegroundColorAttr Attr = 97 - BrightBlackBackgroundColorAttr Attr = 100 - BrightRedBackgroundColorAttr Attr = 101 - BrightGreenBackgroundColorAttr Attr = 102 - BrightYellowBackgroundColorAttr Attr = 103 - BrightBlueBackgroundColorAttr Attr = 104 - BrightMagentaBackgroundColorAttr Attr = 105 - BrightCyanBackgroundColorAttr Attr = 106 - BrightWhiteBackgroundColorAttr Attr = 107 + AttrReset Attr = 0 + AttrBold Attr = 1 + AttrFaint Attr = 2 + AttrItalic Attr = 3 + AttrUnderline Attr = 4 + AttrBlink Attr = 5 + AttrRapidBlink Attr = 6 + AttrReverse Attr = 7 + AttrConceal Attr = 8 + AttrStrikethrough Attr = 9 + AttrNormalIntensity Attr = 22 + AttrNoItalic Attr = 23 + AttrNoUnderline Attr = 24 + AttrNoBlink Attr = 25 + AttrNoReverse Attr = 27 + AttrNoConceal Attr = 28 + AttrNoStrikethrough Attr = 29 + AttrBlackForegroundColor Attr = 30 + 
AttrRedForegroundColor Attr = 31 + AttrGreenForegroundColor Attr = 32 + AttrYellowForegroundColor Attr = 33 + AttrBlueForegroundColor Attr = 34 + AttrMagentaForegroundColor Attr = 35 + AttrCyanForegroundColor Attr = 36 + AttrWhiteForegroundColor Attr = 37 + AttrExtendedForegroundColor Attr = 38 + AttrDefaultForegroundColor Attr = 39 + AttrBlackBackgroundColor Attr = 40 + AttrRedBackgroundColor Attr = 41 + AttrGreenBackgroundColor Attr = 42 + AttrYellowBackgroundColor Attr = 43 + AttrBlueBackgroundColor Attr = 44 + AttrMagentaBackgroundColor Attr = 45 + AttrCyanBackgroundColor Attr = 46 + AttrWhiteBackgroundColor Attr = 47 + AttrExtendedBackgroundColor Attr = 48 + AttrDefaultBackgroundColor Attr = 49 + AttrExtendedUnderlineColor Attr = 58 + AttrDefaultUnderlineColor Attr = 59 + AttrBrightBlackForegroundColor Attr = 90 + AttrBrightRedForegroundColor Attr = 91 + AttrBrightGreenForegroundColor Attr = 92 + AttrBrightYellowForegroundColor Attr = 93 + AttrBrightBlueForegroundColor Attr = 94 + AttrBrightMagentaForegroundColor Attr = 95 + AttrBrightCyanForegroundColor Attr = 96 + AttrBrightWhiteForegroundColor Attr = 97 + AttrBrightBlackBackgroundColor Attr = 100 + AttrBrightRedBackgroundColor Attr = 101 + AttrBrightGreenBackgroundColor Attr = 102 + AttrBrightYellowBackgroundColor Attr = 103 + AttrBrightBlueBackgroundColor Attr = 104 + AttrBrightMagentaBackgroundColor Attr = 105 + AttrBrightCyanBackgroundColor Attr = 106 + AttrBrightWhiteBackgroundColor Attr = 107 - RGBColorIntroducerAttr Attr = 2 - ExtendedColorIntroducerAttr Attr = 5 + AttrRGBColorIntroducer Attr = 2 + AttrExtendedColorIntroducer Attr = 5 +) + +// SGR (Select Graphic Rendition) style attributes. +// +// Deprecated: use Attr* constants instead. +const ( + ResetAttr = AttrReset + BoldAttr = AttrBold + FaintAttr = AttrFaint + ItalicAttr = AttrItalic + UnderlineAttr = AttrUnderline + SlowBlinkAttr = AttrBlink + RapidBlinkAttr = AttrRapidBlink + ReverseAttr = AttrReverse + ConcealAttr = AttrConceal + StrikethroughAttr = AttrStrikethrough + NormalIntensityAttr = AttrNormalIntensity + NoItalicAttr = AttrNoItalic + NoUnderlineAttr = AttrNoUnderline + NoBlinkAttr = AttrNoBlink + NoReverseAttr = AttrNoReverse + NoConcealAttr = AttrNoConceal + NoStrikethroughAttr = AttrNoStrikethrough + BlackForegroundColorAttr = AttrBlackForegroundColor + RedForegroundColorAttr = AttrRedForegroundColor + GreenForegroundColorAttr = AttrGreenForegroundColor + YellowForegroundColorAttr = AttrYellowForegroundColor + BlueForegroundColorAttr = AttrBlueForegroundColor + MagentaForegroundColorAttr = AttrMagentaForegroundColor + CyanForegroundColorAttr = AttrCyanForegroundColor + WhiteForegroundColorAttr = AttrWhiteForegroundColor + ExtendedForegroundColorAttr = AttrExtendedForegroundColor + DefaultForegroundColorAttr = AttrDefaultForegroundColor + BlackBackgroundColorAttr = AttrBlackBackgroundColor + RedBackgroundColorAttr = AttrRedBackgroundColor + GreenBackgroundColorAttr = AttrGreenBackgroundColor + YellowBackgroundColorAttr = AttrYellowBackgroundColor + BlueBackgroundColorAttr = AttrBlueBackgroundColor + MagentaBackgroundColorAttr = AttrMagentaBackgroundColor + CyanBackgroundColorAttr = AttrCyanBackgroundColor + WhiteBackgroundColorAttr = AttrWhiteBackgroundColor + ExtendedBackgroundColorAttr = AttrExtendedBackgroundColor + DefaultBackgroundColorAttr = AttrDefaultBackgroundColor + ExtendedUnderlineColorAttr = AttrExtendedUnderlineColor + DefaultUnderlineColorAttr = AttrDefaultUnderlineColor + BrightBlackForegroundColorAttr = AttrBrightBlackForegroundColor + 
+	BrightRedForegroundColorAttr = AttrBrightRedForegroundColor
+	BrightGreenForegroundColorAttr = AttrBrightGreenForegroundColor
+	BrightYellowForegroundColorAttr = AttrBrightYellowForegroundColor
+	BrightBlueForegroundColorAttr = AttrBrightBlueForegroundColor
+	BrightMagentaForegroundColorAttr = AttrBrightMagentaForegroundColor
+	BrightCyanForegroundColorAttr = AttrBrightCyanForegroundColor
+	BrightWhiteForegroundColorAttr = AttrBrightWhiteForegroundColor
+	BrightBlackBackgroundColorAttr = AttrBrightBlackBackgroundColor
+	BrightRedBackgroundColorAttr = AttrBrightRedBackgroundColor
+	BrightGreenBackgroundColorAttr = AttrBrightGreenBackgroundColor
+	BrightYellowBackgroundColorAttr = AttrBrightYellowBackgroundColor
+	BrightBlueBackgroundColorAttr = AttrBrightBlueBackgroundColor
+	BrightMagentaBackgroundColorAttr = AttrBrightMagentaBackgroundColor
+	BrightCyanBackgroundColorAttr = AttrBrightCyanBackgroundColor
+	BrightWhiteBackgroundColorAttr = AttrBrightWhiteBackgroundColor
+	RGBColorIntroducerAttr = AttrRGBColorIntroducer
+	ExtendedColorIntroducerAttr = AttrExtendedColorIntroducer
 )
 
 const (
-	resetAttr = "0"
-	boldAttr = "1"
-	faintAttr = "2"
-	italicAttr = "3"
-	underlineAttr = "4"
-	slowBlinkAttr = "5"
-	rapidBlinkAttr = "6"
-	reverseAttr = "7"
-	concealAttr = "8"
-	strikethroughAttr = "9"
-	normalIntensityAttr = "22"
-	noItalicAttr = "23"
-	noUnderlineAttr = "24"
-	noBlinkAttr = "25"
-	noReverseAttr = "27"
-	noConcealAttr = "28"
-	noStrikethroughAttr = "29"
-	blackForegroundColorAttr = "30"
-	redForegroundColorAttr = "31"
-	greenForegroundColorAttr = "32"
-	yellowForegroundColorAttr = "33"
-	blueForegroundColorAttr = "34"
-	magentaForegroundColorAttr = "35"
-	cyanForegroundColorAttr = "36"
-	whiteForegroundColorAttr = "37"
-	extendedForegroundColorAttr = "38"
-	defaultForegroundColorAttr = "39"
-	blackBackgroundColorAttr = "40"
-	redBackgroundColorAttr = "41"
-	greenBackgroundColorAttr = "42"
-	yellowBackgroundColorAttr = "43"
-	blueBackgroundColorAttr = "44"
-	magentaBackgroundColorAttr = "45"
-	cyanBackgroundColorAttr = "46"
-	whiteBackgroundColorAttr = "47"
-	extendedBackgroundColorAttr = "48"
-	defaultBackgroundColorAttr = "49"
-	extendedUnderlineColorAttr = "58"
-	defaultUnderlineColorAttr = "59"
-	brightBlackForegroundColorAttr = "90"
-	brightRedForegroundColorAttr = "91"
-	brightGreenForegroundColorAttr = "92"
-	brightYellowForegroundColorAttr = "93"
-	brightBlueForegroundColorAttr = "94"
-	brightMagentaForegroundColorAttr = "95"
-	brightCyanForegroundColorAttr = "96"
-	brightWhiteForegroundColorAttr = "97"
-	brightBlackBackgroundColorAttr = "100"
-	brightRedBackgroundColorAttr = "101"
-	brightGreenBackgroundColorAttr = "102"
-	brightYellowBackgroundColorAttr = "103"
-	brightBlueBackgroundColorAttr = "104"
-	brightMagentaBackgroundColorAttr = "105"
-	brightCyanBackgroundColorAttr = "106"
-	brightWhiteBackgroundColorAttr = "107"
+	attrReset = "0"
+	attrBold = "1"
+	attrFaint = "2"
+	attrItalic = "3"
+	attrUnderline = "4"
+	attrBlink = "5"
+	attrRapidBlink = "6"
+	attrReverse = "7"
+	attrConceal = "8"
+	attrStrikethrough = "9"
+	attrNormalIntensity = "22"
+	attrNoItalic = "23"
+	attrNoUnderline = "24"
+	attrNoBlink = "25"
+	attrNoReverse = "27"
+	attrNoConceal = "28"
+	attrNoStrikethrough = "29"
+	attrBlackForegroundColor = "30"
+	attrRedForegroundColor = "31"
+	attrGreenForegroundColor = "32"
+	attrYellowForegroundColor = "33"
+	attrBlueForegroundColor = "34"
+	attrMagentaForegroundColor = "35"
+	attrCyanForegroundColor = "36"
+	attrWhiteForegroundColor = "37"
attrExtendedForegroundColor = "38" + attrDefaultForegroundColor = "39" + attrBlackBackgroundColor = "40" + attrRedBackgroundColor = "41" + attrGreenBackgroundColor = "42" + attrYellowBackgroundColor = "43" + attrBlueBackgroundColor = "44" + attrMagentaBackgroundColor = "45" + attrCyanBackgroundColor = "46" + attrWhiteBackgroundColor = "47" + attrExtendedBackgroundColor = "48" + attrDefaultBackgroundColor = "49" + attrExtendedUnderlineColor = "58" + attrDefaultUnderlineColor = "59" + attrBrightBlackForegroundColor = "90" + attrBrightRedForegroundColor = "91" + attrBrightGreenForegroundColor = "92" + attrBrightYellowForegroundColor = "93" + attrBrightBlueForegroundColor = "94" + attrBrightMagentaForegroundColor = "95" + attrBrightCyanForegroundColor = "96" + attrBrightWhiteForegroundColor = "97" + attrBrightBlackBackgroundColor = "100" + attrBrightRedBackgroundColor = "101" + attrBrightGreenBackgroundColor = "102" + attrBrightYellowBackgroundColor = "103" + attrBrightBlueBackgroundColor = "104" + attrBrightMagentaBackgroundColor = "105" + attrBrightCyanBackgroundColor = "106" + attrBrightWhiteBackgroundColor = "107" ) // foregroundColorString returns the style SGR attribute for the given @@ -364,42 +504,44 @@ const ( // See: https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_(Select_Graphic_Rendition)_parameters func foregroundColorString(c Color) string { switch c := c.(type) { + case nil: + return attrDefaultForegroundColor case BasicColor: // 3-bit or 4-bit ANSI foreground // "3" or "9" where n is the color number from 0 to 7 switch c { case Black: - return blackForegroundColorAttr + return attrBlackForegroundColor case Red: - return redForegroundColorAttr + return attrRedForegroundColor case Green: - return greenForegroundColorAttr + return attrGreenForegroundColor case Yellow: - return yellowForegroundColorAttr + return attrYellowForegroundColor case Blue: - return blueForegroundColorAttr + return attrBlueForegroundColor case Magenta: - return magentaForegroundColorAttr + return attrMagentaForegroundColor case Cyan: - return cyanForegroundColorAttr + return attrCyanForegroundColor case White: - return whiteForegroundColorAttr + return attrWhiteForegroundColor case BrightBlack: - return brightBlackForegroundColorAttr + return attrBrightBlackForegroundColor case BrightRed: - return brightRedForegroundColorAttr + return attrBrightRedForegroundColor case BrightGreen: - return brightGreenForegroundColorAttr + return attrBrightGreenForegroundColor case BrightYellow: - return brightYellowForegroundColorAttr + return attrBrightYellowForegroundColor case BrightBlue: - return brightBlueForegroundColorAttr + return attrBrightBlueForegroundColor case BrightMagenta: - return brightMagentaForegroundColorAttr + return attrBrightMagentaForegroundColor case BrightCyan: - return brightCyanForegroundColorAttr + return attrBrightCyanForegroundColor case BrightWhite: - return brightWhiteForegroundColorAttr + return attrBrightWhiteForegroundColor } case ExtendedColor: // 256-color ANSI foreground @@ -414,7 +556,7 @@ func foregroundColorString(c Color) string { strconv.FormatUint(uint64(shift(g)), 10) + ";" + strconv.FormatUint(uint64(shift(b)), 10) } - return defaultForegroundColorAttr + return attrDefaultForegroundColor } // backgroundColorString returns the style SGR attribute for the given @@ -422,42 +564,44 @@ func foregroundColorString(c Color) string { // See: https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_(Select_Graphic_Rendition)_parameters func backgroundColorString(c Color) string { switch c 
 	switch c := c.(type) {
+	case nil:
+		return attrDefaultBackgroundColor
 	case BasicColor:
 		// 3-bit or 4-bit ANSI background
 		// "4<n>" or "10<n>" where n is the color number from 0 to 7
 		switch c {
 		case Black:
-			return blackBackgroundColorAttr
+			return attrBlackBackgroundColor
 		case Red:
-			return redBackgroundColorAttr
+			return attrRedBackgroundColor
 		case Green:
-			return greenBackgroundColorAttr
+			return attrGreenBackgroundColor
 		case Yellow:
-			return yellowBackgroundColorAttr
+			return attrYellowBackgroundColor
 		case Blue:
-			return blueBackgroundColorAttr
+			return attrBlueBackgroundColor
 		case Magenta:
-			return magentaBackgroundColorAttr
+			return attrMagentaBackgroundColor
 		case Cyan:
-			return cyanBackgroundColorAttr
+			return attrCyanBackgroundColor
 		case White:
-			return whiteBackgroundColorAttr
+			return attrWhiteBackgroundColor
 		case BrightBlack:
-			return brightBlackBackgroundColorAttr
+			return attrBrightBlackBackgroundColor
 		case BrightRed:
-			return brightRedBackgroundColorAttr
+			return attrBrightRedBackgroundColor
 		case BrightGreen:
-			return brightGreenBackgroundColorAttr
+			return attrBrightGreenBackgroundColor
 		case BrightYellow:
-			return brightYellowBackgroundColorAttr
+			return attrBrightYellowBackgroundColor
 		case BrightBlue:
-			return brightBlueBackgroundColorAttr
+			return attrBrightBlueBackgroundColor
 		case BrightMagenta:
-			return brightMagentaBackgroundColorAttr
+			return attrBrightMagentaBackgroundColor
 		case BrightCyan:
-			return brightCyanBackgroundColorAttr
+			return attrBrightCyanBackgroundColor
 		case BrightWhite:
-			return brightWhiteBackgroundColorAttr
+			return attrBrightWhiteBackgroundColor
 		}
 	case ExtendedColor:
 		// 256-color ANSI background
@@ -472,7 +616,7 @@ func backgroundColorString(c Color) string {
 			strconv.FormatUint(uint64(shift(g)), 10) + ";" +
 			strconv.FormatUint(uint64(shift(b)), 10)
 	}
-	return defaultBackgroundColorAttr
+	return attrDefaultBackgroundColor
 }
 
 // underlineColorString returns the style SGR attribute for the given underline
@@ -480,6 +624,8 @@ func backgroundColorString(c Color) string {
 // See: https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_(Select_Graphic_Rendition)_parameters
 func underlineColorString(c Color) string {
 	switch c := c.(type) {
+	case nil:
+		return attrDefaultUnderlineColor
 	// NOTE: we can't use 3-bit and 4-bit ANSI color codes with underline
 	// color, use 256-color instead.
 	//
@@ -498,7 +644,7 @@ func underlineColorString(c Color) string {
 			strconv.FormatUint(uint64(shift(g)), 10) + ";" +
 			strconv.FormatUint(uint64(shift(b)), 10)
 	}
-	return defaultUnderlineColorAttr
+	return attrDefaultUnderlineColor
 }
 
 // ReadStyleColor decodes a color from a slice of parameters. It returns the
@@ -526,7 +672,7 @@ func underlineColorString(c Color) string {
 // 2. Support ignoring and omitting the color space id (second parameter) with respect to RGB colors
 // 3. Support ignoring and omitting the 6th parameter with respect to RGB and CMY colors
 // 4. Support reading RGBA colors
-func ReadStyleColor(params Params, co *color.Color) (n int) {
+func ReadStyleColor(params Params, co *color.Color) int {
 	if len(params) < 2 { // Need at least SGR type and color type
 		return 0
 	}
@@ -535,7 +681,7 @@ func ReadStyleColor(params Params, co *color.Color) (n int) {
 	s := params[0]
 	p := params[1]
 	colorType := p.Param(0)
-	n = 2
+	n := 2
 
 	paramsfn := func() (p1, p2, p3, p4 int) {
 		// Where should we start reading the color?
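
Note: the two introducer values renamed earlier in this diff (2 for direct RGB, 5 for indexed color) are exactly what ReadStyleColor dispatches on in the cases that follow. As an illustrative sketch of the raw SGR parameter layouts — variable names here are made up, only the string constants come from the code above:

	// "38;5;<n>" selects an indexed (256-color) foreground;
	// "38;2;<r>;<g>;<b>" selects a direct RGB foreground.
	fg256 := "\x1b[" + attrExtendedForegroundColor + ";5;196m"    // palette color 196
	fgRGB := "\x1b[" + attrExtendedForegroundColor + ";2;255;0;0m" // direct RGB red
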
@@ -594,7 +740,7 @@ func ReadStyleColor(params Params, co *color.Color) (n int) {
 				B: uint8(b), //nolint:gosec
 				A: 0xff,
 			}
-			return //nolint:nakedret
+			return n
 		case 3: // CMY direct color
 			if len(params) < 5 {
@@ -612,7 +758,7 @@ func ReadStyleColor(params Params, co *color.Color) (n int) {
 				Y: uint8(y), //nolint:gosec
 				K: 0,
 			}
-			return //nolint:nakedret
+			return n
 		case 4: // CMYK direct color
 			if len(params) < 6 {
@@ -630,7 +776,7 @@ func ReadStyleColor(params Params, co *color.Color) (n int) {
 				Y: uint8(y), //nolint:gosec
 				K: uint8(k), //nolint:gosec
 			}
-			return //nolint:nakedret
+			return n
 		case 5: // indexed color
 			if len(params) < 3 {
@@ -665,7 +811,7 @@ func ReadStyleColor(params Params, co *color.Color) (n int) {
 			B: uint8(b), //nolint:gosec
 			A: uint8(a), //nolint:gosec
 		}
-		return //nolint:nakedret
+		return n
 	default:
 		return 0
diff --git a/vendor/github.com/charmbracelet/x/ansi/truncate.go b/vendor/github.com/charmbracelet/x/ansi/truncate.go
index 3f541fa56..f95f82930 100644
--- a/vendor/github.com/charmbracelet/x/ansi/truncate.go
+++ b/vendor/github.com/charmbracelet/x/ansi/truncate.go
@@ -1,11 +1,11 @@
 package ansi
 
 import (
-	"bytes"
+	"strings"
 
 	"github.com/charmbracelet/x/ansi/parser"
-	"github.com/mattn/go-runewidth"
-	"github.com/rivo/uniseg"
+	"github.com/clipperhouse/displaywidth"
+	"github.com/clipperhouse/uax29/v2/graphemes"
 )
 
 // Cut the string, without adding any prefix or tail strings. This function is
@@ -74,12 +74,11 @@ func truncate(m Method, s string, length int, tail string) string {
 		return ""
 	}
 
-	var cluster []byte
-	var buf bytes.Buffer
+	var cluster string
+	var buf strings.Builder
 	curWidth := 0
 	ignoring := false
 	pstate := parser.GroundState // initial state
-	b := []byte(s)
 	i := 0
 
 	// Here we iterate over the bytes of the string and collect printable
@@ -88,16 +87,12 @@ func truncate(m Method, s string, length int, tail string) string {
 	//
 	// Once we reach the given length, we start ignoring characters and only
 	// collect ANSI escape codes until we reach the end of string.
-	for i < len(b) {
-		state, action := parser.Table.Transition(pstate, b[i])
+	for i < len(s) {
+		state, action := parser.Table.Transition(pstate, s[i])
 		if state == parser.Utf8State {
 			// This action happens when we transition to the Utf8State.
 			var width int
-			cluster, _, width, _ = uniseg.FirstGraphemeCluster(b[i:], -1)
-			if m == WcWidth {
-				width = runewidth.StringWidth(string(cluster))
-			}
-
+			cluster, width = FirstGraphemeCluster(s[i:], m)
 			// increment the index by the length of the cluster
 			i += len(cluster)
 			curWidth += width
@@ -118,7 +113,7 @@ func truncate(m Method, s string, length int, tail string) string {
 			continue
 		}
 
-		buf.Write(cluster)
+		buf.WriteString(cluster)
 
 		// Done collecting, now we're back in the ground state.
 		pstate = parser.GroundState
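
Note: truncate.go now funnels all cluster decoding through the package's FirstGraphemeCluster helper instead of calling uniseg and go-runewidth inline. That helper's definition is not part of this hunk; based on the call sites and the clipperhouse APIs used in ByteToGraphemeRange below, it plausibly reduces to something like this sketch (an assumption — the real helper also honors the WcWidth method, which is ignored here for brevity):

	func firstGraphemeCluster(s string, m Method) (cluster string, width int) {
		// Decode the first grapheme cluster of s and measure its display width.
		g := graphemes.FromString(s)
		if !g.Next() {
			return "", 0
		}
		cluster = g.Value()
		return cluster, displaywidth.String(cluster)
	}
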
pstate = parser.GroundState @@ -152,7 +147,7 @@ func truncate(m Method, s string, length int, tail string) string { } fallthrough default: - buf.WriteByte(b[i]) + buf.WriteByte(s[i]) i++ } @@ -193,27 +188,23 @@ func truncateLeft(m Method, s string, n int, prefix string) string { return s } - var cluster []byte - var buf bytes.Buffer + var cluster string + var buf strings.Builder curWidth := 0 ignoring := true pstate := parser.GroundState - b := []byte(s) i := 0 - for i < len(b) { + for i < len(s) { if !ignoring { - buf.Write(b[i:]) + buf.WriteString(s[i:]) break } - state, action := parser.Table.Transition(pstate, b[i]) + state, action := parser.Table.Transition(pstate, s[i]) if state == parser.Utf8State { var width int - cluster, _, width, _ = uniseg.FirstGraphemeCluster(b[i:], -1) - if m == WcWidth { - width = runewidth.StringWidth(string(cluster)) - } + cluster, width = FirstGraphemeCluster(s[i:], m) i += len(cluster) curWidth += width @@ -224,7 +215,7 @@ func truncateLeft(m Method, s string, n int, prefix string) string { } if curWidth > n { - buf.Write(cluster) + buf.WriteString(cluster) } if ignoring { @@ -259,7 +250,7 @@ func truncateLeft(m Method, s string, n int, prefix string) string { } fallthrough default: - buf.WriteByte(b[i]) + buf.WriteByte(s[i]) i++ } @@ -278,22 +269,22 @@ func truncateLeft(m Method, s string, n int, prefix string) string { // You can use this with [Truncate], [TruncateLeft], and [Cut]. func ByteToGraphemeRange(str string, byteStart, byteStop int) (charStart, charStop int) { bytePos, charPos := 0, 0 - gr := uniseg.NewGraphemes(str) + gr := graphemes.FromString(str) for byteStart > bytePos { if !gr.Next() { break } - bytePos += len(gr.Str()) - charPos += max(1, gr.Width()) + bytePos += len(gr.Value()) + charPos += max(1, displaywidth.String(gr.Value())) } charStart = charPos for byteStop > bytePos { if !gr.Next() { break } - bytePos += len(gr.Str()) - charPos += max(1, gr.Width()) + bytePos += len(gr.Value()) + charPos += max(1, displaywidth.String(gr.Value())) } charStop = charPos - return + return charStart, charStop } diff --git a/vendor/github.com/charmbracelet/x/ansi/urxvt.go b/vendor/github.com/charmbracelet/x/ansi/urxvt.go new file mode 100644 index 000000000..b9e86281d --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/urxvt.go @@ -0,0 +1,17 @@ +package ansi + +import ( + "fmt" + "strings" +) + +// URxvtExt returns an escape sequence for calling a URxvt perl extension with +// the given name and parameters. +// +// OSC 777 ; extension_name ; param1 ; param2 ; ... ST +// OSC 777 ; extension_name ; param1 ; param2 ; ... BEL +// +// See: https://man.archlinux.org/man/extra/rxvt-unicode/urxvt.7.en#XTerm_Operating_System_Commands +func URxvtExt(extension string, params ...string) string { + return fmt.Sprintf("\x1b]777;%s;%s\x07", extension, strings.Join(params, ";")) +} diff --git a/vendor/github.com/charmbracelet/x/ansi/width.go b/vendor/github.com/charmbracelet/x/ansi/width.go index cc0858161..fd680cfdc 100644 --- a/vendor/github.com/charmbracelet/x/ansi/width.go +++ b/vendor/github.com/charmbracelet/x/ansi/width.go @@ -4,8 +4,6 @@ import ( "bytes" "github.com/charmbracelet/x/ansi/parser" - "github.com/mattn/go-runewidth" - "github.com/rivo/uniseg" ) // Strip removes ANSI escape codes from a string. 
@@ -83,20 +81,16 @@ func stringWidth(m Method, s string) int {
 	}
 
 	var (
-		pstate  = parser.GroundState // initial state
-		cluster string
-		width   int
+		pstate = parser.GroundState // initial state
+		width  int
 	)
 
 	for i := 0; i < len(s); i++ {
 		state, action := parser.Table.Transition(pstate, s[i])
 		if state == parser.Utf8State {
-			var w int
-			cluster, _, w, _ = uniseg.FirstGraphemeClusterInString(s[i:], -1)
-			if m == WcWidth {
-				w = runewidth.StringWidth(cluster)
-			}
+			cluster, w := FirstGraphemeCluster(s[i:], m)
 			width += w
+			i += len(cluster) - 1
 			pstate = parser.GroundState
 			continue
diff --git a/vendor/github.com/charmbracelet/x/ansi/wrap.go b/vendor/github.com/charmbracelet/x/ansi/wrap.go
index 253e12335..530be6cae 100644
--- a/vendor/github.com/charmbracelet/x/ansi/wrap.go
+++ b/vendor/github.com/charmbracelet/x/ansi/wrap.go
@@ -2,12 +2,11 @@ package ansi
 
 import (
 	"bytes"
+	"strings"
 	"unicode"
 	"unicode/utf8"
 
 	"github.com/charmbracelet/x/ansi/parser"
-	"github.com/mattn/go-runewidth"
-	"github.com/rivo/uniseg"
 )
 
 // nbsp is a non-breaking space.
@@ -55,12 +54,9 @@ func hardwrap(m Method, s string, limit int, preserveSpace bool) string {
 	i := 0
 	for i < len(b) {
 		state, action := parser.Table.Transition(pstate, b[i])
-		if state == parser.Utf8State { //nolint:nestif
+		if state == parser.Utf8State {
 			var width int
-			cluster, _, width, _ = uniseg.FirstGraphemeCluster(b[i:], -1)
-			if m == WcWidth {
-				width = runewidth.StringWidth(string(cluster))
-			}
+			cluster, width = FirstGraphemeCluster(b[i:], m)
 			i += len(cluster)
 
 			if curWidth+width > limit {
@@ -192,10 +188,7 @@ func wordwrap(m Method, s string, limit int, breakpoints string) string {
 		state, action := parser.Table.Transition(pstate, b[i])
 		if state == parser.Utf8State { //nolint:nestif
 			var width int
-			cluster, _, width, _ = uniseg.FirstGraphemeCluster(b[i:], -1)
-			if m == WcWidth {
-				width = runewidth.StringWidth(string(cluster))
-			}
+			cluster, width = FirstGraphemeCluster(b[i:], m)
 			i += len(cluster)
 
 			r, _ := utf8.DecodeRune(cluster)
@@ -303,7 +296,7 @@ func wrap(m Method, s string, limit int, breakpoints string) string {
 	}
 
 	var (
-		cluster []byte
+		cluster string
 		buf     bytes.Buffer
 		word    bytes.Buffer
 		space   bytes.Buffer
@@ -311,10 +304,12 @@ func wrap(m Method, s string, limit int, breakpoints string) string {
 		curWidth   int                  // written width of the line
 		wordLen    int                  // word buffer len without ANSI escape codes
 		pstate     = parser.GroundState // initial state
-		b          = []byte(s)
 	)
 
 	addSpace := func() {
+		if spaceWidth == 0 && space.Len() == 0 {
+			return
+		}
 		curWidth += spaceWidth
 		buf.Write(space.Bytes())
 		space.Reset()
@@ -341,30 +336,27 @@ func wrap(m Method, s string, limit int, breakpoints string) string {
 	}
 
 	i := 0
-	for i < len(b) {
-		state, action := parser.Table.Transition(pstate, b[i])
+	for i < len(s) {
+		state, action := parser.Table.Transition(pstate, s[i])
 		if state == parser.Utf8State { //nolint:nestif
 			var width int
-			cluster, _, width, _ = uniseg.FirstGraphemeCluster(b[i:], -1)
-			if m == WcWidth {
-				width = runewidth.StringWidth(string(cluster))
-			}
+			cluster, width = FirstGraphemeCluster(s[i:], m)
 			i += len(cluster)
 
-			r, _ := utf8.DecodeRune(cluster)
+			r, _ := utf8.DecodeRuneInString(cluster)
 			switch {
 			case r != utf8.RuneError && unicode.IsSpace(r) && r != nbsp: // nbsp is a non-breaking space
 				addWord()
 				space.WriteRune(r)
 				spaceWidth += width
-			case bytes.ContainsAny(cluster, breakpoints):
+			case strings.ContainsAny(cluster, breakpoints):
 				addSpace()
 				if curWidth+wordLen+width > limit {
-					word.Write(cluster)
+					word.WriteString(cluster)
 					wordLen += width
 				} else {
 					addWord()
-					buf.Write(cluster)
+					buf.WriteString(cluster)
 					curWidth += width
 				}
 			default:
@@ -373,12 +365,17 @@ func wrap(m Method, s string, limit int, breakpoints string) string {
 					addWord()
 				}
 
-				word.Write(cluster)
+				word.WriteString(cluster)
 				wordLen += width
 
 				if curWidth+wordLen+spaceWidth > limit {
 					addNewline()
 				}
+
+				if wordLen == limit {
+					// Hardwrap the word if it's too long
+					addWord()
+				}
 			}
 
 			pstate = parser.GroundState
@@ -387,7 +384,7 @@ func wrap(m Method, s string, limit int, breakpoints string) string {
 
 		switch action {
 		case parser.PrintAction, parser.ExecuteAction:
-			switch r := rune(b[i]); {
+			switch r := rune(s[i]); {
 			case r == '\n':
 				if wordLen == 0 {
 					if curWidth+spaceWidth > limit {
@@ -424,6 +421,7 @@ func wrap(m Method, s string, limit int, breakpoints string) string {
 				if curWidth == limit {
 					addNewline()
 				}
+
 				word.WriteRune(r)
 				wordLen++
 
@@ -438,7 +436,7 @@ func wrap(m Method, s string, limit int, breakpoints string) string {
 			}
 
 		default:
-			word.WriteByte(b[i])
+			word.WriteByte(s[i])
 		}
 
 		// We manage the UTF8 state separately manually above.
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/buffer.go b/vendor/github.com/charmbracelet/x/cellbuf/buffer.go
index 790d1f7c4..e144cc561 100644
--- a/vendor/github.com/charmbracelet/x/cellbuf/buffer.go
+++ b/vendor/github.com/charmbracelet/x/cellbuf/buffer.go
@@ -1,3 +1,4 @@
+// Package cellbuf provides terminal cell buffer functionality.
 package cellbuf
 
 import (
@@ -24,7 +25,7 @@ func NewCell(r rune, comb ...rune) (c *Cell) {
 	}
 	c.Comb = comb
 	c.Width = runewidth.StringWidth(string(append([]rune{r}, comb...)))
-	return
+	return c
 }
 
 // NewCellString returns a new cell with the given string content. This is a
@@ -46,7 +47,7 @@ func NewCellString(s string) (c *Cell) {
 			c.Comb = append(c.Comb, r)
 		}
 	}
-	return
+	return c
 }
 
 // NewGraphemeCell returns a new cell. This is a convenience function that
@@ -71,7 +72,7 @@ func newGraphemeCell(s string, w int) (c *Cell) {
 			c.Comb = append(c.Comb, r)
 		}
 	}
-	return
+	return c
 }
 
 // Line represents a line in the terminal.
@@ -104,7 +105,7 @@ func (l Line) String() (s string) {
 		}
 	}
 	s = strings.TrimRight(s, " ")
-	return
+	return s
 }
 
 // At returns the cell at the given x position.
@@ -150,7 +151,7 @@ func (l Line) set(x int, c *Cell, clone bool) bool {
 		for j := 1; j < maxCellWidth && x-j >= 0; j++ {
 			wide := l.At(x - j)
 			if wide != nil && wide.Width > 1 && j < wide.Width {
-				for k := 0; k < wide.Width; k++ {
+				for k := range wide.Width {
 					l[x-j+k] = wide.Clone().Blank()
 				}
 				break
@@ -206,7 +207,7 @@ func (b *Buffer) String() (s string) {
 			s += "\r\n"
 		}
 	}
-	return
+	return s
 }
 
 // Line returns a pointer to the line at the given y position.
@@ -296,7 +297,7 @@ func (b *Buffer) FillRect(c *Cell, rect Rectangle) {
 	}
 	for y := rect.Min.Y; y < rect.Max.Y; y++ {
 		for x := rect.Min.X; x < rect.Max.X; x += cellWidth {
-			b.setCell(x, y, c, false) //nolint:errcheck
+			b.setCell(x, y, c, false)
 		}
 	}
 }
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/cell.go b/vendor/github.com/charmbracelet/x/cellbuf/cell.go
index 4d49d45ee..cdb16e9c7 100644
--- a/vendor/github.com/charmbracelet/x/cellbuf/cell.go
+++ b/vendor/github.com/charmbracelet/x/cellbuf/cell.go
@@ -96,7 +96,7 @@ func (c *Cell) Clear() bool {
 func (c *Cell) Clone() (n *Cell) {
 	n = new(Cell)
 	*n = *c
-	return
+	return n
 }
 
 // Blank makes the cell a blank cell by setting the rune to a space, comb to
@@ -164,12 +164,12 @@ type UnderlineStyle = ansi.UnderlineStyle
 
 // These are the available underline styles.
 const (
-	NoUnderline = ansi.NoUnderlineStyle
-	SingleUnderline = ansi.SingleUnderlineStyle
-	DoubleUnderline = ansi.DoubleUnderlineStyle
-	CurlyUnderline = ansi.CurlyUnderlineStyle
-	DottedUnderline = ansi.DottedUnderlineStyle
-	DashedUnderline = ansi.DashedUnderlineStyle
+	NoUnderline = ansi.UnderlineStyleNone
+	SingleUnderline = ansi.UnderlineStyleSingle
+	DoubleUnderline = ansi.UnderlineStyleDouble
+	CurlyUnderline = ansi.UnderlineStyleCurly
+	DottedUnderline = ansi.UnderlineStyleDotted
+	DashedUnderline = ansi.UnderlineStyleDashed
 )
 
 // Style represents the Style of a cell.
@@ -189,7 +189,7 @@ func (s Style) Sequence() string {
 
 	var b ansi.Style
 
-	if s.Attrs != 0 {
+	if s.Attrs != 0 { //nolint:nestif
 		if s.Attrs&BoldAttr != 0 {
 			b = b.Bold()
 		}
@@ -197,36 +197,31 @@ func (s Style) Sequence() string {
 			b = b.Faint()
 		}
 		if s.Attrs&ItalicAttr != 0 {
-			b = b.Italic()
+			b = b.Italic(true)
 		}
 		if s.Attrs&SlowBlinkAttr != 0 {
-			b = b.SlowBlink()
+			b = b.Blink(true)
 		}
 		if s.Attrs&RapidBlinkAttr != 0 {
-			b = b.RapidBlink()
+			b = b.RapidBlink(true)
 		}
 		if s.Attrs&ReverseAttr != 0 {
-			b = b.Reverse()
+			b = b.Reverse(true)
 		}
 		if s.Attrs&ConcealAttr != 0 {
-			b = b.Conceal()
+			b = b.Conceal(true)
 		}
 		if s.Attrs&StrikethroughAttr != 0 {
-			b = b.Strikethrough()
+			b = b.Strikethrough(true)
 		}
 	}
 	if s.UlStyle != NoUnderline {
-		switch s.UlStyle {
-		case SingleUnderline:
-			b = b.Underline()
-		case DoubleUnderline:
-			b = b.DoubleUnderline()
-		case CurlyUnderline:
-			b = b.CurlyUnderline()
-		case DottedUnderline:
-			b = b.DottedUnderline()
-		case DashedUnderline:
-			b = b.DashedUnderline()
+		switch u := s.UlStyle; u {
+		case NoUnderline:
+			b = b.Underline(false)
+		default:
+			b = b.Underline(true)
+			b = b.UnderlineStyle(u)
 		}
 	}
 	if s.Fg != nil {
@@ -268,64 +263,48 @@ func (s Style) DiffSequence(o Style) string {
 		isNormal bool
 	)
 
-	if s.Attrs != o.Attrs {
+	if s.Attrs != o.Attrs { //nolint:nestif
 		if s.Attrs&BoldAttr != o.Attrs&BoldAttr {
 			if s.Attrs&BoldAttr != 0 {
 				b = b.Bold()
 			} else if !isNormal {
 				isNormal = true
-				b = b.NormalIntensity()
+				b = b.Normal()
 			}
 		}
 		if s.Attrs&FaintAttr != o.Attrs&FaintAttr {
 			if s.Attrs&FaintAttr != 0 {
 				b = b.Faint()
 			} else if !isNormal {
-				b = b.NormalIntensity()
+				b = b.Normal()
 			}
 		}
 		if s.Attrs&ItalicAttr != o.Attrs&ItalicAttr {
-			if s.Attrs&ItalicAttr != 0 {
-				b = b.Italic()
-			} else {
-				b = b.NoItalic()
-			}
+			b = b.Italic(s.Attrs&ItalicAttr != 0)
 		}
 		if s.Attrs&SlowBlinkAttr != o.Attrs&SlowBlinkAttr {
 			if s.Attrs&SlowBlinkAttr != 0 {
-				b = b.SlowBlink()
+				b = b.Blink(true)
 			} else if !noBlink {
 				noBlink = true
-				b = b.NoBlink()
+				b = b.Blink(false)
 			}
 		}
 		if s.Attrs&RapidBlinkAttr != o.Attrs&RapidBlinkAttr {
 			if s.Attrs&RapidBlinkAttr != 0 {
-				b = b.RapidBlink()
+				b = b.RapidBlink(true)
 			} else if !noBlink {
-				b = b.NoBlink()
+				b = b.Blink(false)
 			}
 		}
 		if s.Attrs&ReverseAttr != o.Attrs&ReverseAttr {
-			if s.Attrs&ReverseAttr != 0 {
-				b = b.Reverse()
-			} else {
-				b = b.NoReverse()
-			}
+			b = b.Reverse(s.Attrs&ReverseAttr != 0)
 		}
 		if s.Attrs&ConcealAttr != o.Attrs&ConcealAttr {
-			if s.Attrs&ConcealAttr != 0 {
-				b = b.Conceal()
-			} else {
-				b = b.NoConceal()
-			}
+			b = b.Conceal(s.Attrs&ConcealAttr != 0)
 		}
 		if s.Attrs&StrikethroughAttr != o.Attrs&StrikethroughAttr {
-			if s.Attrs&StrikethroughAttr != 0 {
-				b = b.Strikethrough()
-			} else {
-				b = b.NoStrikethrough()
-			}
+			b = b.Strikethrough(s.Attrs&StrikethroughAttr != 0)
 		}
 	}
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/geom.go b/vendor/github.com/charmbracelet/x/cellbuf/geom.go
index c12e6fb1d..7232e76f6 100644
--- a/vendor/github.com/charmbracelet/x/cellbuf/geom.go
+++ b/vendor/github.com/charmbracelet/x/cellbuf/geom.go
@@ -12,7 +12,7 @@ func Pos(x, y int) Position {
 	return image.Pt(x, y)
 }
 
-// Rectange represents a rectangle.
+// Rectangle represents a rectangle.
 type Rectangle = image.Rectangle
 
 // Rect is a shorthand for Rectangle.
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/hardscroll.go b/vendor/github.com/charmbracelet/x/cellbuf/hardscroll.go
index 402ac06a6..a55fcc704 100644
--- a/vendor/github.com/charmbracelet/x/cellbuf/hardscroll.go
+++ b/vendor/github.com/charmbracelet/x/cellbuf/hardscroll.go
@@ -75,7 +75,7 @@ func (s *Screen) scrolln(n, top, bot, maxY int) (v bool) { //nolint:unparam
 	)
 
 	blank := s.clearBlank()
-	if n > 0 {
+	if n > 0 { //nolint:nestif
 		// Scroll up (forward)
 		v = s.scrollUp(n, top, bot, 0, maxY, blank)
 		if !v {
@@ -99,7 +99,7 @@ func (s *Screen) scrolln(n, top, bot, maxY int) (v bool) { //nolint:unparam
 			s.move(0, bot-n+1)
 			s.clearToBottom(nil)
 		} else {
-			for i := 0; i < n; i++ {
+			for i := range n {
 				s.move(0, bot-i)
 				s.clearToEnd(nil, false)
 			}
@@ -124,7 +124,7 @@ func (s *Screen) scrolln(n, top, bot, maxY int) (v bool) { //nolint:unparam
 
 	// Clear newly shifted-in lines.
 	if v && (nonDestScrollRegion || (memoryBelow && top == 0)) {
-		for i := 0; i < -n; i++ {
+		for i := range -n {
 			s.move(0, top+i)
 			s.clearToEnd(nil, false)
 		}
@@ -133,7 +133,7 @@ func (s *Screen) scrolln(n, top, bot, maxY int) (v bool) { //nolint:unparam
 	}
 
 	if !v {
-		return
+		return v
 	}
 
 	s.scrollBuffer(s.curbuf, n, top, bot, blank)
@@ -193,7 +193,7 @@ func (s *Screen) touchLine(width, height, y, n int, changed bool) {
 
 // scrollUp scrolls the screen up by n lines.
 func (s *Screen) scrollUp(n, top, bot, minY, maxY int, blank *Cell) bool {
-	if n == 1 && top == minY && bot == maxY {
+	if n == 1 && top == minY && bot == maxY { //nolint:nestif
 		s.move(0, bot)
 		s.updatePen(blank)
 		s.buf.WriteByte('\n')
@@ -202,13 +202,14 @@ func (s *Screen) scrollUp(n, top, bot, minY, maxY int, blank *Cell) bool {
 		s.updatePen(blank)
 		s.buf.WriteString(ansi.DeleteLine(1))
 	} else if top == minY && bot == maxY {
-		if s.xtermLike {
+		supportsSU := s.caps.Contains(capSU)
+		if supportsSU {
 			s.move(0, bot)
 		} else {
 			s.move(0, top)
 		}
 		s.updatePen(blank)
-		if s.xtermLike {
+		if supportsSU {
 			s.buf.WriteString(ansi.ScrollUp(n))
 		} else {
 			s.buf.WriteString(strings.Repeat("\n", n))
@@ -225,7 +226,7 @@ func (s *Screen) scrollUp(n, top, bot, minY, maxY int, blank *Cell) bool {
 
 // scrollDown scrolls the screen down by n lines.
 func (s *Screen) scrollDown(n, top, bot, minY, maxY int, blank *Cell) bool {
-	if n == 1 && top == minY && bot == maxY {
+	if n == 1 && top == minY && bot == maxY { //nolint:nestif
 		s.move(0, top)
 		s.updatePen(blank)
 		s.buf.WriteString(ansi.ReverseIndex)
@@ -236,7 +237,7 @@ func (s *Screen) scrollDown(n, top, bot, minY, maxY int, blank *Cell) bool {
 	} else if top == minY && bot == maxY {
 		s.move(0, top)
 		s.updatePen(blank)
-		if s.xtermLike {
+		if s.caps.Contains(capSD) {
 			s.buf.WriteString(ansi.ScrollDown(n))
 		} else {
 			s.buf.WriteString(strings.Repeat(ansi.ReverseIndex, n))
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/hashmap.go b/vendor/github.com/charmbracelet/x/cellbuf/hashmap.go
index 0d25b5492..f4e088619 100644
--- a/vendor/github.com/charmbracelet/x/cellbuf/hashmap.go
+++ b/vendor/github.com/charmbracelet/x/cellbuf/hashmap.go
@@ -15,7 +15,7 @@ func hash(l Line) (h uint64) {
 		}
 		h += (h << 5) + uint64(r)
 	}
-	return
+	return h
}
 
 // hashmap represents a single [Line] hash.
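
Note: the hash function in the hunk above is a djb2-style accumulator — h += (h << 5) + uint64(r) is h = h*33 + r, here without djb2's usual 5381 seed — computed per line so the scroll optimizer can match moved lines cheaply. A standalone equivalent of the same arithmetic:

	func lineHash(runes []rune) (h uint64) {
		for _, r := range runes {
			h = h*33 + uint64(r) // same as h += (h << 5) + uint64(r)
		}
		return h
	}
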
@@ -33,7 +33,7 @@ func (s *Screen) updateHashmap() {
 	height := s.newbuf.Height()
 	if len(s.oldhash) >= height && len(s.newhash) >= height {
 		// rehash changed lines
-		for i := 0; i < height; i++ {
+		for i := range height {
 			_, ok := s.touch[i]
 			if ok {
 				s.oldhash[i] = hash(s.curbuf.Line(i))
@@ -48,14 +48,14 @@ func (s *Screen) updateHashmap() {
 		if len(s.newhash) != height {
 			s.newhash = make([]uint64, height)
 		}
-		for i := 0; i < height; i++ {
+		for i := range height {
 			s.oldhash[i] = hash(s.curbuf.Line(i))
 			s.newhash[i] = hash(s.newbuf.Line(i))
 		}
 	}
 
 	s.hashtab = make([]hashmap, height*2)
-	for i := 0; i < height; i++ {
+	for i := range height {
 		hashval := s.oldhash[i]
 
 		// Find matching hash or empty slot
@@ -71,7 +71,7 @@ func (s *Screen) updateHashmap() {
 		s.hashtab[idx].oldcount++
 		s.hashtab[idx].oldindex = i
 	}
-	for i := 0; i < height; i++ {
+	for i := range height {
 		hashval := s.newhash[i]
 
 		// Find matching hash or empty slot
@@ -130,7 +130,7 @@ func (s *Screen) updateHashmap() {
 		s.growHunks()
 	}
 }
 
-// scrollOldhash
+// scrollOldhash.
 func (s *Screen) scrollOldhash(n, top, bot int) {
 	if len(s.oldhash) == 0 {
 		return
@@ -287,7 +287,7 @@ func (s *Screen) updateCost(from, to Line) (cost int) {
 			cost++
 		}
 	}
-	return
+	return cost
 }
 
 func (s *Screen) updateCostBlank(to Line) (cost int) {
@@ -297,5 +297,5 @@ func (s *Screen) updateCostBlank(to Line) (cost int) {
 			cost++
 		}
 	}
-	return
+	return cost
 }
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/link.go b/vendor/github.com/charmbracelet/x/cellbuf/link.go
index 112f8e8ab..7bf9f382f 100644
--- a/vendor/github.com/charmbracelet/x/cellbuf/link.go
+++ b/vendor/github.com/charmbracelet/x/cellbuf/link.go
@@ -4,7 +4,7 @@ import (
 	"github.com/charmbracelet/colorprofile"
 )
 
-// Convert converts a hyperlink to respect the given color profile.
+// ConvertLink converts a hyperlink to respect the given color profile.
 func ConvertLink(h Link, p colorprofile.Profile) Link {
 	if p == colorprofile.NoTTY {
 		return Link{}
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/pen.go b/vendor/github.com/charmbracelet/x/cellbuf/pen.go
new file mode 100644
index 000000000..49744e1d9
--- /dev/null
+++ b/vendor/github.com/charmbracelet/x/cellbuf/pen.go
@@ -0,0 +1,92 @@
+package cellbuf
+
+import (
+	"io"
+
+	"github.com/charmbracelet/x/ansi"
+)
+
+// PenWriter is a writer that writes to a buffer and keeps track of the current
+// pen style and link state for the purpose of wrapping with newlines.
+type PenWriter struct {
+	w     io.Writer
+	p     *ansi.Parser
+	style Style
+	link  Link
+}
+
+// NewPenWriter returns a new PenWriter.
+func NewPenWriter(w io.Writer) *PenWriter {
+	pw := &PenWriter{w: w}
+	pw.p = ansi.GetParser()
+	handleCsi := func(cmd ansi.Cmd, params ansi.Params) {
+		if cmd == 'm' {
+			ReadStyle(params, &pw.style)
+		}
+	}
+	handleOsc := func(cmd int, data []byte) {
+		if cmd == 8 {
+			ReadLink(data, &pw.link)
+		}
+	}
+	pw.p.SetHandler(ansi.Handler{
+		HandleCsi: handleCsi,
+		HandleOsc: handleOsc,
+	})
+	return pw
+}
+
+// Style returns the current pen style.
+func (w *PenWriter) Style() Style {
+	return w.style
+}
+
+// Link returns the current pen link.
+func (w *PenWriter) Link() Link {
+	return w.link
+}
+
+// Write writes to the buffer.
+func (w *PenWriter) Write(p []byte) (int, error) {
+	for i := range p {
+		b := p[i]
+		w.p.Advance(b)
+		if b == '\n' {
+			if !w.style.Empty() {
+				_, _ = w.w.Write([]byte(ansi.ResetStyle))
+			}
+			if !w.link.Empty() {
+				_, _ = w.w.Write([]byte(ansi.ResetHyperlink()))
+			}
+		}
+
+		_, _ = w.w.Write([]byte{b})
+		if b == '\n' {
+			if !w.link.Empty() {
+				_, _ = w.w.Write([]byte(ansi.SetHyperlink(w.link.URL, w.link.Params)))
+			}
+			if !w.style.Empty() {
+				_, _ = w.w.Write([]byte(w.style.Sequence()))
+			}
+		}
+	}
+
+	return len(p), nil
+}
+
+// Close closes the writer, resets the style and link if necessary, and releases
+// its parser. Calling it matters only for performance; forgetting it does not
+// cause safety issues or leaks.
+func (w *PenWriter) Close() error {
+	if !w.style.Empty() {
+		_, _ = w.w.Write([]byte(ansi.ResetStyle))
+	}
+	if !w.link.Empty() {
+		_, _ = w.w.Write([]byte(ansi.ResetHyperlink()))
+	}
+	if w.p != nil {
+		ansi.PutParser(w.p)
+		w.p = nil
+	}
+	return nil
+}
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/screen.go b/vendor/github.com/charmbracelet/x/cellbuf/screen.go
index 963b9cac3..be997787e 100644
--- a/vendor/github.com/charmbracelet/x/cellbuf/screen.go
+++ b/vendor/github.com/charmbracelet/x/cellbuf/screen.go
@@ -39,9 +39,9 @@ func relativeCursorMove(s *Screen, fx, fy, tx, ty int, overwrite, useTabs, useBa
 	var seq strings.Builder
 
 	width, height := s.newbuf.Width(), s.newbuf.Height()
-	if ty != fy {
+	if ty != fy { //nolint:nestif
 		var yseq string
-		if s.xtermLike && !s.opts.RelativeCursor {
+		if s.caps.Contains(capVPA) && !s.opts.RelativeCursor {
 			yseq = ansi.VerticalPositionAbsolute(ty + 1)
 		}
 
@@ -54,9 +54,13 @@ func relativeCursorMove(s *Screen, fx, fy, tx, ty int, overwrite, useTabs, useBa
 			}
 			shouldScroll := !s.opts.AltScreen && fy+n >= s.scrollHeight
 			if lf := strings.Repeat("\n", n); shouldScroll || (fy+n < height && len(lf) < len(yseq)) {
+				//nolint:godox
 				// TODO: Ensure we're not unintentionally scrolling the screen down.
 				yseq = lf
 				s.scrollHeight = max(s.scrollHeight, fy+n)
+				if s.opts.MapNL {
+					fx = 0
+				}
 			}
 		} else if ty < fy {
 			n := fy - ty
@@ -64,6 +68,7 @@ func relativeCursorMove(s *Screen, fx, fy, tx, ty int, overwrite, useTabs, useBa
 				yseq = cuu
 			}
 			if n == 1 && fy-1 > 0 {
+				//nolint:godox
 				// TODO: Ensure we're not unintentionally scrolling the screen up.
 				yseq = ansi.ReverseIndex
 			}
@@ -72,9 +77,9 @@ func relativeCursorMove(s *Screen, fx, fy, tx, ty int, overwrite, useTabs, useBa
 		seq.WriteString(yseq)
 	}
 
-	if tx != fx {
+	if tx != fx { //nolint:nestif
 		var xseq string
-		if s.xtermLike && !s.opts.RelativeCursor {
+		if s.caps.Contains(capHPA) && !s.opts.RelativeCursor {
 			xseq = ansi.HorizontalPositionAbsolute(tx + 1)
 		}
 
@@ -93,7 +98,8 @@ func relativeCursorMove(s *Screen, fx, fy, tx, ty int, overwrite, useTabs, useBa
 			if tabs > 0 {
 				cht := ansi.CursorHorizontalForwardTab(tabs)
 				tab := strings.Repeat("\t", tabs)
-				if false && s.xtermLike && len(cht) < len(tab) {
+				if false && s.caps.Contains(capCHT) && len(cht) < len(tab) {
+					//nolint:godox
 					// TODO: The linux console and some terminals such as
 					// Alacritty don't support [ansi.CHT]. Enable this when
 					// we have a way to detect this, or after 5 years when
@@ -144,7 +150,7 @@ func relativeCursorMove(s *Screen, fx, fy, tx, ty int, overwrite, useTabs, useBa
 		}
 	} else if tx < fx {
 		n := fx - tx
-		if useTabs && s.xtermLike {
+		if useTabs && s.caps.Contains(capCBT) {
 			// VT100 does not support backward tabs [ansi.CBT].
 			col := fx
 
@@ -190,7 +196,7 @@ func moveCursor(s *Screen, x, y int, overwrite bool) (seq string) {
 		// Method #0: Use [ansi.CUP] if the distance is long.
 		seq = ansi.CursorPosition(x+1, y+1)
 		if fx == -1 || fy == -1 || notLocal(s.newbuf.Width(), fx, fy, x, y) {
-			return
+			return seq
 		}
 	}
 
@@ -234,7 +240,7 @@ func moveCursor(s *Screen, x, y int, overwrite bool) (seq string) {
 		}
 	}
 
-	return
+	return seq
 }
 
 // moveCursor moves the cursor to the specified position.
@@ -242,10 +248,10 @@ func (s *Screen) moveCursor(x, y int, overwrite bool) {
 	if !s.opts.AltScreen && s.cur.X == -1 && s.cur.Y == -1 {
 		// First cursor movement in inline mode, move the cursor to the first
 		// column before moving to the target position.
-		s.buf.WriteByte('\r') //nolint:errcheck
+		s.buf.WriteByte('\r')
 		s.cur.X, s.cur.Y = 0, 0
 	}
-	s.buf.WriteString(moveCursor(s, x, y, overwrite)) //nolint:errcheck
+	s.buf.WriteString(moveCursor(s, x, y, overwrite))
 	s.cur.X, s.cur.Y = x, y
 }
 
@@ -274,10 +280,11 @@ func (s *Screen) move(x, y int) {
 	// Reset wrap around (phantom cursor) state
 	if s.atPhantom {
 		s.cur.X = 0
-		s.buf.WriteByte('\r') //nolint:errcheck
-		s.atPhantom = false   // reset phantom cell state
+		s.buf.WriteByte('\r')
+		s.atPhantom = false // reset phantom cell state
 	}
 
+	//nolint:godox
 	// TODO: Investigate if we need to handle this case and/or if we need the
 	// following code.
 	//
@@ -291,7 +298,7 @@ func (s *Screen) move(x, y int) {
 	//
 	//	if l > 0 {
 	//		s.cur.X = 0
-	//		s.buf.WriteString("\r" + strings.Repeat("\n", l)) //nolint:errcheck
+	//		s.buf.WriteString("\r" + strings.Repeat("\n", l))
 	//	}
 	// }
 
@@ -339,6 +346,10 @@ type ScreenOptions struct {
 	HardTabs bool
 	// Backspace is whether to use backspace characters to move the cursor.
 	Backspace bool
+	// MapNL whether we have ONLCR mapping enabled. When we set the terminal to
+	// raw mode, the ONLCR mode gets disabled. ONLCR maps any newline/linefeed
+	// (`\n`) character to carriage return + line feed (`\r\n`).
+	MapNL bool
 }
 
 // lineData represents the metadata for a line.
@@ -365,13 +376,13 @@ type Screen struct {
 	opts          ScreenOptions
 	mu            sync.Mutex
 	method        ansi.Method
-	scrollHeight  int  // keeps track of how many lines we've scrolled down (inline mode)
-	altScreenMode bool // whether alternate screen mode is enabled
-	cursorHidden  bool // whether text cursor mode is enabled
-	clear         bool // whether to force clear the screen
-	xtermLike     bool // whether to use xterm-like optimizations, otherwise, it uses vt100 only
-	queuedText    bool // whether we have queued non-zero width text queued up
-	atPhantom     bool // whether the cursor is out of bounds and at a phantom cell
+	scrollHeight  int          // keeps track of how many lines we've scrolled down (inline mode)
+	altScreenMode bool         // whether alternate screen mode is enabled
+	cursorHidden  bool         // whether text cursor mode is enabled
+	clear         bool         // whether to force clear the screen
+	caps          capabilities // terminal control sequence capabilities
+	queuedText    bool         // whether we have queued non-zero width text queued up
+	atPhantom     bool         // whether the cursor is out of bounds and at a phantom cell
 }
 
 // SetMethod sets the method used to calculate the width of cells.
@@ -491,36 +502,77 @@ func (s *Screen) FillRect(cell *Cell, r Rectangle) bool {
 	return true
 }
 
-// isXtermLike returns whether the terminal is xterm-like. This means that the
+// capabilities represents a mask of supported ANSI escape sequences.
+type capabilities uint
+
+const (
+	// Vertical Position Absolute [ansi.VPA].
+	capVPA capabilities = 1 << iota
+	// Horizontal Position Absolute [ansi.HPA].
+	capHPA
+	// Cursor Horizontal Tab [ansi.CHT].
+	capCHT
+	// Cursor Backward Tab [ansi.CBT].
+	capCBT
+	// Repeat Previous Character [ansi.REP].
+	capREP
+	// Erase Character [ansi.ECH].
+	capECH
+	// Insert Character [ansi.ICH].
+	capICH
+	// Scroll Down [ansi.SD].
+	capSD
+	// Scroll Up [ansi.SU].
+	capSU
+
+	noCaps  capabilities = 0
+	allCaps = capVPA | capHPA | capCHT | capCBT | capREP | capECH | capICH |
+		capSD | capSU
+)
+
+// Contains returns whether the capabilities contains the given capability.
+func (v capabilities) Contains(c capabilities) bool {
+	return v&c == c
+}
+
+// xtermCaps returns whether the terminal is xterm-like. This means that the
 // terminal supports ECMA-48 and ANSI X3.64 escape sequences.
-// TODO: Should this be a lookup table into each $TERM terminfo database? Like
-// we could keep a map of ANSI escape sequence to terminfo capability name and
-// check if the database supports the escape sequence. Instead of keeping a
-// list of terminal names here.
-func isXtermLike(termtype string) (v bool) {
+// xtermCaps returns a list of control sequence capabilities for the given
+// terminal type. This only supports a subset of sequences that can
+// be different among terminals.
+// NOTE: A hybrid approach would be to support Terminfo databases for a full
+// set of capabilities.
+func xtermCaps(termtype string) (v capabilities) {
 	parts := strings.Split(termtype, "-")
 	if len(parts) == 0 {
-		return
+		return v
 	}
 
 	switch parts[0] {
 	case
-		"alacritty", "contour", "foot", "ghostty", "kitty",
-		"linux", "rio",
-		"screen", "st", "tmux", "wezterm", "xterm":
-		v = true
+		"contour", "foot", "ghostty", "kitty",
+		"rio", "st", "tmux", "wezterm", "xterm":
+		v = allCaps
+	case "alacritty":
+		v = allCaps
+		v &^= capCHT // NOTE: alacritty added support for [ansi.CHT] in 2024-12-28 #62d5b13.
+	case "screen":
+		// See https://www.gnu.org/software/screen/manual/screen.html#Control-Sequences-1
+		v = allCaps
+		v &^= capREP
+	case "linux":
+		// See https://man7.org/linux/man-pages/man4/console_codes.4.html
+		v = capVPA | capHPA | capECH | capICH
 	}
 
-	return
+	return v
 }
 
 // NewScreen creates a new Screen.
@@ -548,14 +600,14 @@ func NewScreen(w io.Writer, width, height int, opts *ScreenOptions) (s *Screen)
 	}
 
 	s.buf = new(bytes.Buffer)
-	s.xtermLike = isXtermLike(s.opts.Term)
+	s.caps = xtermCaps(s.opts.Term)
 	s.curbuf = NewBuffer(width, height)
 	s.newbuf = NewBuffer(width, height)
 	s.cur = Cursor{Position: Pos(-1, -1)} // start at -1 to force a move
 	s.saved = s.cur
 	s.reset()
 
-	return
+	return s
 }
 
 // Width returns the width of the screen.
@@ -595,7 +647,7 @@ func (s *Screen) putCell(cell *Cell) {
 
 // wrapCursor wraps the cursor to the next line.
 //
-//nolint:unused
+
 func (s *Screen) wrapCursor() {
 	const autoRightMargin = true
 	if autoRightMargin {
@@ -628,9 +680,9 @@ func (s *Screen) putAttrCell(cell *Cell) {
 	}
 
 	s.updatePen(cell)
-	s.buf.WriteRune(cell.Rune) //nolint:errcheck
+	s.buf.WriteRune(cell.Rune)
 	for _, c := range cell.Comb {
-		s.buf.WriteRune(c) //nolint:errcheck
+		s.buf.WriteRune(c)
 	}
 
 	s.cur.X += cell.Width
@@ -649,12 +701,12 @@ func (s *Screen) putCellLR(cell *Cell) {
 	// Optimize for the lower right corner cell.
 	curX := s.cur.X
 	if cell == nil || !cell.Empty() {
-		s.buf.WriteString(ansi.ResetAutoWrapMode) //nolint:errcheck
+		s.buf.WriteString(ansi.ResetModeAutoWrap)
 		s.putAttrCell(cell)
 		// Writing to lower-right corner cell should not wrap.
 		s.atPhantom = false
 		s.cur.X = curX
-		s.buf.WriteString(ansi.SetAutoWrapMode) //nolint:errcheck
+		s.buf.WriteString(ansi.SetModeAutoWrap)
 	}
 }
 
@@ -675,11 +727,11 @@ func (s *Screen) updatePen(cell *Cell) {
 		if cell.Style.Empty() && len(seq) > len(ansi.ResetStyle) {
 			seq = ansi.ResetStyle
 		}
-		s.buf.WriteString(seq) //nolint:errcheck
+		s.buf.WriteString(seq)
 		s.cur.Style = cell.Style
 	}
 	if !cell.Link.Equal(&s.cur.Link) {
-		s.buf.WriteString(ansi.SetHyperlink(cell.Link.URL, cell.Link.Params)) //nolint:errcheck
+		s.buf.WriteString(ansi.SetHyperlink(cell.Link.URL, cell.Link.Params))
 		s.cur.Link = cell.Link
 	}
 }
@@ -712,9 +764,9 @@ func (s *Screen) emitRange(line Line, n int) (eoi bool) {
 			ech := ansi.EraseCharacter(count)
 			cup := ansi.CursorPosition(s.cur.X+count, s.cur.Y)
 			rep := ansi.RepeatPreviousCharacter(count)
-			if s.xtermLike && count > len(ech)+len(cup) && cell0 != nil && cell0.Clear() {
+			if s.caps.Contains(capECH) && count > len(ech)+len(cup) && cell0 != nil && cell0.Clear() { //nolint:nestif
 				s.updatePen(cell0)
-				s.buf.WriteString(ech) //nolint:errcheck
+				s.buf.WriteString(ech)
 
 				// If this is the last cell, we don't need to move the cursor.
@@ -722,7 +774,7 @@ func (s *Screen) emitRange(line Line, n int) (eoi bool) {
 				} else {
 					return true // cursor in the middle
 				}
-			} else if s.xtermLike && count > len(rep) &&
+			} else if s.caps.Contains(capREP) && count > len(rep) &&
 				(cell0 == nil || (len(cell0.Comb) == 0 && cell0.Rune < 256)) {
 				// We only support ASCII characters. Most terminals will handle
 				// non-ASCII characters correctly, but some might not, ahem xterm.
@@ -740,13 +792,13 @@ func (s *Screen) emitRange(line Line, n int) (eoi bool) {
 				s.putCell(cell0)
 				repCount-- // cell0 is a single width cell ASCII character
 
-				s.buf.WriteString(ansi.RepeatPreviousCharacter(repCount)) //nolint:errcheck
+				s.buf.WriteString(ansi.RepeatPreviousCharacter(repCount))
 				s.cur.X += repCount
 				if wrapPossible {
 					s.putCell(cell0)
 				}
 			} else {
-				for i := 0; i < count; i++ {
+				for i := range count {
 					s.putCell(line.At(i))
 				}
 			}
@@ -755,7 +807,7 @@ func (s *Screen) emitRange(line Line, n int) (eoi bool) {
 		n -= count
 	}
 
-	return
+	return eoi
 }
 
 // putRange puts a range of cells from the old line to the new line.
@@ -765,7 +817,7 @@ func (s *Screen) putRange(oldLine, newLine Line, y, start, end int) (eoi bool) {
 	inline := min(len(ansi.CursorPosition(start+1, y+1)),
 		min(len(ansi.HorizontalPositionAbsolute(start+1)),
 			len(ansi.CursorForward(start+1))))
-	if (end - start + 1) > inline {
+	if (end - start + 1) > inline { //nolint:nestif
 		var j, same int
 		for j, same = start, 0; j <= end; j++ {
 			oldCell, newCell := oldLine.At(j), newLine.At(j)
@@ -817,9 +869,9 @@ func (s *Screen) clearToEnd(blank *Cell, force bool) { //nolint:unparam
 		s.updatePen(blank)
 		count := s.newbuf.Width() - s.cur.X
 		if s.el0Cost() <= count {
-			s.buf.WriteString(ansi.EraseLineRight) //nolint:errcheck
+			s.buf.WriteString(ansi.EraseLineRight)
 		} else {
-			for i := 0; i < count; i++ {
+			for range count {
 				s.putCell(blank)
 			}
 		}
@@ -839,12 +891,13 @@ func (s *Screen) clearBlank() *Cell {
 // insertCells inserts the count cells pointed by the given line at the current
 // cursor position.
 func (s *Screen) insertCells(line Line, count int) {
-	if s.xtermLike {
+	supportsICH := s.caps.Contains(capICH)
+	if supportsICH {
 		// Use [ansi.ICH] as an optimization.
-		s.buf.WriteString(ansi.InsertCharacter(count)) //nolint:errcheck
+		s.buf.WriteString(ansi.InsertCharacter(count))
 	} else {
 		// Otherwise, use [ansi.IRM] mode.
-		s.buf.WriteString(ansi.SetInsertReplaceMode) //nolint:errcheck
+		s.buf.WriteString(ansi.SetModeInsertReplace)
 	}
 
 	for i := 0; count > 0; i++ {
@@ -852,8 +905,8 @@ func (s *Screen) insertCells(line Line, count int) {
 		count--
 	}
 
-	if !s.xtermLike {
-		s.buf.WriteString(ansi.ResetInsertReplaceMode) //nolint:errcheck
+	if !supportsICH {
+		s.buf.WriteString(ansi.ResetModeInsertReplace)
 	}
 }
 
@@ -862,7 +915,7 @@ func (s *Screen) insertCells(line Line, count int) {
 // [ansi.EL] 0 i.e. [ansi.EraseLineRight] to clear
 // trailing spaces.
 func (s *Screen) el0Cost() int {
-	if s.xtermLike {
+	if s.caps != noCaps {
 		return 0
 	}
 	return len(ansi.EraseLineRight)
@@ -878,7 +931,7 @@ func (s *Screen) transformLine(y int) {
 
 	// Find the first changed cell in the line
 	var lineChanged bool
-	for i := 0; i < s.newbuf.Width(); i++ {
+	for i := range s.newbuf.Width() {
 		if !cellEqual(newLine.At(i), oldLine.At(i)) {
 			lineChanged = true
 			break
@@ -886,7 +939,7 @@ func (s *Screen) transformLine(y int) {
 	}
 
 	const ceolStandoutGlitch = false
-	if ceolStandoutGlitch && lineChanged {
+	if ceolStandoutGlitch && lineChanged { //nolint:nestif
 		s.move(0, y)
 		s.clearToEnd(nil, false)
 		s.putRange(oldLine, newLine, y, 0, s.newbuf.Width()-1)
@@ -897,12 +950,12 @@ func (s *Screen) transformLine(y int) {
 		// [ansi.EraseLineLeft].
 		if blank == nil || blank.Clear() {
 			var oFirstCell, nFirstCell int
-			for oFirstCell = 0; oFirstCell < s.curbuf.Width(); oFirstCell++ {
+			for oFirstCell = range s.curbuf.Width() {
 				if !cellEqual(oldLine.At(oFirstCell), blank) {
 					break
 				}
 			}
-			for nFirstCell = 0; nFirstCell < s.newbuf.Width(); nFirstCell++ {
+			for nFirstCell = range s.newbuf.Width() {
 				if !cellEqual(newLine.At(nFirstCell), blank) {
 					break
 				}
@@ -925,11 +978,11 @@ func (s *Screen) transformLine(y int) {
 				if nFirstCell >= s.newbuf.Width() {
 					s.move(0, y)
 					s.updatePen(blank)
-					s.buf.WriteString(ansi.EraseLineRight) //nolint:errcheck
+					s.buf.WriteString(ansi.EraseLineRight)
 				} else {
 					s.move(nFirstCell-1, y)
 					s.updatePen(blank)
-					s.buf.WriteString(ansi.EraseLineLeft) //nolint:errcheck
+					s.buf.WriteString(ansi.EraseLineLeft)
 				}
 
 				for firstCell < nFirstCell {
@@ -1045,7 +1098,7 @@ func (s *Screen) transformLine(y int) {
 			s.move(n+1, y)
 			ichCost := 3 + nLastCell - oLastCell
-			if s.xtermLike && (nLastCell < nLastNonBlank || ichCost > (m-n)) {
+			if s.caps.Contains(capICH) && (nLastCell < nLastNonBlank || ichCost > (m-n)) {
 				s.putRange(oldLine, newLine, y, n+1, m)
 			} else {
 				s.insertCells(newLine[n+1:], nLastCell-oLastCell)
@@ -1079,7 +1132,7 @@ func (s *Screen) transformLine(y int) {
 func (s *Screen) deleteCells(count int) {
 	// [ansi.DCH] will shift in cells from the right margin so we need to
 	// ensure that they are the right style.
-	s.buf.WriteString(ansi.DeleteCharacter(count)) //nolint:errcheck
+	s.buf.WriteString(ansi.DeleteCharacter(count))
 }
 
 // clearToBottom clears the screen from the current cursor position to the end
@@ -1091,7 +1144,7 @@ func (s *Screen) clearToBottom(blank *Cell) {
 	}
 
 	s.updatePen(blank)
-	s.buf.WriteString(ansi.EraseScreenBelow) //nolint:errcheck
+	s.buf.WriteString(ansi.EraseScreenBelow)
 	// Clear the rest of the current line
 	s.curbuf.ClearRect(Rect(col, row, s.curbuf.Width()-col, 1))
 	// Clear everything below the current line
@@ -1104,7 +1157,7 @@ func (s *Screen) clearToBottom(blank *Cell) {
 // It returns the top line.
 func (s *Screen) clearBottom(total int) (top int) {
 	if total <= 0 {
-		return
+		return top
 	}
 
 	top = total
@@ -1112,7 +1165,7 @@ func (s *Screen) clearBottom(total int) (top int) {
 	blank := s.clearBlank()
 	canClearWithBlank := blank == nil || blank.Clear()
 
-	if canClearWithBlank {
+	if canClearWithBlank { //nolint:nestif
 		var row int
 		for row = total - 1; row >= 0; row-- {
 			oldLine := s.curbuf.Line(row)
@@ -1147,14 +1200,14 @@ func (s *Screen) clearBottom(total int) (top int) {
 		}
 	}
 
-	return
+	return top
 }
 
 // clearScreen clears the screen and put cursor at home.
 func (s *Screen) clearScreen(blank *Cell) {
 	s.updatePen(blank)
-	s.buf.WriteString(ansi.CursorHomePosition) //nolint:errcheck
-	s.buf.WriteString(ansi.EraseEntireScreen)  //nolint:errcheck
+	s.buf.WriteString(ansi.CursorHomePosition)
+	s.buf.WriteString(ansi.EraseEntireScreen)
 	s.cur.X, s.cur.Y = 0, 0
 	s.curbuf.Fill(blank)
 }
@@ -1179,7 +1232,7 @@ func (s *Screen) clearUpdate() {
 		s.clearBelow(blank, 0)
 	}
 	nonEmpty = s.clearBottom(nonEmpty)
-	for i := 0; i < nonEmpty; i++ {
+	for i := range nonEmpty {
 		s.transformLine(i)
 	}
 }
@@ -1194,13 +1247,13 @@ func (s *Screen) Flush() (err error) {
 func (s *Screen) flush() (err error) {
 	// Write the buffer
 	if s.buf.Len() > 0 {
-		_, err = s.w.Write(s.buf.Bytes()) //nolint:errcheck
+		_, err = s.w.Write(s.buf.Bytes())
 		if err == nil {
 			s.buf.Reset()
 		}
 	}
 
-	return
+	return err //nolint:wrapcheck
 }
 
 // Render renders changes of the screen to the internal buffer. Call
@@ -1221,6 +1274,7 @@ func (s *Screen) render() {
 		return
 	}
 
+	//nolint:godox
 	// TODO: Investigate whether this is necessary. Theoretically, terminals
 	// can add/remove tab stops and we should be able to handle that. We could
 	// use [ansi.DECTABSR] to read the tab stops, but that's not implemented in
@@ -1235,9 +1289,9 @@ func (s *Screen) render() {
 	// Do we need alt-screen mode?
 	if s.opts.AltScreen != s.altScreenMode {
 		if s.opts.AltScreen {
-			s.buf.WriteString(ansi.SetAltScreenSaveCursorMode)
+			s.buf.WriteString(ansi.SetModeAltScreenSaveCursor)
 		} else {
-			s.buf.WriteString(ansi.ResetAltScreenSaveCursorMode)
+			s.buf.WriteString(ansi.ResetModeAltScreenSaveCursor)
 		}
 		s.altScreenMode = s.opts.AltScreen
 	}
@@ -1252,7 +1306,9 @@ func (s *Screen) render() {
 
 	// Do we have queued strings to write above the screen?
 	if len(s.queueAbove) > 0 {
+		//nolint:godox
 		// TODO: Use scrolling region if available.
+		//nolint:godox
 		// TODO: Use [Screen.Write] [io.Writer] interface.
 
 		// We need to scroll the screen up by the number of lines in the queue.
@@ -1290,12 +1346,13 @@ func (s *Screen) render() {
 		s.clearBelow(nil, s.newbuf.Height()-1)
 	}
 
-	if s.clear {
+	if s.clear { //nolint:nestif
 		s.clearUpdate()
 		s.clear = false
 	} else if len(s.touch) > 0 {
 		if s.opts.AltScreen {
 			// Optimize scrolling for the alternate screen buffer.
+			//nolint:godox
 			// TODO: Should we optimize for inline mode as well? If so, we need
 			// to know the actual cursor position to use [ansi.DECSTBM].
 			s.scrollOptimize()
 		}
 
@@ -1311,7 +1368,7 @@ func (s *Screen) render() {
 		}
 
 		nonEmpty = s.clearBottom(nonEmpty)
-		for i = 0; i < nonEmpty; i++ {
+		for i = range nonEmpty {
 			_, ok := s.touch[i]
 			if ok {
 				s.transformLine(i)
@@ -1359,7 +1416,7 @@ func (s *Screen) Close() (err error) {
 	s.move(0, s.newbuf.Height()-1)
 
 	if s.altScreenMode {
-		s.buf.WriteString(ansi.ResetAltScreenSaveCursorMode)
+		s.buf.WriteString(ansi.ResetModeAltScreenSaveCursor)
 		s.altScreenMode = false
 	}
 
@@ -1371,11 +1428,11 @@ func (s *Screen) Close() (err error) {
 	// Write the buffer
 	err = s.flush()
 	if err != nil {
-		return
+		return err
 	}
 
 	s.reset()
-	return
+	return err
 }
 
 // reset resets the screen to its initial state.
@@ -1420,9 +1477,9 @@ func (s *Screen) Resize(width, height int) bool {
 	}
 
 	if height > oldh {
-		s.ClearRect(Rect(0, max(oldh-1, 0), width, height-oldh))
+		s.ClearRect(Rect(0, max(oldh, 0), width, height-oldh))
 	} else if height < oldh {
-		s.ClearRect(Rect(0, max(height-1, 0), width, oldh-height))
+		s.ClearRect(Rect(0, max(height, 0), width, oldh-height))
 	}
 
 	s.mu.Lock()
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/style.go b/vendor/github.com/charmbracelet/x/cellbuf/style.go
index 82c4afb73..6d3876390 100644
--- a/vendor/github.com/charmbracelet/x/cellbuf/style.go
+++ b/vendor/github.com/charmbracelet/x/cellbuf/style.go
@@ -4,9 +4,9 @@ import (
 	"github.com/charmbracelet/colorprofile"
 )
 
-// Convert converts a style to respect the given color profile.
+// ConvertStyle converts a style to respect the given color profile.
 func ConvertStyle(s Style, p colorprofile.Profile) Style {
-	switch p {
+	switch p { //nolint:exhaustive
 	case colorprofile.TrueColor:
 		return s
 	case colorprofile.Ascii:
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/utils.go b/vendor/github.com/charmbracelet/x/cellbuf/utils.go
index b0452fa9a..a7a2df340 100644
--- a/vendor/github.com/charmbracelet/x/cellbuf/utils.go
+++ b/vendor/github.com/charmbracelet/x/cellbuf/utils.go
@@ -9,20 +9,6 @@ func Height(s string) int {
 	return strings.Count(s, "\n") + 1
 }
 
-func min(a, b int) int { //nolint:predeclared
-	if a > b {
-		return b
-	}
-	return a
-}
-
-func max(a, b int) int { //nolint:predeclared
-	if a > b {
-		return a
-	}
-	return b
-}
-
 func clamp(v, low, high int) int {
 	if high < low {
 		low, high = high, low
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/wrap.go b/vendor/github.com/charmbracelet/x/cellbuf/wrap.go
index f89f52f60..aaba6ef7e 100644
--- a/vendor/github.com/charmbracelet/x/cellbuf/wrap.go
+++ b/vendor/github.com/charmbracelet/x/cellbuf/wrap.go
@@ -2,6 +2,7 @@ package cellbuf
 
 import (
 	"bytes"
+	"slices"
 	"unicode"
 	"unicode/utf8"
 
@@ -20,6 +21,16 @@ const nbsp = '\u00a0'
 //
 // Note: breakpoints must be a string of 1-cell wide rune characters.
 func Wrap(s string, limit int, breakpoints string) string {
+	//nolint:godox
+	// TODO: Use [PenWriter] once we get
+	// https://github.com/charmbracelet/lipgloss/pull/489 out the door and
+	// released.
+	// The problem is that [ansi.Wrap] doesn't keep track of style and link
+	// state, so combining both breaks styled space cells. To fix this, we use
+	// non-breaking space cells for padding and styled blank cells. And since
+	// both wrapping methods respect non-breaking spaces, we can use them to
+	// preserve styled spaces in the output.
+ if len(s) == 0 { return "" } @@ -90,7 +101,7 @@ func Wrap(s string, limit int, breakpoints string) string { seq, width, n, newState := ansi.DecodeSequence(s, state, p) switch width { case 0: - if ansi.Equal(seq, "\t") { + if ansi.Equal(seq, "\t") { //nolint:nestif addWord() space.WriteString(seq) break @@ -176,10 +187,5 @@ func Wrap(s string, limit int, breakpoints string) string { } func runeContainsAny[T string | []rune](r rune, s T) bool { - for _, c := range []rune(s) { - if c == r { - return true - } - } - return false + return slices.Contains([]rune(s), r) } diff --git a/vendor/github.com/charmbracelet/x/cellbuf/writer.go b/vendor/github.com/charmbracelet/x/cellbuf/writer.go index ae8b2a810..720767801 100644 --- a/vendor/github.com/charmbracelet/x/cellbuf/writer.go +++ b/vendor/github.com/charmbracelet/x/cellbuf/writer.go @@ -25,7 +25,7 @@ type CellBuffer interface { func FillRect(s CellBuffer, c *Cell, rect Rectangle) { for y := rect.Min.Y; y < rect.Max.Y; y++ { for x := rect.Min.X; x < rect.Max.X; x++ { - s.SetCell(x, y, c) //nolint:errcheck + s.SetCell(x, y, c) } } } @@ -68,7 +68,7 @@ func SetContent(s CellBuffer, str string) { func Render(d CellBuffer) string { var buf bytes.Buffer height := d.Bounds().Dy() - for y := 0; y < height; y++ { + for y := range height { _, line := RenderLine(d, y) buf.WriteString(line) if y < height-1 { @@ -98,32 +98,32 @@ func RenderLine(d CellBuffer, n int) (w int, line string) { pendingLine = "" } - for x := 0; x < d.Bounds().Dx(); x++ { - if cell := d.Cell(x, n); cell != nil && cell.Width > 0 { + for x := range d.Bounds().Dx() { + if cell := d.Cell(x, n); cell != nil && cell.Width > 0 { //nolint:nestif // Convert the cell's style and link to the given color profile. cellStyle := cell.Style cellLink := cell.Link if cellStyle.Empty() && !pen.Empty() { writePending() - buf.WriteString(ansi.ResetStyle) //nolint:errcheck + buf.WriteString(ansi.ResetStyle) pen.Reset() } if !cellStyle.Equal(&pen) { writePending() seq := cellStyle.DiffSequence(pen) - buf.WriteString(seq) // nolint:errcheck + buf.WriteString(seq) pen = cellStyle } // Write the URL escape sequence if cellLink != link && link.URL != "" { writePending() - buf.WriteString(ansi.ResetHyperlink()) //nolint:errcheck + buf.WriteString(ansi.ResetHyperlink()) link.Reset() } if cellLink != link { writePending() - buf.WriteString(ansi.SetHyperlink(cellLink.URL, cellLink.Params)) //nolint:errcheck + buf.WriteString(ansi.SetHyperlink(cellLink.URL, cellLink.Params)) link = cellLink } @@ -140,10 +140,10 @@ func RenderLine(d CellBuffer, n int) (w int, line string) { } } if link.URL != "" { - buf.WriteString(ansi.ResetHyperlink()) //nolint:errcheck + buf.WriteString(ansi.ResetHyperlink()) } if !pen.Empty() { - buf.WriteString(ansi.ResetStyle) //nolint:errcheck + buf.WriteString(ansi.ResetStyle) } return w, strings.TrimRight(buf.String(), " ") // Trim trailing spaces } @@ -201,7 +201,7 @@ func (s *ScreenWriter) SetContentRect(str string, rect Rectangle) { // string to the width of the screen if it exceeds the width of the screen. // This will recognize ANSI [ansi.SGR] style and [ansi.SetHyperlink] escape // sequences. -func (s *ScreenWriter) Print(str string, v ...interface{}) { +func (s *ScreenWriter) Print(str string, v ...any) { if len(v) > 0 { str = fmt.Sprintf(str, v...) } @@ -214,7 +214,7 @@ func (s *ScreenWriter) Print(str string, v ...interface{}) { // the width of the screen if it exceeds the width of the screen. 
 // This will recognize ANSI [ansi.SGR] style and [ansi.SetHyperlink] escape
 // sequences.
-func (s *ScreenWriter) PrintAt(x, y int, str string, v ...interface{}) {
+func (s *ScreenWriter) PrintAt(x, y int, str string, v ...any) {
 	if len(v) > 0 {
 		str = fmt.Sprintf(str, v...)
 	}
@@ -299,7 +299,7 @@ func printString[T []byte | string](
 			// Print the cell to the screen
 			cell.Style = style
 			cell.Link = link
-			s.SetCell(x, y, &cell) //nolint:errcheck
+			s.SetCell(x, y, &cell)
 			x += width
 		}
 	}
@@ -309,6 +309,7 @@ func printString[T []byte | string](
 			cell.Reset()
 		default:
 			// Valid sequences always have a non-zero Cmd.
+			//nolint:godox
 			// TODO: Handle cursor movement and other sequences
 			switch {
 			case ansi.HasCsiPrefix(seq) && p.Command() == 'm':
@@ -333,7 +334,7 @@ func printString[T []byte | string](
 	// Make sure to set the last cell if it's not empty.
 	if !cell.Empty() {
-		s.SetCell(x, y, &cell) //nolint:errcheck
+		s.SetCell(x, y, &cell)
 		cell.Reset()
 	}
 }
diff --git a/vendor/github.com/charmbracelet/x/term/term.go b/vendor/github.com/charmbracelet/x/term/term.go
index 58d6522ca..151e47582 100644
--- a/vendor/github.com/charmbracelet/x/term/term.go
+++ b/vendor/github.com/charmbracelet/x/term/term.go
@@ -1,3 +1,5 @@
+// Package term provides platform-independent interfaces for interacting with
+// terminal and TTY devices.
 package term
 
 // State contains platform-specific state of a terminal.
diff --git a/vendor/github.com/charmbracelet/x/term/term_plan9.go b/vendor/github.com/charmbracelet/x/term/term_plan9.go
new file mode 100644
index 000000000..eba3bdf7f
--- /dev/null
+++ b/vendor/github.com/charmbracelet/x/term/term_plan9.go
@@ -0,0 +1,118 @@
+package term
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+type state struct {
+	termName string
+	raw      bool
+	ctl      *os.File
+}
+
+// termName returns the name of the terminal or os.ErrNotExist if there is no terminal.
+func termName(fd uintptr) (string, error) {
+	ctl, err := os.ReadFile(filepath.Join("/fd", fmt.Sprintf("%dctl", fd)))
+	if err != nil {
+		return "", err
+	}
+	f := strings.Fields(string(ctl))
+	if len(f) == 0 {
+		return "", os.ErrNotExist
+	}
+	return f[len(f)-1], nil
+}
+
+func isTerminal(fd uintptr) bool {
+	ctl, err := os.ReadFile(filepath.Join("/fd", fmt.Sprintf("%dctl", fd)))
+	if err != nil {
+		return false
+	}
+	return strings.Contains(string(ctl), "/dev/cons")
+}
+
+func makeRaw(fd uintptr) (*State, error) {
+	t, err := termName(fd)
+	if err != nil {
+		return nil, err
+	}
+	ctl, err := os.OpenFile(t, os.O_RDWR, 0)
+	if err != nil {
+		return nil, err
+	}
+	if _, err := ctl.Write([]byte("rawon")); err != nil {
+		return nil, err
+	}
+	return &State{state: state{termName: t, raw: true, ctl: ctl}}, nil
+}
+
+func getState(fd uintptr) (*State, error) {
+	t, err := termName(fd)
+	if err != nil {
+		return nil, err
+	}
+	ctl, err := os.OpenFile(t, os.O_RDWR, 0)
+	if err != nil {
+		return nil, err
+	}
+	return &State{state: state{termName: t, raw: false, ctl: ctl}}, nil
+}
+
+func restore(_ uintptr, state *State) error {
+	if _, err := state.ctl.Write([]byte("rawoff")); err != nil {
+		return err
+	}
+	return nil
+}
+
+// getSize returns the size. This will only work if you are running
+// under a window manager in Plan 9. Otherwise, the only option
+// is to return a reasonable default.
+func getSize(fd uintptr) (int, int, error) {
+	w, h := 80, 40
+	b, err := os.ReadFile("/dev/wctl")
+	if err != nil {
+		return w, h, err
+	}
+	f := strings.Fields(string(b))
+	if len(f) != 4 {
+		return w, h, fmt.Errorf("%q only has %d of 4 needed fields: %w", f, len(f), os.ErrInvalid)
+	}
+	// The contents of wctl, as defined in the driver, are
+	// 4 12-char fields: upper left x, y; and lower-right x, y.
+	var ulx, uly, lrx, lry int
+	if n, err := fmt.Sscanf(string(b[:48]), "%d%d%d%d", &ulx, &uly, &lrx, &lry); n != 4 || err != nil {
+		return w, h, fmt.Errorf("scanning %q: %d of 4 items scanned: %w", string(b[:48]), n, err)
+	}
+
+	w, h = lrx-ulx, lry-uly
+	return w, h, nil
+}
+
+func setState(_ uintptr, state *State) error {
+	raw := "rawoff"
+	if state.raw {
+		raw = "rawon"
+	}
+	if _, err := state.ctl.Write([]byte(raw)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func readPassword(fd uintptr) ([]byte, error) {
+	f := os.NewFile(fd, "cons")
+	var b [128]byte
+	n, err := f.Read(b[:])
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
diff --git a/vendor/github.com/charmbracelet/x/term/term_unix.go b/vendor/github.com/charmbracelet/x/term/term_unix.go
index 1459cb1be..77706d48e 100644
--- a/vendor/github.com/charmbracelet/x/term/term_unix.go
+++ b/vendor/github.com/charmbracelet/x/term/term_unix.go
@@ -19,7 +19,7 @@ func isTerminal(fd uintptr) bool {
 func makeRaw(fd uintptr) (*State, error) {
 	termios, err := unix.IoctlGetTermios(int(fd), ioctlReadTermios)
 	if err != nil {
-		return nil, err
+		return nil, err //nolint:wrapcheck
 	}
 
 	oldState := State{state{Termios: *termios}}
@@ -34,7 +34,7 @@ func makeRaw(fd uintptr) (*State, error) {
 	termios.Cc[unix.VMIN] = 1
 	termios.Cc[unix.VTIME] = 0
 	if err := unix.IoctlSetTermios(int(fd), ioctlWriteTermios, termios); err != nil {
-		return nil, err
+		return nil, err //nolint:wrapcheck
 	}
 
 	return &oldState, nil
@@ -45,26 +45,26 @@ func setState(fd uintptr, state *State) error {
 	if state != nil {
 		termios = &state.Termios
 	}
-	return unix.IoctlSetTermios(int(fd), ioctlWriteTermios, termios)
+	return unix.IoctlSetTermios(int(fd), ioctlWriteTermios, termios) //nolint:wrapcheck
 }
 
 func getState(fd uintptr) (*State, error) {
 	termios, err := unix.IoctlGetTermios(int(fd), ioctlReadTermios)
 	if err != nil {
-		return nil, err
+		return nil, err //nolint:wrapcheck
 	}
 
 	return &State{state{Termios: *termios}}, nil
 }
 
 func restore(fd uintptr, state *State) error {
-	return unix.IoctlSetTermios(int(fd), ioctlWriteTermios, &state.Termios)
+	return unix.IoctlSetTermios(int(fd), ioctlWriteTermios, &state.Termios) //nolint:wrapcheck
 }
 
 func getSize(fd uintptr) (width, height int, err error) {
 	ws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
 	if err != nil {
-		return 0, 0, err
+		return 0, 0, err //nolint:wrapcheck
 	}
 	return int(ws.Col), int(ws.Row), nil
 }
@@ -73,13 +73,13 @@ func getSize(fd uintptr) (width, height int, err error) {
 type passwordReader int
 
 func (r passwordReader) Read(buf []byte) (int, error) {
-	return unix.Read(int(r), buf)
+	return unix.Read(int(r), buf) //nolint:wrapcheck
 }
 
 func readPassword(fd uintptr) ([]byte, error) {
 	termios, err := unix.IoctlGetTermios(int(fd), ioctlReadTermios)
 	if err != nil {
-		return nil, err
+		return nil, err //nolint:wrapcheck
 	}
 
 	newState := *termios
@@ -87,10 +87,10 @@ func readPassword(fd uintptr) ([]byte, error) {
 	newState.Lflag |= unix.ICANON | unix.ISIG
 	newState.Iflag |= unix.ICRNL
 	if err := unix.IoctlSetTermios(int(fd), ioctlWriteTermios, &newState); err != nil {
-		return nil, err
+		return nil, err //nolint:wrapcheck
 	}
-	defer unix.IoctlSetTermios(int(fd), ioctlWriteTermios, termios)
+	defer unix.IoctlSetTermios(int(fd), ioctlWriteTermios, termios) //nolint:errcheck
 
 	return readPasswordLine(passwordReader(fd))
 }
diff --git a/vendor/github.com/charmbracelet/x/term/term_windows.go b/vendor/github.com/charmbracelet/x/term/term_windows.go
index fe7afdec9..d94f68455 100644
--- a/vendor/github.com/charmbracelet/x/term/term_windows.go
+++ b/vendor/github.com/charmbracelet/x/term/term_windows.go
@@ -24,7 +24,7 @@ func makeRaw(fd uintptr) (*State, error) {
 	if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil {
 		return nil, err
 	}
-	raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
+	raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT)
 	raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
 	if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil {
 		return nil, err
diff --git a/vendor/github.com/charmbracelet/x/term/util.go b/vendor/github.com/charmbracelet/x/term/util.go
index b7313418f..9c2b628bb 100644
--- a/vendor/github.com/charmbracelet/x/term/util.go
+++ b/vendor/github.com/charmbracelet/x/term/util.go
@@ -41,7 +41,7 @@ func readPasswordLine(reader io.Reader) ([]byte, error) {
 			if err == io.EOF && len(ret) > 0 {
 				return ret, nil
 			}
-			return ret, err
+			return ret, err //nolint:wrapcheck
 		}
 	}
 }
diff --git a/vendor/github.com/clipperhouse/displaywidth/.gitignore b/vendor/github.com/clipperhouse/displaywidth/.gitignore
new file mode 100644
index 000000000..37a7e92f0
--- /dev/null
+++ b/vendor/github.com/clipperhouse/displaywidth/.gitignore
@@ -0,0 +1,2 @@
+.DS_Store
+*.out
diff --git a/vendor/github.com/clipperhouse/displaywidth/AGENTS.md b/vendor/github.com/clipperhouse/displaywidth/AGENTS.md
new file mode 100644
index 000000000..853e2917d
--- /dev/null
+++ b/vendor/github.com/clipperhouse/displaywidth/AGENTS.md
@@ -0,0 +1,37 @@
+The goals and overview of this package can be found in the README.md file;
+start by reading that.
+
+The goal of this package is to determine the display (column) width of a
+string, UTF-8 bytes, or runes, as would happen in a monospace font, especially
+in a terminal.
+
+When troubleshooting, write Go unit tests instead of executing debug scripts.
+The tests can return whatever logs or output you need. If those tests are
+only for temporary troubleshooting, clean up the tests after the debugging is
+done.
+
+(Separate executable debugging scripts are messy, tend to have conflicting
+dependencies, and are hard to clean up.)
+
+If you make changes to the trie generation in internal/gen, it can be invoked
+by running `go generate` from the top package directory.
+
+## Pull Requests and branches
+
+For PRs (pull requests), you can use the gh CLI tool to retrieve details
+or post comments. Then, compare the current branch with main. Reviewing a PR
+and reviewing a branch are about the same, but the PR may add context.
+
+Look for bugs. Think like GitHub Copilot or Cursor BugBot.
+
+Offer to post a brief summary of the review to the PR, via the gh CLI tool.
+
+## Comparisons to go-runewidth
+
+We originally attempted to make this package compatible with go-runewidth.
+However, we found that there were too many differences in the handling of
+certain characters and properties.
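+
+If you want to see those differences concretely, a small comparison harness
+works well. A minimal sketch, assuming `mattn/go-runewidth` is available as a
+dependency (the sample inputs are only illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/clipperhouse/displaywidth"
+	"github.com/mattn/go-runewidth"
+)
+
+func main() {
+	// Inputs chosen to probe combining marks, format characters,
+	// flags, and East Asian Wide characters.
+	samples := []string{"e\u0301", "\u00ad", "🇫🇷", "世界"}
+	for _, s := range samples {
+		a := displaywidth.String(s)
+		b := runewidth.StringWidth(s)
+		if a != b {
+			fmt.Printf("%q: displaywidth=%d, go-runewidth=%d\n", s, a, b)
+		}
+	}
+}
+```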
+
+We believe, preliminarily, that our choices are more correct and complete,
+since we use broader categories such as Unicode Cf (format) for zero-width
+and Mn (Nonspacing_Mark) for combining marks.
diff --git a/vendor/github.com/clipperhouse/displaywidth/CHANGELOG.md b/vendor/github.com/clipperhouse/displaywidth/CHANGELOG.md
new file mode 100644
index 000000000..c97ce3b81
--- /dev/null
+++ b/vendor/github.com/clipperhouse/displaywidth/CHANGELOG.md
@@ -0,0 +1,70 @@
+# Changelog
+
+## [0.6.1]
+
+[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.6.0...v0.6.1)
+
+### Changed
+- Perf improvements: replaced the ASCII lookup table with a simple
+  function. A bit more cache-friendly. More inlining.
+- Bug fix: single regional indicators are now treated as width 2, since that
+  is what actual terminals do.
+
+## [0.6.0]
+
+[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.5.0...v0.6.0)
+
+### Added
+- New `StringGraphemes` and `BytesGraphemes` methods, for iterating over the
+  widths of grapheme clusters.
+
+### Changed
+- Added ASCII fast paths
+
+## [0.5.0]
+
+[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.4.1...v0.5.0)
+
+### Added
+- Unicode 16 support
+- Improved emoji presentation handling per Unicode TR51
+
+### Changed
+- Corrected VS15 (U+FE0E) handling: now preserves base character width (no-op) per Unicode TR51
+- Performance optimizations: reduced property lookups
+
+### Fixed
+- VS15 variation selector now correctly preserves base character width instead of forcing width 1
+
+## [0.4.1]
+
+[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.4.0...v0.4.1)
+
+### Changed
+- Updated uax29 dependency
+- Improved flag handling
+
+## [0.4.0]
+
+[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.3.1...v0.4.0)
+
+### Added
+- Support for variation selectors (VS15, VS16) and regional indicator pairs (flags)
+
+## [0.3.1]
+
+[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.3.0...v0.3.1)
+
+### Added
+- Fuzz testing support
+
+### Changed
+- Updated stringish dependency
+
+## [0.3.0]
+
+[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.2.0...v0.3.0)
+
+### Changed
+- Dropped compatibility with go-runewidth
+- Trie implementation cleanup
diff --git a/vendor/github.com/clipperhouse/displaywidth/LICENSE b/vendor/github.com/clipperhouse/displaywidth/LICENSE
new file mode 100644
index 000000000..4b8064eb3
--- /dev/null
+++ b/vendor/github.com/clipperhouse/displaywidth/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Matt Sherman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/clipperhouse/displaywidth/README.md b/vendor/github.com/clipperhouse/displaywidth/README.md
new file mode 100644
index 000000000..95a02d9af
--- /dev/null
+++ b/vendor/github.com/clipperhouse/displaywidth/README.md
@@ -0,0 +1,171 @@
+# displaywidth
+
+A high-performance Go package for measuring the monospace display width of strings, UTF-8 bytes, and runes.
+
+[![Documentation](https://pkg.go.dev/badge/github.com/clipperhouse/displaywidth.svg)](https://pkg.go.dev/github.com/clipperhouse/displaywidth)
+[![Test](https://github.com/clipperhouse/displaywidth/actions/workflows/gotest.yml/badge.svg)](https://github.com/clipperhouse/displaywidth/actions/workflows/gotest.yml)
+[![Fuzz](https://github.com/clipperhouse/displaywidth/actions/workflows/gofuzz.yml/badge.svg)](https://github.com/clipperhouse/displaywidth/actions/workflows/gofuzz.yml)
+
+## Install
+```bash
+go get github.com/clipperhouse/displaywidth
+```
+
+## Usage
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/clipperhouse/displaywidth"
+)
+
+func main() {
+	width := displaywidth.String("Hello, 世界!")
+	fmt.Println(width)
+
+	width = displaywidth.Bytes([]byte("🌍"))
+	fmt.Println(width)
+
+	width = displaywidth.Rune('🌍')
+	fmt.Println(width)
+}
+```
+
+For most purposes, you should use the `String` or `Bytes` methods. They sum
+the widths of grapheme clusters in the string or byte slice.
+
+> Note: in your application, iterating over runes to measure width is likely incorrect;
+the smallest unit of display is a grapheme, not a rune.
+
+### Iterating over graphemes
+
+If you need the individual graphemes:
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/clipperhouse/displaywidth"
+)
+
+func main() {
+	g := displaywidth.StringGraphemes("Hello, 世界!")
+	for g.Next() {
+		width := g.Width()
+		value := g.Value()
+		fmt.Println(value, width) // do something with the width or value
+	}
+}
+```
+
+### Options
+
+There is one option, `displaywidth.Options.EastAsianWidth`, which defines
+how [East Asian Ambiguous characters](https://www.unicode.org/reports/tr11/#Ambiguous)
+are treated.
+
+When `false` (default), East Asian Ambiguous characters are treated as width 1.
+When `true`, they are treated as width 2.
+
+You may wish to configure this based on environment variables or locale.
+ `go-runewidth`, for example, does so
+ [during package initialization](https://github.com/mattn/go-runewidth/blob/master/runewidth.go#L26C1-L45C2).
+
+`displaywidth` does not do this automatically; we prefer to leave it to you.
+You might do something like:
+
+```go
+var width displaywidth.Options // zero value is default
+
+func init() {
+	if os.Getenv("EAST_ASIAN_WIDTH") == "true" {
+		width = displaywidth.Options{EastAsianWidth: true}
+	}
+	// or check locale, or any other logic you want
+}
+
+// use it in your logic
+func myApp() {
+	fmt.Println(width.String("Hello, 世界!"))
+}
+```
+
+## Technical standards and compatibility
+
+This package implements the Unicode East Asian Width standard
+([UAX #11](https://www.unicode.org/reports/tr11/tr11-43.html)), and handles
+[variation selectors](https://en.wikipedia.org/wiki/Variation_Selectors_(Unicode_block))
+and [regional indicator pairs](https://en.wikipedia.org/wiki/Regional_indicator_symbol)
+(flags).
We implement [Unicode TR51](https://www.unicode.org/reports/tr51/tr51-27.html). We are keeping +an eye on [emerging standards](https://www.jeffquast.com/post/state-of-terminal-emulation-2025/). + + +`clipperhouse/displaywidth`, `mattn/go-runewidth`, and `rivo/uniseg` will +give the same outputs for most real-world text. Extensive details are in the +[compatibility analysis](comparison/COMPATIBILITY_ANALYSIS.md). + +If you wish to investigate the core logic, see the `lookupProperties` and `width` +functions in [width.go](width.go#L139). The essential trie generation logic is in +`buildPropertyBitmap` in [unicode.go](internal/gen/unicode.go#L316). + + +## Prior Art + +[mattn/go-runewidth](https://github.com/mattn/go-runewidth) + +[rivo/uniseg](https://github.com/rivo/uniseg) + +[x/text/width](https://pkg.go.dev/golang.org/x/text/width) + +[x/text/internal/triegen](https://pkg.go.dev/golang.org/x/text/internal/triegen) + +## Benchmarks + +```bash +cd comparison +go test -bench=. -benchmem +``` + +``` +goos: darwin +goarch: arm64 +pkg: github.com/clipperhouse/displaywidth/comparison +cpu: Apple M2 + +BenchmarkString_Mixed/clipperhouse/displaywidth-8 10400 ns/op 162.21 MB/s 0 B/op 0 allocs/op +BenchmarkString_Mixed/mattn/go-runewidth-8 14296 ns/op 118.00 MB/s 0 B/op 0 allocs/op +BenchmarkString_Mixed/rivo/uniseg-8 19770 ns/op 85.33 MB/s 0 B/op 0 allocs/op + +BenchmarkString_EastAsian/clipperhouse/displaywidth-8 10593 ns/op 159.26 MB/s 0 B/op 0 allocs/op +BenchmarkString_EastAsian/mattn/go-runewidth-8 23980 ns/op 70.35 MB/s 0 B/op 0 allocs/op +BenchmarkString_EastAsian/rivo/uniseg-8 19777 ns/op 85.30 MB/s 0 B/op 0 allocs/op + +BenchmarkString_ASCII/clipperhouse/displaywidth-8 1032 ns/op 124.09 MB/s 0 B/op 0 allocs/op +BenchmarkString_ASCII/mattn/go-runewidth-8 1162 ns/op 110.16 MB/s 0 B/op 0 allocs/op +BenchmarkString_ASCII/rivo/uniseg-8 1586 ns/op 80.69 MB/s 0 B/op 0 allocs/op + +BenchmarkString_Emoji/clipperhouse/displaywidth-8 3017 ns/op 240.01 MB/s 0 B/op 0 allocs/op +BenchmarkString_Emoji/mattn/go-runewidth-8 4745 ns/op 152.58 MB/s 0 B/op 0 allocs/op +BenchmarkString_Emoji/rivo/uniseg-8 6745 ns/op 107.34 MB/s 0 B/op 0 allocs/op + +BenchmarkRune_Mixed/clipperhouse/displaywidth-8 3381 ns/op 498.90 MB/s 0 B/op 0 allocs/op +BenchmarkRune_Mixed/mattn/go-runewidth-8 5383 ns/op 313.41 MB/s 0 B/op 0 allocs/op + +BenchmarkRune_EastAsian/clipperhouse/displaywidth-8 3395 ns/op 496.96 MB/s 0 B/op 0 allocs/op +BenchmarkRune_EastAsian/mattn/go-runewidth-8 15645 ns/op 107.83 MB/s 0 B/op 0 allocs/op + +BenchmarkRune_ASCII/clipperhouse/displaywidth-8 257.8 ns/op 496.57 MB/s 0 B/op 0 allocs/op +BenchmarkRune_ASCII/mattn/go-runewidth-8 267.3 ns/op 478.89 MB/s 0 B/op 0 allocs/op + +BenchmarkRune_Emoji/clipperhouse/displaywidth-8 1338 ns/op 541.24 MB/s 0 B/op 0 allocs/op +BenchmarkRune_Emoji/mattn/go-runewidth-8 2287 ns/op 316.58 MB/s 0 B/op 0 allocs/op + +BenchmarkTruncateWithTail/clipperhouse/displaywidth-8 3689 ns/op 47.98 MB/s 192 B/op 14 allocs/op +BenchmarkTruncateWithTail/mattn/go-runewidth-8 8069 ns/op 21.93 MB/s 192 B/op 14 allocs/op + +BenchmarkTruncateWithoutTail/clipperhouse/displaywidth-8 3457 ns/op 66.24 MB/s 0 B/op 0 allocs/op +BenchmarkTruncateWithoutTail/mattn/go-runewidth-8 10441 ns/op 21.93 MB/s 0 B/op 0 allocs/op +``` + +Here are some notes on [how to make Unicode things fast](https://clipperhouse.com/go-unicode/). 
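+
+## Worked example: graphemes vs. runes
+
+A minimal sketch of the note above, that summing per-rune widths can
+overcount. The family emoji below is illustrative: several runes joined by
+zero-width joiners, but a single grapheme on screen.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/clipperhouse/displaywidth"
+)
+
+func main() {
+	s := "👨‍👩‍👧" // man + ZWJ + woman + ZWJ + girl: 5 runes, 1 grapheme
+
+	// A naive per-rune sum counts each person emoji separately.
+	runeSum := 0
+	for _, r := range s {
+		runeSum += displaywidth.Rune(r)
+	}
+
+	// Grapheme-aware measurement counts the cluster once.
+	fmt.Println(runeSum, displaywidth.String(s))
+}
+```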
diff --git a/vendor/github.com/clipperhouse/displaywidth/gen.go b/vendor/github.com/clipperhouse/displaywidth/gen.go new file mode 100644 index 000000000..52e1085bc --- /dev/null +++ b/vendor/github.com/clipperhouse/displaywidth/gen.go @@ -0,0 +1,3 @@ +package displaywidth + +//go:generate go run -C internal/gen . diff --git a/vendor/github.com/clipperhouse/displaywidth/graphemes.go b/vendor/github.com/clipperhouse/displaywidth/graphemes.go new file mode 100644 index 000000000..673c2aab5 --- /dev/null +++ b/vendor/github.com/clipperhouse/displaywidth/graphemes.go @@ -0,0 +1,72 @@ +package displaywidth + +import ( + "github.com/clipperhouse/stringish" + "github.com/clipperhouse/uax29/v2/graphemes" +) + +// Graphemes is an iterator over grapheme clusters. +// +// Iterate using the Next method, and get the width of the current grapheme +// using the Width method. +type Graphemes[T stringish.Interface] struct { + iter graphemes.Iterator[T] + options Options +} + +// Next advances the iterator to the next grapheme cluster. +func (g *Graphemes[T]) Next() bool { + return g.iter.Next() +} + +// Value returns the current grapheme cluster. +func (g *Graphemes[T]) Value() T { + return g.iter.Value() +} + +// Width returns the display width of the current grapheme cluster. +func (g *Graphemes[T]) Width() int { + return graphemeWidth(g.Value(), g.options) +} + +// StringGraphemes returns an iterator over grapheme clusters for the given +// string. +// +// Iterate using the Next method, and get the width of the current grapheme +// using the Width method. +func StringGraphemes(s string) Graphemes[string] { + return DefaultOptions.StringGraphemes(s) +} + +// StringGraphemes returns an iterator over grapheme clusters for the given +// string, with the given options. +// +// Iterate using the Next method, and get the width of the current grapheme +// using the Width method. +func (options Options) StringGraphemes(s string) Graphemes[string] { + return Graphemes[string]{ + iter: graphemes.FromString(s), + options: options, + } +} + +// BytesGraphemes returns an iterator over grapheme clusters for the given +// []byte. +// +// Iterate using the Next method, and get the width of the current grapheme +// using the Width method. +func BytesGraphemes(s []byte) Graphemes[[]byte] { + return DefaultOptions.BytesGraphemes(s) +} + +// BytesGraphemes returns an iterator over grapheme clusters for the given +// []byte, with the given options. +// +// Iterate using the Next method, and get the width of the current grapheme +// using the Width method. +func (options Options) BytesGraphemes(s []byte) Graphemes[[]byte] { + return Graphemes[[]byte]{ + iter: graphemes.FromBytes(s), + options: options, + } +} diff --git a/vendor/github.com/clipperhouse/displaywidth/trie.go b/vendor/github.com/clipperhouse/displaywidth/trie.go new file mode 100644 index 000000000..b03f88281 --- /dev/null +++ b/vendor/github.com/clipperhouse/displaywidth/trie.go @@ -0,0 +1,1696 @@ +// Code generated by internal/gen/main.go. DO NOT EDIT. 
+ +package displaywidth + +import "github.com/clipperhouse/stringish" + +// property is an enum representing the properties of a character +type property uint8 + +const ( + // Always 0 width, includes combining marks, control characters, non-printable, etc + _Zero_Width property = iota + 1 + // Always 2 wide (East Asian Wide F/W, Emoji, Regional Indicator) + _Wide + // Width depends on EastAsianWidth option + _East_Asian_Ambiguous +) + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func lookup[T stringish.Interface](s T) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return stringWidthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := stringWidthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := stringWidthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = stringWidthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := stringWidthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = stringWidthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = stringWidthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// stringWidthTrie. Total size: 17664 bytes (17.25 KiB). Checksum: c77d82ff2d69f0d2. +// type stringWidthTrie struct { } + +// func newStringWidthTrie(i int) *stringWidthTrie { +// return &stringWidthTrie{} +// } + +// lookupValue determines the type of block n and looks up the value for b. +func lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(stringWidthValues[n<<6+uint32(b)]) + } +} + +// stringWidthValues: 246 blocks, 15744 entries, 15744 bytes +// The third block is the zero block. 
+var stringWidthValues = [15744]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0001, 0xc1: 0x0001, 0xc2: 0x0001, 0xc3: 0x0001, 0xc4: 0x0001, 0xc5: 0x0001, + 0xc6: 0x0001, 0xc7: 0x0001, 0xc8: 0x0001, 0xc9: 0x0001, 0xca: 0x0001, 0xcb: 0x0001, + 0xcc: 0x0001, 0xcd: 0x0001, 0xce: 0x0001, 0xcf: 0x0001, 0xd0: 0x0001, 0xd1: 0x0001, + 0xd2: 0x0001, 0xd3: 0x0001, 0xd4: 0x0001, 0xd5: 0x0001, 0xd6: 0x0001, 0xd7: 0x0001, + 0xd8: 0x0001, 0xd9: 0x0001, 0xda: 0x0001, 0xdb: 0x0001, 0xdc: 0x0001, 0xdd: 0x0001, + 0xde: 0x0001, 0xdf: 0x0001, 0xe1: 0x0003, + 0xe4: 0x0003, 0xe7: 0x0003, 0xe8: 0x0003, + 0xea: 0x0003, 0xed: 0x0001, 0xee: 0x0003, + 0xf0: 0x0003, 0xf1: 0x0003, 0xf2: 0x0003, 0xf3: 0x0003, 0xf4: 0x0003, + 0xf6: 0x0003, 0xf7: 0x0003, 0xf8: 0x0003, 0xf9: 0x0003, 0xfa: 0x0003, + 0xfc: 0x0003, 0xfd: 0x0003, 0xfe: 0x0003, 0xff: 0x0003, + // Block 0x4, offset 0x100 + 0x106: 0x0003, + 0x110: 0x0003, + 0x117: 0x0003, + 0x118: 0x0003, + 0x11e: 0x0003, 0x11f: 0x0003, 0x120: 0x0003, 0x121: 0x0003, + 0x126: 0x0003, 0x128: 0x0003, 0x129: 0x0003, + 0x12a: 0x0003, 0x12c: 0x0003, 0x12d: 0x0003, + 0x130: 0x0003, 0x132: 0x0003, 0x133: 0x0003, + 0x137: 0x0003, 0x138: 0x0003, 0x139: 0x0003, 0x13a: 0x0003, + 0x13c: 0x0003, 0x13e: 0x0003, + // Block 0x5, offset 0x140 + 0x141: 0x0003, + 0x151: 0x0003, + 0x153: 0x0003, + 0x15b: 0x0003, + 0x166: 0x0003, 0x167: 0x0003, + 0x16b: 0x0003, + 0x171: 0x0003, 0x172: 0x0003, 0x173: 0x0003, + 0x178: 0x0003, + 0x17f: 0x0003, + // Block 0x6, offset 0x180 + 0x180: 0x0003, 0x181: 0x0003, 0x182: 0x0003, 0x184: 0x0003, + 0x188: 0x0003, 0x189: 0x0003, 0x18a: 0x0003, 0x18b: 0x0003, + 0x18d: 0x0003, + 0x192: 0x0003, 0x193: 0x0003, + 0x1a6: 0x0003, 0x1a7: 0x0003, + 0x1ab: 0x0003, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x0003, 0x1d0: 0x0003, + 0x1d2: 0x0003, 0x1d4: 0x0003, 0x1d6: 0x0003, + 0x1d8: 0x0003, 0x1da: 0x0003, 0x1dc: 0x0003, + // Block 0x8, offset 0x200 + 0x211: 0x0003, + 0x221: 0x0003, + // Block 0x9, offset 0x240 + 0x244: 0x0003, + 0x247: 0x0003, 0x249: 0x0003, 0x24a: 0x0003, 0x24b: 0x0003, + 0x24d: 0x0003, 0x250: 0x0003, + 0x258: 0x0003, 0x259: 0x0003, 0x25a: 0x0003, 0x25b: 0x0003, 0x25d: 0x0003, + 0x25f: 0x0003, + // Block 0xa, offset 0x280 + 0x280: 0x0001, 0x281: 0x0001, 0x282: 0x0001, 0x283: 0x0001, 0x284: 0x0001, 0x285: 0x0001, + 0x286: 0x0001, 0x287: 0x0001, 0x288: 0x0001, 0x289: 0x0001, 0x28a: 0x0001, 0x28b: 0x0001, + 0x28c: 0x0001, 0x28d: 0x0001, 0x28e: 0x0001, 0x28f: 0x0001, 0x290: 0x0001, 0x291: 0x0001, + 0x292: 0x0001, 0x293: 0x0001, 0x294: 0x0001, 0x295: 0x0001, 0x296: 0x0001, 0x297: 0x0001, + 0x298: 0x0001, 0x299: 0x0001, 0x29a: 0x0001, 0x29b: 0x0001, 0x29c: 0x0001, 0x29d: 0x0001, + 0x29e: 0x0001, 0x29f: 0x0001, 0x2a0: 0x0001, 0x2a1: 0x0001, 0x2a2: 0x0001, 0x2a3: 0x0001, + 0x2a4: 0x0001, 0x2a5: 0x0001, 0x2a6: 0x0001, 0x2a7: 0x0001, 0x2a8: 0x0001, 0x2a9: 0x0001, + 0x2aa: 0x0001, 0x2ab: 0x0001, 0x2ac: 0x0001, 0x2ad: 0x0001, 0x2ae: 0x0001, 0x2af: 0x0001, + 0x2b0: 0x0001, 0x2b1: 0x0001, 0x2b2: 0x0001, 0x2b3: 0x0001, 0x2b4: 0x0001, 0x2b5: 0x0001, + 0x2b6: 0x0001, 0x2b7: 0x0001, 0x2b8: 0x0001, 0x2b9: 0x0001, 0x2ba: 0x0001, 0x2bb: 0x0001, + 0x2bc: 0x0001, 0x2bd: 0x0001, 0x2be: 0x0001, 0x2bf: 0x0001, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0001, 0x2c1: 0x0001, 0x2c2: 0x0001, 0x2c3: 0x0001, 0x2c4: 0x0001, 0x2c5: 0x0001, + 0x2c6: 0x0001, 0x2c7: 0x0001, 0x2c8: 0x0001, 0x2c9: 0x0001, 0x2ca: 0x0001, 0x2cb: 0x0001, + 0x2cc: 0x0001, 0x2cd: 0x0001, 0x2ce: 0x0001, 0x2cf: 0x0001, 0x2d0: 0x0001, 0x2d1: 0x0001, 
+ 0x2d2: 0x0001, 0x2d3: 0x0001, 0x2d4: 0x0001, 0x2d5: 0x0001, 0x2d6: 0x0001, 0x2d7: 0x0001, + 0x2d8: 0x0001, 0x2d9: 0x0001, 0x2da: 0x0001, 0x2db: 0x0001, 0x2dc: 0x0001, 0x2dd: 0x0001, + 0x2de: 0x0001, 0x2df: 0x0001, 0x2e0: 0x0001, 0x2e1: 0x0001, 0x2e2: 0x0001, 0x2e3: 0x0001, + 0x2e4: 0x0001, 0x2e5: 0x0001, 0x2e6: 0x0001, 0x2e7: 0x0001, 0x2e8: 0x0001, 0x2e9: 0x0001, + 0x2ea: 0x0001, 0x2eb: 0x0001, 0x2ec: 0x0001, 0x2ed: 0x0001, 0x2ee: 0x0001, 0x2ef: 0x0001, + // Block 0xc, offset 0x300 + 0x311: 0x0003, + 0x312: 0x0003, 0x313: 0x0003, 0x314: 0x0003, 0x315: 0x0003, 0x316: 0x0003, 0x317: 0x0003, + 0x318: 0x0003, 0x319: 0x0003, 0x31a: 0x0003, 0x31b: 0x0003, 0x31c: 0x0003, 0x31d: 0x0003, + 0x31e: 0x0003, 0x31f: 0x0003, 0x320: 0x0003, 0x321: 0x0003, 0x323: 0x0003, + 0x324: 0x0003, 0x325: 0x0003, 0x326: 0x0003, 0x327: 0x0003, 0x328: 0x0003, 0x329: 0x0003, + 0x331: 0x0003, 0x332: 0x0003, 0x333: 0x0003, 0x334: 0x0003, 0x335: 0x0003, + 0x336: 0x0003, 0x337: 0x0003, 0x338: 0x0003, 0x339: 0x0003, 0x33a: 0x0003, 0x33b: 0x0003, + 0x33c: 0x0003, 0x33d: 0x0003, 0x33e: 0x0003, 0x33f: 0x0003, + // Block 0xd, offset 0x340 + 0x340: 0x0003, 0x341: 0x0003, 0x343: 0x0003, 0x344: 0x0003, 0x345: 0x0003, + 0x346: 0x0003, 0x347: 0x0003, 0x348: 0x0003, 0x349: 0x0003, + // Block 0xe, offset 0x380 + 0x381: 0x0003, + 0x390: 0x0003, 0x391: 0x0003, + 0x392: 0x0003, 0x393: 0x0003, 0x394: 0x0003, 0x395: 0x0003, 0x396: 0x0003, 0x397: 0x0003, + 0x398: 0x0003, 0x399: 0x0003, 0x39a: 0x0003, 0x39b: 0x0003, 0x39c: 0x0003, 0x39d: 0x0003, + 0x39e: 0x0003, 0x39f: 0x0003, 0x3a0: 0x0003, 0x3a1: 0x0003, 0x3a2: 0x0003, 0x3a3: 0x0003, + 0x3a4: 0x0003, 0x3a5: 0x0003, 0x3a6: 0x0003, 0x3a7: 0x0003, 0x3a8: 0x0003, 0x3a9: 0x0003, + 0x3aa: 0x0003, 0x3ab: 0x0003, 0x3ac: 0x0003, 0x3ad: 0x0003, 0x3ae: 0x0003, 0x3af: 0x0003, + 0x3b0: 0x0003, 0x3b1: 0x0003, 0x3b2: 0x0003, 0x3b3: 0x0003, 0x3b4: 0x0003, 0x3b5: 0x0003, + 0x3b6: 0x0003, 0x3b7: 0x0003, 0x3b8: 0x0003, 0x3b9: 0x0003, 0x3ba: 0x0003, 0x3bb: 0x0003, + 0x3bc: 0x0003, 0x3bd: 0x0003, 0x3be: 0x0003, 0x3bf: 0x0003, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0003, 0x3c1: 0x0003, 0x3c2: 0x0003, 0x3c3: 0x0003, 0x3c4: 0x0003, 0x3c5: 0x0003, + 0x3c6: 0x0003, 0x3c7: 0x0003, 0x3c8: 0x0003, 0x3c9: 0x0003, 0x3ca: 0x0003, 0x3cb: 0x0003, + 0x3cc: 0x0003, 0x3cd: 0x0003, 0x3ce: 0x0003, 0x3cf: 0x0003, 0x3d1: 0x0003, + // Block 0x10, offset 0x400 + 0x403: 0x0001, 0x404: 0x0001, 0x405: 0x0001, + 0x406: 0x0001, 0x407: 0x0001, 0x408: 0x0001, 0x409: 0x0001, + // Block 0x11, offset 0x440 + 0x451: 0x0001, + 0x452: 0x0001, 0x453: 0x0001, 0x454: 0x0001, 0x455: 0x0001, 0x456: 0x0001, 0x457: 0x0001, + 0x458: 0x0001, 0x459: 0x0001, 0x45a: 0x0001, 0x45b: 0x0001, 0x45c: 0x0001, 0x45d: 0x0001, + 0x45e: 0x0001, 0x45f: 0x0001, 0x460: 0x0001, 0x461: 0x0001, 0x462: 0x0001, 0x463: 0x0001, + 0x464: 0x0001, 0x465: 0x0001, 0x466: 0x0001, 0x467: 0x0001, 0x468: 0x0001, 0x469: 0x0001, + 0x46a: 0x0001, 0x46b: 0x0001, 0x46c: 0x0001, 0x46d: 0x0001, 0x46e: 0x0001, 0x46f: 0x0001, + 0x470: 0x0001, 0x471: 0x0001, 0x472: 0x0001, 0x473: 0x0001, 0x474: 0x0001, 0x475: 0x0001, + 0x476: 0x0001, 0x477: 0x0001, 0x478: 0x0001, 0x479: 0x0001, 0x47a: 0x0001, 0x47b: 0x0001, + 0x47c: 0x0001, 0x47d: 0x0001, 0x47f: 0x0001, + // Block 0x12, offset 0x480 + 0x481: 0x0001, 0x482: 0x0001, 0x484: 0x0001, 0x485: 0x0001, + 0x487: 0x0001, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0001, 0x4c1: 0x0001, 0x4c2: 0x0001, 0x4c3: 0x0001, 0x4c4: 0x0001, 0x4c5: 0x0001, + 0x4d0: 0x0001, 0x4d1: 0x0001, + 0x4d2: 0x0001, 0x4d3: 0x0001, 0x4d4: 0x0001, 0x4d5: 0x0001, 0x4d6: 0x0001, 
0x4d7: 0x0001, + 0x4d8: 0x0001, 0x4d9: 0x0001, 0x4da: 0x0001, 0x4dc: 0x0001, + // Block 0x14, offset 0x500 + 0x50b: 0x0001, + 0x50c: 0x0001, 0x50d: 0x0001, 0x50e: 0x0001, 0x50f: 0x0001, 0x510: 0x0001, 0x511: 0x0001, + 0x512: 0x0001, 0x513: 0x0001, 0x514: 0x0001, 0x515: 0x0001, 0x516: 0x0001, 0x517: 0x0001, + 0x518: 0x0001, 0x519: 0x0001, 0x51a: 0x0001, 0x51b: 0x0001, 0x51c: 0x0001, 0x51d: 0x0001, + 0x51e: 0x0001, 0x51f: 0x0001, + 0x530: 0x0001, + // Block 0x15, offset 0x540 + 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x567: 0x0001, 0x568: 0x0001, + 0x56a: 0x0001, 0x56b: 0x0001, 0x56c: 0x0001, 0x56d: 0x0001, + // Block 0x16, offset 0x580 + 0x58f: 0x0001, 0x591: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, + // Block 0x18, offset 0x600 + 0x626: 0x0001, 0x627: 0x0001, 0x628: 0x0001, 0x629: 0x0001, + 0x62a: 0x0001, 0x62b: 0x0001, 0x62c: 0x0001, 0x62d: 0x0001, 0x62e: 0x0001, 0x62f: 0x0001, + 0x630: 0x0001, + // Block 0x19, offset 0x640 + 0x66b: 0x0001, 0x66c: 0x0001, 0x66d: 0x0001, 0x66e: 0x0001, 0x66f: 0x0001, + 0x670: 0x0001, 0x671: 0x0001, 0x672: 0x0001, 0x673: 0x0001, + 0x67d: 0x0001, + // Block 0x1a, offset 0x680 + 0x696: 0x0001, 0x697: 0x0001, + 0x698: 0x0001, 0x699: 0x0001, 0x69b: 0x0001, 0x69c: 0x0001, 0x69d: 0x0001, + 0x69e: 0x0001, 0x69f: 0x0001, 0x6a0: 0x0001, 0x6a1: 0x0001, 0x6a2: 0x0001, 0x6a3: 0x0001, + 0x6a5: 0x0001, 0x6a6: 0x0001, 0x6a7: 0x0001, 0x6a9: 0x0001, + 0x6aa: 0x0001, 0x6ab: 0x0001, 0x6ac: 0x0001, 0x6ad: 0x0001, + // Block 0x1b, offset 0x6c0 + 0x6d9: 0x0001, 0x6da: 0x0001, 0x6db: 0x0001, + // Block 0x1c, offset 0x700 + 0x710: 0x0001, 0x711: 0x0001, + 0x718: 0x0001, 0x719: 0x0001, 0x71a: 0x0001, 0x71b: 0x0001, 0x71c: 0x0001, 0x71d: 0x0001, + 0x71e: 0x0001, 0x71f: 0x0001, + // Block 0x1d, offset 0x740 + 0x74a: 0x0001, 0x74b: 0x0001, + 0x74c: 0x0001, 0x74d: 0x0001, 0x74e: 0x0001, 0x74f: 0x0001, 0x750: 0x0001, 0x751: 0x0001, + 0x752: 0x0001, 0x753: 0x0001, 0x754: 0x0001, 0x755: 0x0001, 0x756: 0x0001, 0x757: 0x0001, + 0x758: 0x0001, 0x759: 0x0001, 0x75a: 0x0001, 0x75b: 0x0001, 0x75c: 0x0001, 0x75d: 0x0001, + 0x75e: 0x0001, 0x75f: 0x0001, 0x760: 0x0001, 0x761: 0x0001, 0x762: 0x0001, 0x763: 0x0001, + 0x764: 0x0001, 0x765: 0x0001, 0x766: 0x0001, 0x767: 0x0001, 0x768: 0x0001, 0x769: 0x0001, + 0x76a: 0x0001, 0x76b: 0x0001, 0x76c: 0x0001, 0x76d: 0x0001, 0x76e: 0x0001, 0x76f: 0x0001, + 0x770: 0x0001, 0x771: 0x0001, 0x772: 0x0001, 0x773: 0x0001, 0x774: 0x0001, 0x775: 0x0001, + 0x776: 0x0001, 0x777: 0x0001, 0x778: 0x0001, 0x779: 0x0001, 0x77a: 0x0001, 0x77b: 0x0001, + 0x77c: 0x0001, 0x77d: 0x0001, 0x77e: 0x0001, 0x77f: 0x0001, + // Block 0x1e, offset 0x780 + 0x780: 0x0001, 0x781: 0x0001, 0x782: 0x0001, + 0x7ba: 0x0001, + 0x7bc: 0x0001, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x0001, 0x7c2: 0x0001, 0x7c3: 0x0001, 0x7c4: 0x0001, 0x7c5: 0x0001, + 0x7c6: 0x0001, 0x7c7: 0x0001, 0x7c8: 0x0001, + 0x7cd: 0x0001, 0x7d1: 0x0001, + 0x7d2: 0x0001, 0x7d3: 0x0001, 0x7d4: 0x0001, 0x7d5: 0x0001, 0x7d6: 0x0001, 0x7d7: 0x0001, + 
0x7e2: 0x0001, 0x7e3: 0x0001, + // Block 0x20, offset 0x800 + 0x801: 0x0001, + 0x83c: 0x0001, + // Block 0x21, offset 0x840 + 0x841: 0x0001, 0x842: 0x0001, 0x843: 0x0001, 0x844: 0x0001, + 0x84d: 0x0001, + 0x862: 0x0001, 0x863: 0x0001, + 0x87e: 0x0001, + // Block 0x22, offset 0x880 + 0x881: 0x0001, 0x882: 0x0001, + 0x8bc: 0x0001, + // Block 0x23, offset 0x8c0 + 0x8c1: 0x0001, 0x8c2: 0x0001, + 0x8c7: 0x0001, 0x8c8: 0x0001, 0x8cb: 0x0001, + 0x8cc: 0x0001, 0x8cd: 0x0001, 0x8d1: 0x0001, + 0x8f0: 0x0001, 0x8f1: 0x0001, 0x8f5: 0x0001, + // Block 0x24, offset 0x900 + 0x901: 0x0001, 0x902: 0x0001, 0x903: 0x0001, 0x904: 0x0001, 0x905: 0x0001, + 0x907: 0x0001, 0x908: 0x0001, + 0x90d: 0x0001, + 0x922: 0x0001, 0x923: 0x0001, + 0x93a: 0x0001, 0x93b: 0x0001, + 0x93c: 0x0001, 0x93d: 0x0001, 0x93e: 0x0001, 0x93f: 0x0001, + // Block 0x25, offset 0x940 + 0x941: 0x0001, + 0x97c: 0x0001, 0x97f: 0x0001, + // Block 0x26, offset 0x980 + 0x981: 0x0001, 0x982: 0x0001, 0x983: 0x0001, 0x984: 0x0001, + 0x98d: 0x0001, + 0x995: 0x0001, 0x996: 0x0001, + 0x9a2: 0x0001, 0x9a3: 0x0001, + // Block 0x27, offset 0x9c0 + 0x9c2: 0x0001, + // Block 0x28, offset 0xa00 + 0xa00: 0x0001, + 0xa0d: 0x0001, + // Block 0x29, offset 0xa40 + 0xa40: 0x0001, 0xa44: 0x0001, + 0xa7c: 0x0001, 0xa7e: 0x0001, 0xa7f: 0x0001, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0001, + 0xa86: 0x0001, 0xa87: 0x0001, 0xa88: 0x0001, 0xa8a: 0x0001, 0xa8b: 0x0001, + 0xa8c: 0x0001, 0xa8d: 0x0001, + 0xa95: 0x0001, 0xa96: 0x0001, + 0xaa2: 0x0001, 0xaa3: 0x0001, + // Block 0x2b, offset 0xac0 + 0xac6: 0x0001, + 0xacc: 0x0001, 0xacd: 0x0001, + 0xae2: 0x0001, 0xae3: 0x0001, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0001, 0xb01: 0x0001, + 0xb3b: 0x0001, + 0xb3c: 0x0001, + // Block 0x2d, offset 0xb40 + 0xb41: 0x0001, 0xb42: 0x0001, 0xb43: 0x0001, 0xb44: 0x0001, + 0xb4d: 0x0001, + 0xb62: 0x0001, 0xb63: 0x0001, + // Block 0x2e, offset 0xb80 + 0xb81: 0x0001, + // Block 0x2f, offset 0xbc0 + 0xbca: 0x0001, + 0xbd2: 0x0001, 0xbd3: 0x0001, 0xbd4: 0x0001, 0xbd6: 0x0001, + // Block 0x30, offset 0xc00 + 0xc31: 0x0001, 0xc34: 0x0001, 0xc35: 0x0001, + 0xc36: 0x0001, 0xc37: 0x0001, 0xc38: 0x0001, 0xc39: 0x0001, 0xc3a: 0x0001, + // Block 0x31, offset 0xc40 + 0xc47: 0x0001, 0xc48: 0x0001, 0xc49: 0x0001, 0xc4a: 0x0001, 0xc4b: 0x0001, + 0xc4c: 0x0001, 0xc4d: 0x0001, 0xc4e: 0x0001, + // Block 0x32, offset 0xc80 + 0xcb1: 0x0001, 0xcb4: 0x0001, 0xcb5: 0x0001, + 0xcb6: 0x0001, 0xcb7: 0x0001, 0xcb8: 0x0001, 0xcb9: 0x0001, 0xcba: 0x0001, 0xcbb: 0x0001, + 0xcbc: 0x0001, + // Block 0x33, offset 0xcc0 + 0xcc8: 0x0001, 0xcc9: 0x0001, 0xcca: 0x0001, 0xccb: 0x0001, + 0xccc: 0x0001, 0xccd: 0x0001, 0xcce: 0x0001, + // Block 0x34, offset 0xd00 + 0xd18: 0x0001, 0xd19: 0x0001, + 0xd35: 0x0001, + 0xd37: 0x0001, 0xd39: 0x0001, + // Block 0x35, offset 0xd40 + 0xd71: 0x0001, 0xd72: 0x0001, 0xd73: 0x0001, 0xd74: 0x0001, 0xd75: 0x0001, + 0xd76: 0x0001, 0xd77: 0x0001, 0xd78: 0x0001, 0xd79: 0x0001, 0xd7a: 0x0001, 0xd7b: 0x0001, + 0xd7c: 0x0001, 0xd7d: 0x0001, 0xd7e: 0x0001, + // Block 0x36, offset 0xd80 + 0xd80: 0x0001, 0xd81: 0x0001, 0xd82: 0x0001, 0xd83: 0x0001, 0xd84: 0x0001, + 0xd86: 0x0001, 0xd87: 0x0001, + 0xd8d: 0x0001, 0xd8e: 0x0001, 0xd8f: 0x0001, 0xd90: 0x0001, 0xd91: 0x0001, + 0xd92: 0x0001, 0xd93: 0x0001, 0xd94: 0x0001, 0xd95: 0x0001, 0xd96: 0x0001, 0xd97: 0x0001, + 0xd99: 0x0001, 0xd9a: 0x0001, 0xd9b: 0x0001, 0xd9c: 0x0001, 0xd9d: 0x0001, + 0xd9e: 0x0001, 0xd9f: 0x0001, 0xda0: 0x0001, 0xda1: 0x0001, 0xda2: 0x0001, 0xda3: 0x0001, + 0xda4: 0x0001, 0xda5: 0x0001, 0xda6: 0x0001, 0xda7: 0x0001, 0xda8: 
0x0001, 0xda9: 0x0001, + 0xdaa: 0x0001, 0xdab: 0x0001, 0xdac: 0x0001, 0xdad: 0x0001, 0xdae: 0x0001, 0xdaf: 0x0001, + 0xdb0: 0x0001, 0xdb1: 0x0001, 0xdb2: 0x0001, 0xdb3: 0x0001, 0xdb4: 0x0001, 0xdb5: 0x0001, + 0xdb6: 0x0001, 0xdb7: 0x0001, 0xdb8: 0x0001, 0xdb9: 0x0001, 0xdba: 0x0001, 0xdbb: 0x0001, + 0xdbc: 0x0001, + // Block 0x37, offset 0xdc0 + 0xdc6: 0x0001, + // Block 0x38, offset 0xe00 + 0xe2d: 0x0001, 0xe2e: 0x0001, 0xe2f: 0x0001, + 0xe30: 0x0001, 0xe32: 0x0001, 0xe33: 0x0001, 0xe34: 0x0001, 0xe35: 0x0001, + 0xe36: 0x0001, 0xe37: 0x0001, 0xe39: 0x0001, 0xe3a: 0x0001, + 0xe3d: 0x0001, 0xe3e: 0x0001, + // Block 0x39, offset 0xe40 + 0xe58: 0x0001, 0xe59: 0x0001, + 0xe5e: 0x0001, 0xe5f: 0x0001, 0xe60: 0x0001, + 0xe71: 0x0001, 0xe72: 0x0001, 0xe73: 0x0001, 0xe74: 0x0001, + // Block 0x3a, offset 0xe80 + 0xe82: 0x0001, 0xe85: 0x0001, + 0xe86: 0x0001, + 0xe8d: 0x0001, + 0xe9d: 0x0001, + // Block 0x3b, offset 0xec0 + 0xec0: 0x0002, 0xec1: 0x0002, 0xec2: 0x0002, 0xec3: 0x0002, 0xec4: 0x0002, 0xec5: 0x0002, + 0xec6: 0x0002, 0xec7: 0x0002, 0xec8: 0x0002, 0xec9: 0x0002, 0xeca: 0x0002, 0xecb: 0x0002, + 0xecc: 0x0002, 0xecd: 0x0002, 0xece: 0x0002, 0xecf: 0x0002, 0xed0: 0x0002, 0xed1: 0x0002, + 0xed2: 0x0002, 0xed3: 0x0002, 0xed4: 0x0002, 0xed5: 0x0002, 0xed6: 0x0002, 0xed7: 0x0002, + 0xed8: 0x0002, 0xed9: 0x0002, 0xeda: 0x0002, 0xedb: 0x0002, 0xedc: 0x0002, 0xedd: 0x0002, + 0xede: 0x0002, 0xedf: 0x0002, 0xee0: 0x0002, 0xee1: 0x0002, 0xee2: 0x0002, 0xee3: 0x0002, + 0xee4: 0x0002, 0xee5: 0x0002, 0xee6: 0x0002, 0xee7: 0x0002, 0xee8: 0x0002, 0xee9: 0x0002, + 0xeea: 0x0002, 0xeeb: 0x0002, 0xeec: 0x0002, 0xeed: 0x0002, 0xeee: 0x0002, 0xeef: 0x0002, + 0xef0: 0x0002, 0xef1: 0x0002, 0xef2: 0x0002, 0xef3: 0x0002, 0xef4: 0x0002, 0xef5: 0x0002, + 0xef6: 0x0002, 0xef7: 0x0002, 0xef8: 0x0002, 0xef9: 0x0002, 0xefa: 0x0002, 0xefb: 0x0002, + 0xefc: 0x0002, 0xefd: 0x0002, 0xefe: 0x0002, 0xeff: 0x0002, + // Block 0x3c, offset 0xf00 + 0xf00: 0x0002, 0xf01: 0x0002, 0xf02: 0x0002, 0xf03: 0x0002, 0xf04: 0x0002, 0xf05: 0x0002, + 0xf06: 0x0002, 0xf07: 0x0002, 0xf08: 0x0002, 0xf09: 0x0002, 0xf0a: 0x0002, 0xf0b: 0x0002, + 0xf0c: 0x0002, 0xf0d: 0x0002, 0xf0e: 0x0002, 0xf0f: 0x0002, 0xf10: 0x0002, 0xf11: 0x0002, + 0xf12: 0x0002, 0xf13: 0x0002, 0xf14: 0x0002, 0xf15: 0x0002, 0xf16: 0x0002, 0xf17: 0x0002, + 0xf18: 0x0002, 0xf19: 0x0002, 0xf1a: 0x0002, 0xf1b: 0x0002, 0xf1c: 0x0002, 0xf1d: 0x0002, + 0xf1e: 0x0002, 0xf1f: 0x0002, + // Block 0x3d, offset 0xf40 + 0xf5d: 0x0001, + 0xf5e: 0x0001, 0xf5f: 0x0001, + // Block 0x3e, offset 0xf80 + 0xf92: 0x0001, 0xf93: 0x0001, 0xf94: 0x0001, + 0xfb2: 0x0001, 0xfb3: 0x0001, + // Block 0x3f, offset 0xfc0 + 0xfd2: 0x0001, 0xfd3: 0x0001, + 0xff2: 0x0001, 0xff3: 0x0001, + // Block 0x40, offset 0x1000 + 0x1034: 0x0001, 0x1035: 0x0001, + 0x1037: 0x0001, 0x1038: 0x0001, 0x1039: 0x0001, 0x103a: 0x0001, 0x103b: 0x0001, + 0x103c: 0x0001, 0x103d: 0x0001, + // Block 0x41, offset 0x1040 + 0x1046: 0x0001, 0x1049: 0x0001, 0x104a: 0x0001, 0x104b: 0x0001, + 0x104c: 0x0001, 0x104d: 0x0001, 0x104e: 0x0001, 0x104f: 0x0001, 0x1050: 0x0001, 0x1051: 0x0001, + 0x1052: 0x0001, 0x1053: 0x0001, + 0x105d: 0x0001, + // Block 0x42, offset 0x1080 + 0x108b: 0x0001, + 0x108c: 0x0001, 0x108d: 0x0001, 0x108e: 0x0001, 0x108f: 0x0001, + // Block 0x43, offset 0x10c0 + 0x10c5: 0x0001, + 0x10c6: 0x0001, + 0x10e9: 0x0001, + // Block 0x44, offset 0x1100 + 0x1120: 0x0001, 0x1121: 0x0001, 0x1122: 0x0001, + 0x1127: 0x0001, 0x1128: 0x0001, + 0x1132: 0x0001, + 0x1139: 0x0001, 0x113a: 0x0001, 0x113b: 0x0001, + // Block 0x45, offset 0x1140 + 
0x1157: 0x0001, + 0x1158: 0x0001, 0x115b: 0x0001, + // Block 0x46, offset 0x1180 + 0x1196: 0x0001, + 0x1198: 0x0001, 0x1199: 0x0001, 0x119a: 0x0001, 0x119b: 0x0001, 0x119c: 0x0001, 0x119d: 0x0001, + 0x119e: 0x0001, 0x11a0: 0x0001, 0x11a2: 0x0001, + 0x11a5: 0x0001, 0x11a6: 0x0001, 0x11a7: 0x0001, 0x11a8: 0x0001, 0x11a9: 0x0001, + 0x11aa: 0x0001, 0x11ab: 0x0001, 0x11ac: 0x0001, + 0x11b3: 0x0001, 0x11b4: 0x0001, 0x11b5: 0x0001, + 0x11b6: 0x0001, 0x11b7: 0x0001, 0x11b8: 0x0001, 0x11b9: 0x0001, 0x11ba: 0x0001, 0x11bb: 0x0001, + 0x11bc: 0x0001, 0x11bf: 0x0001, + // Block 0x47, offset 0x11c0 + 0x11f0: 0x0001, 0x11f1: 0x0001, 0x11f2: 0x0001, 0x11f3: 0x0001, 0x11f4: 0x0001, 0x11f5: 0x0001, + 0x11f6: 0x0001, 0x11f7: 0x0001, 0x11f8: 0x0001, 0x11f9: 0x0001, 0x11fa: 0x0001, 0x11fb: 0x0001, + 0x11fc: 0x0001, 0x11fd: 0x0001, 0x11fe: 0x0001, 0x11ff: 0x0001, + // Block 0x48, offset 0x1200 + 0x1200: 0x0001, 0x1201: 0x0001, 0x1202: 0x0001, 0x1203: 0x0001, 0x1204: 0x0001, 0x1205: 0x0001, + 0x1206: 0x0001, 0x1207: 0x0001, 0x1208: 0x0001, 0x1209: 0x0001, 0x120a: 0x0001, 0x120b: 0x0001, + 0x120c: 0x0001, 0x120d: 0x0001, 0x120e: 0x0001, + // Block 0x49, offset 0x1240 + 0x1240: 0x0001, 0x1241: 0x0001, 0x1242: 0x0001, 0x1243: 0x0001, + 0x1274: 0x0001, + 0x1276: 0x0001, 0x1277: 0x0001, 0x1278: 0x0001, 0x1279: 0x0001, 0x127a: 0x0001, + 0x127c: 0x0001, + // Block 0x4a, offset 0x1280 + 0x1282: 0x0001, + 0x12ab: 0x0001, 0x12ac: 0x0001, 0x12ad: 0x0001, 0x12ae: 0x0001, 0x12af: 0x0001, + 0x12b0: 0x0001, 0x12b1: 0x0001, 0x12b2: 0x0001, 0x12b3: 0x0001, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x0001, 0x12c1: 0x0001, + 0x12e2: 0x0001, 0x12e3: 0x0001, + 0x12e4: 0x0001, 0x12e5: 0x0001, 0x12e8: 0x0001, 0x12e9: 0x0001, + 0x12eb: 0x0001, 0x12ec: 0x0001, 0x12ed: 0x0001, + // Block 0x4c, offset 0x1300 + 0x1326: 0x0001, 0x1328: 0x0001, 0x1329: 0x0001, + 0x132d: 0x0001, 0x132f: 0x0001, + 0x1330: 0x0001, 0x1331: 0x0001, + // Block 0x4d, offset 0x1340 + 0x136c: 0x0001, 0x136d: 0x0001, 0x136e: 0x0001, 0x136f: 0x0001, + 0x1370: 0x0001, 0x1371: 0x0001, 0x1372: 0x0001, 0x1373: 0x0001, + 0x1376: 0x0001, 0x1377: 0x0001, + // Block 0x4e, offset 0x1380 + 0x1390: 0x0001, 0x1391: 0x0001, + 0x1392: 0x0001, 0x1394: 0x0001, 0x1395: 0x0001, 0x1396: 0x0001, 0x1397: 0x0001, + 0x1398: 0x0001, 0x1399: 0x0001, 0x139a: 0x0001, 0x139b: 0x0001, 0x139c: 0x0001, 0x139d: 0x0001, + 0x139e: 0x0001, 0x139f: 0x0001, 0x13a0: 0x0001, 0x13a2: 0x0001, 0x13a3: 0x0001, + 0x13a4: 0x0001, 0x13a5: 0x0001, 0x13a6: 0x0001, 0x13a7: 0x0001, 0x13a8: 0x0001, + 0x13ad: 0x0001, + 0x13b4: 0x0001, + 0x13b8: 0x0001, 0x13b9: 0x0001, + // Block 0x4f, offset 0x13c0 + 0x13cb: 0x0001, + 0x13cc: 0x0001, 0x13cd: 0x0001, 0x13ce: 0x0001, 0x13cf: 0x0001, 0x13d0: 0x0003, + 0x13d3: 0x0003, 0x13d4: 0x0003, 0x13d5: 0x0003, 0x13d6: 0x0003, + 0x13d8: 0x0003, 0x13d9: 0x0003, 0x13dc: 0x0003, 0x13dd: 0x0003, + 0x13e0: 0x0003, 0x13e1: 0x0003, 0x13e2: 0x0003, + 0x13e4: 0x0003, 0x13e5: 0x0003, 0x13e6: 0x0003, 0x13e7: 0x0003, 0x13e8: 0x0001, 0x13e9: 0x0001, + 0x13ea: 0x0001, 0x13eb: 0x0001, 0x13ec: 0x0001, 0x13ed: 0x0001, 0x13ee: 0x0001, + 0x13f0: 0x0003, 0x13f2: 0x0003, 0x13f3: 0x0003, 0x13f5: 0x0003, + 0x13fb: 0x0003, + 0x13fe: 0x0003, + // Block 0x50, offset 0x1400 + 0x1420: 0x0001, 0x1421: 0x0001, 0x1422: 0x0001, 0x1423: 0x0001, + 0x1424: 0x0001, 0x1426: 0x0001, 0x1427: 0x0001, 0x1428: 0x0001, 0x1429: 0x0001, + 0x142a: 0x0001, 0x142b: 0x0001, 0x142c: 0x0001, 0x142d: 0x0001, 0x142e: 0x0001, 0x142f: 0x0001, + 0x1434: 0x0003, + 0x143f: 0x0003, + // Block 0x51, offset 0x1440 + 0x1441: 0x0003, 0x1442: 
0x0003, 0x1443: 0x0003, 0x1444: 0x0003,
+	[… generated width-values trie data elided for brevity: blocks 0x52 (offset 0x1480) through 0xf5 (offset 0x3d40), sparse entries assigning the width classes 0x0001, 0x0002, and 0x0003 to code point offsets …]
+	0x3d7c: 0x0003, 0x3d7d: 0x0003,
+}
+
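// Editor's note: a minimal, hypothetical sketch of how a blocked two-level
// trie like the width-values table above and stringWidthIndex below is
// typically consumed. The layout assumptions (64-entry blocks, block 0 as
// the shared zero block, a first-level uint8 index selecting a second-level
// value block) follow the generated comments; the vendored package's real
// lookup code is not part of this diff and may instead walk UTF-8 bytes.
// All names below (lookup, idx, values) are illustrative, not the library's.
package main

import "fmt"

const blockSize = 64 // both generated tables are laid out in 64-entry blocks

// lookup maps r to a width class: the high bits of r pick an index entry,
// which names a value block; the low bits pick the entry within that block.
func lookup(idx []uint8, values []uint16, r rune) uint16 {
	hi := int(r) / blockSize // which 64-entry block r falls in
	lo := int(r) % blockSize // offset within that block
	if hi >= len(idx) {
		return 0 // beyond the indexed range: default width class
	}
	block := int(idx[hi]) // block 0 is the zero block, so misses stay cheap
	return values[block*blockSize+lo]
}

func main() {
	// Toy tables: one real value block marking a single rune as class 0x0003.
	idx := make([]uint8, 4)
	idx[1] = 1
	values := make([]uint16, 2*blockSize)
	values[1*blockSize+3] = 0x0003
	fmt.Println(lookup(idx, values, rune(1*blockSize+3))) // prints 3
	fmt.Println(lookup(idx, values, 'A'))                 // prints 0
}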
+var stringWidthIndex = [1920]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, 0xd2: 0x0e, 0xd6: 0x0f, 0xd7: 0x10, + 0xd8: 0x11, 0xd9: 0x12, 0xdb: 0x13, 0xdc: 0x14, 0xdd: 0x15, 0xde: 0x16, 0xdf: 0x17, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x17, 0xf3: 0x1a, 0xf4: 0x1b, + // Block 0x4, offset 0x100 + 0x120: 0x18, 0x121: 0x19, 0x122: 0x1a, 0x123: 0x1b, 0x124: 0x1c, 0x125: 0x1d, 0x126: 0x1e, 0x127: 0x1f, + 0x128: 0x20, 0x129: 0x21, 0x12a: 0x20, 0x12b: 0x22, 0x12c: 0x23, 0x12d: 0x24, 0x12e: 0x25, 0x12f: 0x26, + 0x130: 0x27, 0x131: 0x28, 0x132: 0x23, 0x133: 0x29, 0x134: 0x2a, 0x135: 0x2b, 0x136: 0x2c, 0x137: 0x2d, + 0x138: 0x2e, 0x139: 0x2f, 0x13a: 0x30, 0x13b: 0x31, 0x13c: 0x32, 0x13d: 0x33, 0x13e: 0x34, 0x13f: 0x35, + // Block 0x5, offset 0x140 + 0x140: 0x36, 0x141: 0x37, 0x142: 0x38, 0x144: 0x39, 0x145: 0x3a, + 0x14d: 0x3b, + 0x15c: 0x3c, 0x15d: 0x3d, 0x15e: 0x3e, 0x15f: 0x3f, + 0x160: 0x40, 0x162: 0x41, 0x164: 0x42, + 0x168: 0x43, 0x169: 0x44, 0x16a: 0x45, 0x16b: 0x46, 0x16c: 0x47, 0x16d: 0x48, 0x16e: 0x49, 0x16f: 0x4a, + 0x170: 0x4b, 0x173: 0x4c, 0x177: 0x08, + // Block 0x6, offset 0x180 + 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54, + 0x188: 0x55, 0x189: 0x56, 0x18a: 0x57, 0x18c: 0x58, 0x18f: 0x59, + 0x191: 0x5a, 0x192: 0x5b, 0x193: 0x5c, 0x194: 0x5b, 0x195: 0x5d, 0x196: 0x5e, 0x197: 0x5f, + 0x198: 0x60, 0x199: 0x61, 0x19a: 0x62, 0x19b: 0x63, 0x19c: 0x64, 0x19d: 0x65, 0x19e: 0x66, + 0x1ac: 0x67, 0x1ad: 0x68, + 0x1b3: 0x69, 0x1b5: 0x6a, 0x1b7: 0x6b, + 0x1ba: 0x6c, 0x1bb: 0x6d, 0x1bc: 0x39, 0x1bd: 0x39, 0x1be: 0x39, 0x1bf: 0x6e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6f, 0x1c1: 0x70, 0x1c2: 0x71, 0x1c3: 0x39, 0x1c4: 0x72, 0x1c5: 0x39, 0x1c6: 0x73, 0x1c7: 0x74, + 0x1c8: 0x75, 0x1c9: 0x76, 0x1ca: 0x39, 0x1cb: 0x39, 0x1cc: 0x39, 0x1cd: 0x39, 0x1ce: 0x39, 0x1cf: 0x39, + 0x1d0: 0x39, 0x1d1: 0x39, 0x1d2: 0x39, 0x1d3: 0x39, 0x1d4: 0x39, 0x1d5: 0x39, 0x1d6: 0x39, 0x1d7: 0x39, + 0x1d8: 0x39, 0x1d9: 0x39, 0x1da: 0x39, 0x1db: 0x39, 0x1dc: 0x39, 0x1dd: 0x39, 0x1de: 0x39, 0x1df: 0x39, + 0x1e0: 0x39, 0x1e1: 0x39, 0x1e2: 0x39, 0x1e3: 0x39, 0x1e4: 0x39, 0x1e5: 0x39, 0x1e6: 0x39, 0x1e7: 0x39, + 0x1e8: 0x39, 0x1e9: 0x39, 0x1ea: 0x39, 0x1eb: 0x39, 0x1ec: 0x39, 0x1ed: 0x39, 0x1ee: 0x39, 0x1ef: 0x39, + 0x1f0: 0x39, 0x1f1: 0x39, 0x1f2: 0x39, 0x1f3: 0x39, 0x1f4: 0x39, 0x1f5: 0x39, 0x1f6: 0x39, 0x1f7: 0x39, + 0x1f8: 0x39, 0x1f9: 0x39, 0x1fa: 0x39, 0x1fb: 0x39, 0x1fc: 0x39, 0x1fd: 0x39, 0x1fe: 0x39, 0x1ff: 0x39, + // Block 0x8, offset 0x200 + 0x200: 0x39, 0x201: 0x39, 0x202: 0x39, 0x203: 0x39, 0x204: 0x39, 0x205: 0x39, 0x206: 0x39, 0x207: 0x39, + 0x208: 0x39, 0x209: 0x39, 0x20a: 0x39, 0x20b: 0x39, 0x20c: 0x39, 0x20d: 0x39, 0x20e: 0x39, 0x20f: 0x39, + 0x210: 0x39, 0x211: 0x39, 0x212: 0x39, 0x213: 0x39, 0x214: 0x39, 0x215: 0x39, 0x216: 0x39, 0x217: 0x39, + 0x218: 0x39, 0x219: 0x39, 0x21a: 0x39, 0x21b: 0x39, 0x21c: 0x39, 0x21d: 0x39, 0x21e: 0x39, 0x21f: 0x39, + 0x220: 0x39, 0x221: 0x39, 0x222: 0x39, 0x223: 0x39, 0x224: 0x39, 0x225: 0x39, 0x226: 0x39, 0x227: 0x39, + 0x228: 0x39, 0x229: 0x39, 0x22a: 0x39, 0x22b: 0x39, 0x22c: 0x39, 0x22d: 0x39, 0x22e: 0x39, 0x22f: 0x39, + 
0x230: 0x39, 0x231: 0x39, 0x232: 0x39, 0x233: 0x39, 0x234: 0x39, 0x235: 0x39, 0x236: 0x39, 0x237: 0x39, + 0x238: 0x39, 0x239: 0x39, 0x23a: 0x39, 0x23b: 0x39, 0x23c: 0x39, 0x23d: 0x39, 0x23e: 0x39, 0x23f: 0x39, + // Block 0x9, offset 0x240 + 0x240: 0x39, 0x241: 0x39, 0x242: 0x39, 0x243: 0x39, 0x244: 0x39, 0x245: 0x39, 0x246: 0x39, 0x247: 0x39, + 0x248: 0x39, 0x249: 0x39, 0x24a: 0x39, 0x24b: 0x39, 0x24c: 0x39, 0x24d: 0x39, 0x24e: 0x39, 0x24f: 0x39, + 0x250: 0x39, 0x251: 0x39, 0x252: 0x77, 0x253: 0x78, + 0x259: 0x79, 0x25a: 0x7a, 0x25b: 0x7b, + 0x260: 0x7c, 0x263: 0x7d, 0x264: 0x7e, 0x265: 0x7f, 0x266: 0x80, 0x267: 0x81, + 0x268: 0x82, 0x269: 0x83, 0x26a: 0x84, 0x26b: 0x85, 0x26f: 0x86, + 0x270: 0x39, 0x271: 0x39, 0x272: 0x39, 0x273: 0x39, 0x274: 0x39, 0x275: 0x39, 0x276: 0x39, 0x277: 0x39, + 0x278: 0x39, 0x279: 0x39, 0x27a: 0x39, 0x27b: 0x39, 0x27c: 0x39, 0x27d: 0x39, 0x27e: 0x39, 0x27f: 0x39, + // Block 0xa, offset 0x280 + 0x280: 0x39, 0x281: 0x39, 0x282: 0x39, 0x283: 0x39, 0x284: 0x39, 0x285: 0x39, 0x286: 0x39, 0x287: 0x39, + 0x288: 0x39, 0x289: 0x39, 0x28a: 0x39, 0x28b: 0x39, 0x28c: 0x39, 0x28d: 0x39, 0x28e: 0x39, 0x28f: 0x39, + 0x290: 0x39, 0x291: 0x39, 0x292: 0x39, 0x293: 0x39, 0x294: 0x39, 0x295: 0x39, 0x296: 0x39, 0x297: 0x39, + 0x298: 0x39, 0x299: 0x39, 0x29a: 0x39, 0x29b: 0x39, 0x29c: 0x39, 0x29d: 0x39, 0x29e: 0x87, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x5b, 0x2c1: 0x5b, 0x2c2: 0x5b, 0x2c3: 0x5b, 0x2c4: 0x5b, 0x2c5: 0x5b, 0x2c6: 0x5b, 0x2c7: 0x5b, + 0x2c8: 0x5b, 0x2c9: 0x5b, 0x2ca: 0x5b, 0x2cb: 0x5b, 0x2cc: 0x5b, 0x2cd: 0x5b, 0x2ce: 0x5b, 0x2cf: 0x5b, + 0x2d0: 0x5b, 0x2d1: 0x5b, 0x2d2: 0x5b, 0x2d3: 0x5b, 0x2d4: 0x5b, 0x2d5: 0x5b, 0x2d6: 0x5b, 0x2d7: 0x5b, + 0x2d8: 0x5b, 0x2d9: 0x5b, 0x2da: 0x5b, 0x2db: 0x5b, 0x2dc: 0x5b, 0x2dd: 0x5b, 0x2de: 0x5b, 0x2df: 0x5b, + 0x2e0: 0x5b, 0x2e1: 0x5b, 0x2e2: 0x5b, 0x2e3: 0x5b, 0x2e4: 0x5b, 0x2e5: 0x5b, 0x2e6: 0x5b, 0x2e7: 0x5b, + 0x2e8: 0x5b, 0x2e9: 0x5b, 0x2ea: 0x5b, 0x2eb: 0x5b, 0x2ec: 0x5b, 0x2ed: 0x5b, 0x2ee: 0x5b, 0x2ef: 0x5b, + 0x2f0: 0x5b, 0x2f1: 0x5b, 0x2f2: 0x5b, 0x2f3: 0x5b, 0x2f4: 0x5b, 0x2f5: 0x5b, 0x2f6: 0x5b, 0x2f7: 0x5b, + 0x2f8: 0x5b, 0x2f9: 0x5b, 0x2fa: 0x5b, 0x2fb: 0x5b, 0x2fc: 0x5b, 0x2fd: 0x5b, 0x2fe: 0x5b, 0x2ff: 0x5b, + // Block 0xc, offset 0x300 + 0x300: 0x5b, 0x301: 0x5b, 0x302: 0x5b, 0x303: 0x5b, 0x304: 0x5b, 0x305: 0x5b, 0x306: 0x5b, 0x307: 0x5b, + 0x308: 0x5b, 0x309: 0x5b, 0x30a: 0x5b, 0x30b: 0x5b, 0x30c: 0x5b, 0x30d: 0x5b, 0x30e: 0x5b, 0x30f: 0x5b, + 0x310: 0x5b, 0x311: 0x5b, 0x312: 0x5b, 0x313: 0x5b, 0x314: 0x5b, 0x315: 0x5b, 0x316: 0x5b, 0x317: 0x5b, + 0x318: 0x5b, 0x319: 0x5b, 0x31a: 0x5b, 0x31b: 0x5b, 0x31c: 0x5b, 0x31d: 0x5b, 0x31e: 0x5b, 0x31f: 0x5b, + 0x320: 0x5b, 0x321: 0x5b, 0x322: 0x5b, 0x323: 0x5b, 0x324: 0x39, 0x325: 0x39, 0x326: 0x39, 0x327: 0x39, + 0x328: 0x39, 0x329: 0x39, 0x32a: 0x39, 0x32b: 0x39, 0x32c: 0x88, + 0x338: 0x89, 0x339: 0x8a, 0x33b: 0x6a, 0x33c: 0x70, 0x33d: 0x8b, 0x33f: 0x8c, + // Block 0xd, offset 0x340 + 0x347: 0x8d, + 0x34b: 0x8e, 0x34d: 0x8f, + 0x368: 0x90, 0x36b: 0x91, + 0x374: 0x92, + 0x37a: 0x93, 0x37b: 0x94, 0x37d: 0x95, 0x37e: 0x96, + // Block 0xe, offset 0x380 + 0x380: 0x97, 0x381: 0x98, 0x382: 0x99, 0x383: 0x9a, 0x384: 0x9b, 0x385: 0x9c, 0x386: 0x9d, 0x387: 0x9e, + 0x388: 0x9f, 0x389: 0x2c, 0x38b: 0xa0, 0x38c: 0x2a, 0x38d: 0xa1, + 0x390: 0xa2, 0x391: 0xa3, 0x392: 0xa4, 0x393: 0xa5, 0x396: 0xa6, 0x397: 0xa7, + 0x398: 0xa8, 0x399: 0xa9, 0x39a: 0xaa, 0x39c: 0xab, + 0x3a0: 0xac, 0x3a4: 0xad, 0x3a5: 0xae, 0x3a7: 0xaf, + 0x3a8: 0xb0, 0x3a9: 0xb1, 0x3aa: 0xb2, + 0x3b0: 0xb3, 0x3b2: 
0xb4, 0x3b4: 0xb5, 0x3b5: 0xb6, 0x3b6: 0xb7, + 0x3bb: 0xb8, 0x3bc: 0xb9, 0x3bd: 0xba, + // Block 0xf, offset 0x3c0 + 0x3d0: 0x45, 0x3d1: 0xbb, + // Block 0x10, offset 0x400 + 0x42b: 0xbc, 0x42c: 0xbd, + 0x43d: 0xbe, 0x43e: 0xbf, 0x43f: 0xc0, + // Block 0x11, offset 0x440 + 0x440: 0x39, 0x441: 0x39, 0x442: 0x39, 0x443: 0x39, 0x444: 0x39, 0x445: 0x39, 0x446: 0x39, 0x447: 0x39, + 0x448: 0x39, 0x449: 0x39, 0x44a: 0x39, 0x44b: 0x39, 0x44c: 0x39, 0x44d: 0x39, 0x44e: 0x39, 0x44f: 0x39, + 0x450: 0x39, 0x451: 0x39, 0x452: 0x39, 0x453: 0x39, 0x454: 0x39, 0x455: 0x39, 0x456: 0x39, 0x457: 0x39, + 0x458: 0x39, 0x459: 0x39, 0x45a: 0x39, 0x45b: 0x39, 0x45c: 0x39, 0x45d: 0x39, 0x45e: 0x39, 0x45f: 0xc1, + 0x460: 0x39, 0x461: 0x39, 0x462: 0x39, 0x463: 0x39, 0x464: 0x39, 0x465: 0x39, 0x466: 0x39, 0x467: 0x39, + 0x468: 0x39, 0x469: 0x39, 0x46a: 0x39, 0x46b: 0x39, 0x46c: 0x39, 0x46d: 0x39, 0x46e: 0x39, 0x46f: 0x39, + 0x470: 0x39, 0x471: 0x39, 0x472: 0x39, 0x473: 0xc2, 0x474: 0xc3, + // Block 0x12, offset 0x480 + 0x4bf: 0xc4, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x39, 0x4c1: 0x39, 0x4c2: 0x39, 0x4c3: 0x39, 0x4c4: 0xc5, 0x4c5: 0xc6, 0x4c6: 0x39, 0x4c7: 0x39, + 0x4c8: 0x39, 0x4c9: 0x39, 0x4ca: 0x39, 0x4cb: 0xc7, + 0x4f2: 0xc8, + // Block 0x14, offset 0x500 + 0x53c: 0xc9, 0x53d: 0xca, + // Block 0x15, offset 0x540 + 0x545: 0xcb, 0x546: 0xcc, + 0x549: 0xcd, 0x54c: 0x39, 0x54d: 0xce, + 0x568: 0xcf, 0x569: 0xd0, 0x56a: 0xd1, + // Block 0x16, offset 0x580 + 0x580: 0xd2, 0x582: 0xbe, 0x584: 0xbd, + 0x58a: 0xd3, 0x58b: 0xd4, + 0x593: 0xd4, + 0x5a3: 0xd5, 0x5a5: 0xd6, + // Block 0x17, offset 0x5c0 + 0x5c0: 0xd7, 0x5c3: 0xd8, 0x5c4: 0xd9, 0x5c5: 0xda, 0x5c6: 0xdb, 0x5c7: 0xdc, + 0x5c8: 0xdd, 0x5c9: 0xde, 0x5cc: 0xdf, 0x5cd: 0xe0, 0x5ce: 0xe1, 0x5cf: 0xe2, + 0x5d0: 0xe3, 0x5d1: 0xe4, 0x5d2: 0x39, 0x5d3: 0xe5, 0x5d4: 0xe6, 0x5d5: 0xe7, 0x5d6: 0xe8, 0x5d7: 0xe9, + 0x5d8: 0x39, 0x5d9: 0xea, 0x5da: 0x39, 0x5db: 0xeb, 0x5df: 0xec, + 0x5e4: 0xed, 0x5e5: 0xee, 0x5e6: 0x39, 0x5e7: 0x39, + 0x5e9: 0xef, 0x5ea: 0xf0, 0x5eb: 0xf1, + // Block 0x18, offset 0x600 + 0x600: 0x39, 0x601: 0x39, 0x602: 0x39, 0x603: 0x39, 0x604: 0x39, 0x605: 0x39, 0x606: 0x39, 0x607: 0x39, + 0x608: 0x39, 0x609: 0x39, 0x60a: 0x39, 0x60b: 0x39, 0x60c: 0x39, 0x60d: 0x39, 0x60e: 0x39, 0x60f: 0x39, + 0x610: 0x39, 0x611: 0x39, 0x612: 0x39, 0x613: 0x39, 0x614: 0x39, 0x615: 0x39, 0x616: 0x39, 0x617: 0x39, + 0x618: 0x39, 0x619: 0x39, 0x61a: 0x39, 0x61b: 0x39, 0x61c: 0x39, 0x61d: 0x39, 0x61e: 0x39, 0x61f: 0x39, + 0x620: 0x39, 0x621: 0x39, 0x622: 0x39, 0x623: 0x39, 0x624: 0x39, 0x625: 0x39, 0x626: 0x39, 0x627: 0x39, + 0x628: 0x39, 0x629: 0x39, 0x62a: 0x39, 0x62b: 0x39, 0x62c: 0x39, 0x62d: 0x39, 0x62e: 0x39, 0x62f: 0x39, + 0x630: 0x39, 0x631: 0x39, 0x632: 0x39, 0x633: 0x39, 0x634: 0x39, 0x635: 0x39, 0x636: 0x39, 0x637: 0x39, + 0x638: 0x39, 0x639: 0x39, 0x63a: 0x39, 0x63b: 0x39, 0x63c: 0x39, 0x63d: 0x39, 0x63e: 0x39, 0x63f: 0xe6, + // Block 0x19, offset 0x640 + 0x650: 0x0b, 0x651: 0x0c, 0x653: 0x0d, 0x656: 0x0e, 0x657: 0x06, + 0x658: 0x0f, 0x65a: 0x10, 0x65b: 0x11, 0x65c: 0x12, 0x65d: 0x13, 0x65e: 0x14, 0x65f: 0x15, + 0x660: 0x06, 0x661: 0x06, 0x662: 0x06, 0x663: 0x06, 0x664: 0x06, 0x665: 0x06, 0x666: 0x06, 0x667: 0x06, + 0x668: 0x06, 0x669: 0x06, 0x66a: 0x06, 0x66b: 0x06, 0x66c: 0x06, 0x66d: 0x06, 0x66e: 0x06, 0x66f: 0x16, + 0x670: 0x06, 0x671: 0x06, 0x672: 0x06, 0x673: 0x06, 0x674: 0x06, 0x675: 0x06, 0x676: 0x06, 0x677: 0x06, + 0x678: 0x06, 0x679: 0x06, 0x67a: 0x06, 0x67b: 0x06, 0x67c: 0x06, 0x67d: 0x06, 0x67e: 0x06, 0x67f: 0x16, + // Block 0x1a, offset 0x680 + 0x680: 
0xf2, 0x681: 0x08, 0x684: 0x08, 0x685: 0x08, 0x686: 0x08, 0x687: 0x09, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x5b, 0x6c1: 0x5b, 0x6c2: 0x5b, 0x6c3: 0x5b, 0x6c4: 0x5b, 0x6c5: 0x5b, 0x6c6: 0x5b, 0x6c7: 0x5b, + 0x6c8: 0x5b, 0x6c9: 0x5b, 0x6ca: 0x5b, 0x6cb: 0x5b, 0x6cc: 0x5b, 0x6cd: 0x5b, 0x6ce: 0x5b, 0x6cf: 0x5b, + 0x6d0: 0x5b, 0x6d1: 0x5b, 0x6d2: 0x5b, 0x6d3: 0x5b, 0x6d4: 0x5b, 0x6d5: 0x5b, 0x6d6: 0x5b, 0x6d7: 0x5b, + 0x6d8: 0x5b, 0x6d9: 0x5b, 0x6da: 0x5b, 0x6db: 0x5b, 0x6dc: 0x5b, 0x6dd: 0x5b, 0x6de: 0x5b, 0x6df: 0x5b, + 0x6e0: 0x5b, 0x6e1: 0x5b, 0x6e2: 0x5b, 0x6e3: 0x5b, 0x6e4: 0x5b, 0x6e5: 0x5b, 0x6e6: 0x5b, 0x6e7: 0x5b, + 0x6e8: 0x5b, 0x6e9: 0x5b, 0x6ea: 0x5b, 0x6eb: 0x5b, 0x6ec: 0x5b, 0x6ed: 0x5b, 0x6ee: 0x5b, 0x6ef: 0x5b, + 0x6f0: 0x5b, 0x6f1: 0x5b, 0x6f2: 0x5b, 0x6f3: 0x5b, 0x6f4: 0x5b, 0x6f5: 0x5b, 0x6f6: 0x5b, 0x6f7: 0x5b, + 0x6f8: 0x5b, 0x6f9: 0x5b, 0x6fa: 0x5b, 0x6fb: 0x5b, 0x6fc: 0x5b, 0x6fd: 0x5b, 0x6fe: 0x5b, 0x6ff: 0xf3, + // Block 0x1c, offset 0x700 + 0x720: 0x18, + 0x730: 0x09, 0x731: 0x09, 0x732: 0x09, 0x733: 0x09, 0x734: 0x09, 0x735: 0x09, 0x736: 0x09, 0x737: 0x09, + 0x738: 0x09, 0x739: 0x09, 0x73a: 0x09, 0x73b: 0x09, 0x73c: 0x09, 0x73d: 0x09, 0x73e: 0x09, 0x73f: 0x19, + // Block 0x1d, offset 0x740 + 0x740: 0x09, 0x741: 0x09, 0x742: 0x09, 0x743: 0x09, 0x744: 0x09, 0x745: 0x09, 0x746: 0x09, 0x747: 0x09, + 0x748: 0x09, 0x749: 0x09, 0x74a: 0x09, 0x74b: 0x09, 0x74c: 0x09, 0x74d: 0x09, 0x74e: 0x09, 0x74f: 0x19, +} diff --git a/vendor/github.com/clipperhouse/displaywidth/width.go b/vendor/github.com/clipperhouse/displaywidth/width.go new file mode 100644 index 000000000..61e5cff5d --- /dev/null +++ b/vendor/github.com/clipperhouse/displaywidth/width.go @@ -0,0 +1,236 @@ +package displaywidth + +import ( + "unicode/utf8" + + "github.com/clipperhouse/stringish" + "github.com/clipperhouse/uax29/v2/graphemes" +) + +// Options allows you to specify the treatment of ambiguous East Asian +// characters. When EastAsianWidth is false (default), ambiguous East Asian +// characters are treated as width 1. When EastAsianWidth is true, ambiguous +// East Asian characters are treated as width 2. +type Options struct { + EastAsianWidth bool +} + +// DefaultOptions is the default options for the display width +// calculation, which is EastAsianWidth: false. +var DefaultOptions = Options{EastAsianWidth: false} + +// String calculates the display width of a string, +// by iterating over grapheme clusters in the string +// and summing their widths. +func String(s string) int { + return DefaultOptions.String(s) +} + +// String calculates the display width of a string, for the given options, by +// iterating over grapheme clusters in the string and summing their widths. +func (options Options) String(s string) int { + // Optimization: no need to parse grapheme + switch len(s) { + case 0: + return 0 + case 1: + return asciiWidth(s[0]) + } + + width := 0 + g := graphemes.FromString(s) + for g.Next() { + width += graphemeWidth(g.Value(), options) + } + return width +} + +// Bytes calculates the display width of a []byte, +// by iterating over grapheme clusters in the byte slice +// and summing their widths. +func Bytes(s []byte) int { + return DefaultOptions.Bytes(s) +} + +// Bytes calculates the display width of a []byte, for the given options, by +// iterating over grapheme clusters in the slice and summing their widths. 
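+//
+// Illustrative example (not from the upstream file; "世" and "界" are
+// East Asian Wide, so each occupies two terminal cells):
+//
+//	Options{}.Bytes([]byte("abc"))  // 3
+//	Options{}.Bytes([]byte("世界")) // 4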
+func (options Options) Bytes(s []byte) int { + // Optimization: no need to parse grapheme + switch len(s) { + case 0: + return 0 + case 1: + return asciiWidth(s[0]) + } + + width := 0 + g := graphemes.FromBytes(s) + for g.Next() { + width += graphemeWidth(g.Value(), options) + } + return width +} + +// Rune calculates the display width of a rune. You +// should almost certainly use [String] or [Bytes] for +// most purposes. +// +// The smallest unit of display width is a grapheme +// cluster, not a rune. Iterating over runes to measure +// width is incorrect in many cases. +func Rune(r rune) int { + return DefaultOptions.Rune(r) +} + +// Rune calculates the display width of a rune, for the given options. +// +// You should almost certainly use [String] or [Bytes] for most purposes. +// +// The smallest unit of display width is a grapheme cluster, not a rune. +// Iterating over runes to measure width is incorrect in many cases. +func (options Options) Rune(r rune) int { + if r < utf8.RuneSelf { + return asciiWidth(byte(r)) + } + + // Surrogates (U+D800-U+DFFF) are invalid UTF-8. + if r >= 0xD800 && r <= 0xDFFF { + return 0 + } + + var buf [4]byte + n := utf8.EncodeRune(buf[:], r) + + // Skip the grapheme iterator + return graphemeWidth(buf[:n], options) +} + +const _Default property = 0 + +// TruncateString truncates a string to the given maxWidth, and appends the +// given tail if the string is truncated. +// +// It ensures the total width, including the width of the tail, is less than or +// equal to maxWidth. +func (options Options) TruncateString(s string, maxWidth int, tail string) string { + maxWidthWithoutTail := maxWidth - options.String(tail) + + var pos, total int + g := graphemes.FromString(s) + for g.Next() { + gw := graphemeWidth(g.Value(), options) + if total+gw <= maxWidthWithoutTail { + pos = g.End() + } + total += gw + if total > maxWidth { + return s[:pos] + tail + } + } + // No truncation + return s +} + +// TruncateString truncates a string to the given maxWidth, and appends the +// given tail if the string is truncated. +// +// It ensures the total width, including the width of the tail, is less than or +// equal to maxWidth. +func TruncateString(s string, maxWidth int, tail string) string { + return DefaultOptions.TruncateString(s, maxWidth, tail) +} + +// TruncateBytes truncates a []byte to the given maxWidth, and appends the +// given tail if the []byte is truncated. +// +// It ensures the total width, including the width of the tail, is less than or +// equal to maxWidth. +func (options Options) TruncateBytes(s []byte, maxWidth int, tail []byte) []byte { + maxWidthWithoutTail := maxWidth - options.Bytes(tail) + + var pos, total int + g := graphemes.FromBytes(s) + for g.Next() { + gw := graphemeWidth(g.Value(), options) + if total+gw <= maxWidthWithoutTail { + pos = g.End() + } + total += gw + if total > maxWidth { + result := make([]byte, 0, pos+len(tail)) + result = append(result, s[:pos]...) + result = append(result, tail...) + return result + } + } + // No truncation + return s +} + +// TruncateBytes truncates a []byte to the given maxWidth, and appends the +// given tail if the []byte is truncated. +// +// It ensures the total width, including the width of the tail, is less than or +// equal to maxWidth. +func TruncateBytes(s []byte, maxWidth int, tail []byte) []byte { + return DefaultOptions.TruncateBytes(s, maxWidth, tail) +} + +// graphemeWidth returns the display width of a grapheme cluster. +// The passed string must be a single grapheme cluster. 
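+//
+// Illustrative example: "e\u0301" (e followed by U+0301, combining acute)
+// is a single grapheme cluster of width 1, since the combining mark adds
+// no width; a typical emoji grapheme is width 2.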
+func graphemeWidth[T stringish.Interface](s T, options Options) int { + // Optimization: no need to look up properties + switch len(s) { + case 0: + return 0 + case 1: + return asciiWidth(s[0]) + } + + p, sz := lookup(s) + prop := property(p) + + // Variation Selector 16 (VS16) requests emoji presentation + if prop != _Wide && sz > 0 && len(s) >= sz+3 { + vs := s[sz : sz+3] + if isVS16(vs) { + prop = _Wide + } + // VS15 (0x8E) requests text presentation but does not affect width, + // in my reading of Unicode TR51. Falls through to return the base + // character's property. + } + + if options.EastAsianWidth && prop == _East_Asian_Ambiguous { + prop = _Wide + } + + if prop > upperBound { + prop = _Default + } + + return propertyWidths[prop] +} + +func asciiWidth(b byte) int { + if b <= 0x1F || b == 0x7F { + return 0 + } + return 1 +} + +// isVS16 checks if the slice matches VS16 (U+FE0F) UTF-8 encoding +// (EF B8 8F). It assumes len(s) >= 3. +func isVS16[T stringish.Interface](s T) bool { + return s[0] == 0xEF && s[1] == 0xB8 && s[2] == 0x8F +} + +// propertyWidths is a jump table of sorts, instead of a switch +var propertyWidths = [4]int{ + _Default: 1, + _Zero_Width: 0, + _Wide: 2, + _East_Asian_Ambiguous: 1, +} + +const upperBound = property(len(propertyWidths) - 1) diff --git a/vendor/github.com/clipperhouse/stringish/.gitignore b/vendor/github.com/clipperhouse/stringish/.gitignore new file mode 100644 index 000000000..12fbfb739 --- /dev/null +++ b/vendor/github.com/clipperhouse/stringish/.gitignore @@ -0,0 +1,2 @@ +.DS_Store +*.test diff --git a/vendor/github.com/clipperhouse/stringish/LICENSE b/vendor/github.com/clipperhouse/stringish/LICENSE new file mode 100644 index 000000000..4b8064eb3 --- /dev/null +++ b/vendor/github.com/clipperhouse/stringish/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 Matt Sherman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/clipperhouse/stringish/README.md b/vendor/github.com/clipperhouse/stringish/README.md new file mode 100644 index 000000000..fa1f7cc67 --- /dev/null +++ b/vendor/github.com/clipperhouse/stringish/README.md @@ -0,0 +1,64 @@ +# stringish + +A small Go module that provides a generic type constraint for “string-like” +data, and a utf8 package that works with both strings and byte slices +without conversions. 
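+
+Any type whose underlying type is `[]byte` or `string` satisfies the
+constraint: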
+ +```go +type Interface interface { + ~[]byte | ~string +} +``` + +[![Go Reference](https://pkg.go.dev/badge/github.com/clipperhouse/stringish/utf8.svg)](https://pkg.go.dev/github.com/clipperhouse/stringish/utf8) +[![Test Status](https://github.com/clipperhouse/stringish/actions/workflows/gotest.yml/badge.svg)](https://github.com/clipperhouse/stringish/actions/workflows/gotest.yml) + +## Install + +``` +go get github.com/clipperhouse/stringish +``` + +## Examples + +```go +import ( + "github.com/clipperhouse/stringish" + "github.com/clipperhouse/stringish/utf8" +) + +s := "Hello, 世界" +r, size := utf8.DecodeRune(s) // not DecodeRuneInString 🎉 + +b := []byte("Hello, 世界") +r, size = utf8.DecodeRune(b) // same API! + +func MyFoo[T stringish.Interface](s T) T { + // pass a string or a []byte + // iterate, slice, transform, whatever +} +``` + +## Motivation + +Sometimes we want APIs to accept `string` or `[]byte` without having to convert +between those types. That conversion usually allocates! + +By implementing with `stringish.Interface`, we can have a single API, and a +single implementation for both types: one `Foo` instead of `Foo` and +`FooString`. + +We have converted the +[`unicode/utf8` package](https://github.com/clipperhouse/stringish/blob/main/utf8/utf8.go) +as an example -- note the absence of `*InString` funcs. We might look at `x/text` +next. + +## Used by + +- clipperhouse/uax29: [stringish trie](https://github.com/clipperhouse/uax29/blob/master/graphemes/trie.go#L27), [stringish iterator](https://github.com/clipperhouse/uax29/blob/master/internal/iterators/iterator.go#L9), [stringish SplitFunc](https://github.com/clipperhouse/uax29/blob/master/graphemes/splitfunc.go#L21) + +- [clipperhouse/displaywidth](https://github.com/clipperhouse/displaywidth) + +## Prior discussion + +- [Consideration of similar by the Go team](https://github.com/golang/go/issues/48643) diff --git a/vendor/github.com/clipperhouse/stringish/interface.go b/vendor/github.com/clipperhouse/stringish/interface.go new file mode 100644 index 000000000..adfeab61e --- /dev/null +++ b/vendor/github.com/clipperhouse/stringish/interface.go @@ -0,0 +1,5 @@ +package stringish + +type Interface interface { + ~[]byte | ~string +} diff --git a/vendor/github.com/clipperhouse/uax29/v2/graphemes/README.md b/vendor/github.com/clipperhouse/uax29/v2/graphemes/README.md index 4d9a6d717..dc14d11e2 100644 --- a/vendor/github.com/clipperhouse/uax29/v2/graphemes/README.md +++ b/vendor/github.com/clipperhouse/uax29/v2/graphemes/README.md @@ -1,5 +1,9 @@ An implementation of grapheme cluster boundaries from [Unicode text segmentation](https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries) (UAX 29), for Unicode version 15.0.0.
+[![Documentation](https://pkg.go.dev/badge/github.com/clipperhouse/uax29/v2/graphemes.svg)](https://pkg.go.dev/github.com/clipperhouse/uax29/v2/graphemes) +![Tests](https://github.com/clipperhouse/uax29/actions/workflows/gotest.yml/badge.svg) +![Fuzz](https://github.com/clipperhouse/uax29/actions/workflows/gofuzz.yml/badge.svg) + ## Quick start ``` @@ -18,15 +22,14 @@ for tokens.Next() { // Next() returns true until end of data } ``` -[![Documentation](https://pkg.go.dev/badge/github.com/clipperhouse/uax29/v2/graphemes.svg)](https://pkg.go.dev/github.com/clipperhouse/uax29/v2/graphemes) - _A grapheme is a “single visible character”, which might be as simple as a single letter, or a complex emoji that consists of several Unicode code points._ ## Conformance -We use the Unicode [test suite](https://unicode.org/reports/tr41/tr41-26.html#Tests29). Status: +We use the Unicode [test suite](https://unicode.org/reports/tr41/tr41-26.html#Tests29). -![Go](https://github.com/clipperhouse/uax29/actions/workflows/gotest.yml/badge.svg) +![Tests](https://github.com/clipperhouse/uax29/actions/workflows/gotest.yml/badge.svg) +![Fuzz](https://github.com/clipperhouse/uax29/actions/workflows/gofuzz.yml/badge.svg) ## APIs @@ -71,9 +74,18 @@ for tokens.Next() { // Next() returns true until end of data } ``` -### Performance +### Benchmarks -On a Mac M2 laptop, we see around 200MB/s, or around 100 million graphemes per second. You should see ~constant memory, and no allocations. +On a Mac M2 laptop, we see around 200MB/s, or around 100 million graphemes per second, and no allocations. + +``` +goos: darwin +goarch: arm64 +pkg: github.com/clipperhouse/uax29/graphemes/comparative +cpu: Apple M2 +BenchmarkGraphemes/clipperhouse/uax29-8 173805 ns/op 201.16 MB/s 0 B/op 0 allocs/op +BenchmarkGraphemes/rivo/uniseg-8 2045128 ns/op 17.10 MB/s 0 B/op 0 allocs/op +``` ### Invalid inputs diff --git a/vendor/github.com/clipperhouse/uax29/v2/graphemes/iterator.go b/vendor/github.com/clipperhouse/uax29/v2/graphemes/iterator.go index 14b4ea2c4..1eaaa534c 100644 --- a/vendor/github.com/clipperhouse/uax29/v2/graphemes/iterator.go +++ b/vendor/github.com/clipperhouse/uax29/v2/graphemes/iterator.go @@ -1,8 +1,11 @@ package graphemes -import "github.com/clipperhouse/uax29/v2/internal/iterators" +import ( + "github.com/clipperhouse/stringish" + "github.com/clipperhouse/uax29/v2/internal/iterators" +) -type Iterator[T iterators.Stringish] struct { +type Iterator[T stringish.Interface] struct { *iterators.Iterator[T] } diff --git a/vendor/github.com/clipperhouse/uax29/v2/graphemes/splitfunc.go b/vendor/github.com/clipperhouse/uax29/v2/graphemes/splitfunc.go index 08987f549..cbe1ec9ef 100644 --- a/vendor/github.com/clipperhouse/uax29/v2/graphemes/splitfunc.go +++ b/vendor/github.com/clipperhouse/uax29/v2/graphemes/splitfunc.go @@ -3,7 +3,7 @@ package graphemes import ( "bufio" - "github.com/clipperhouse/uax29/v2/internal/iterators" + "github.com/clipperhouse/stringish" ) // is determines if lookup intersects propert(ies) @@ -18,7 +18,7 @@ const _Ignore = _Extend // See https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries.
var SplitFunc bufio.SplitFunc = splitFunc[[]byte] -func splitFunc[T iterators.Stringish](data T, atEOF bool) (advance int, token T, err error) { +func splitFunc[T stringish.Interface](data T, atEOF bool) (advance int, token T, err error) { var empty T if len(data) == 0 { return 0, empty, nil diff --git a/vendor/github.com/clipperhouse/uax29/v2/graphemes/trie.go b/vendor/github.com/clipperhouse/uax29/v2/graphemes/trie.go index c8c6c33bc..8aaabfacf 100644 --- a/vendor/github.com/clipperhouse/uax29/v2/graphemes/trie.go +++ b/vendor/github.com/clipperhouse/uax29/v2/graphemes/trie.go @@ -1,10 +1,10 @@ package graphemes +import "github.com/clipperhouse/stringish" + // generated by github.com/clipperhouse/uax29/v2 // from https://www.unicode.org/Public/15.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -import "github.com/clipperhouse/uax29/v2/internal/iterators" - type property uint16 const ( @@ -27,7 +27,7 @@ const ( // lookup returns the trie value for the first UTF-8 encoding in s and // the width in bytes of this encoding. The size will be 0 if s does not // hold enough bytes to complete the encoding. len(s) must be greater than 0. -func lookup[T iterators.Stringish](s T) (v property, sz int) { +func lookup[T stringish.Interface](s T) (v property, sz int) { c0 := s[0] switch { case c0 < 0x80: // is ASCII diff --git a/vendor/github.com/clipperhouse/uax29/v2/internal/iterators/iterator.go b/vendor/github.com/clipperhouse/uax29/v2/internal/iterators/iterator.go index 17e9b550b..e21348638 100644 --- a/vendor/github.com/clipperhouse/uax29/v2/internal/iterators/iterator.go +++ b/vendor/github.com/clipperhouse/uax29/v2/internal/iterators/iterator.go @@ -1,14 +1,12 @@ package iterators -type Stringish interface { - []byte | string -} +import "github.com/clipperhouse/stringish" -type SplitFunc[T Stringish] func(T, bool) (int, T, error) +type SplitFunc[T stringish.Interface] func(T, bool) (int, T, error) // Iterator is a generic iterator for words that are either []byte or string. // Iterate while Next() is true, and access the word via Value(). -type Iterator[T Stringish] struct { +type Iterator[T stringish.Interface] struct { split SplitFunc[T] data T start int @@ -16,7 +14,7 @@ type Iterator[T Stringish] struct { } // New creates a new Iterator for the given data and SplitFunc. 
-func New[T Stringish](split SplitFunc[T], data T) *Iterator[T] { +func New[T stringish.Interface](split SplitFunc[T], data T) *Iterator[T] { return &Iterator[T]{ split: split, data: data, @@ -83,3 +81,20 @@ func (iter *Iterator[T]) Reset() { iter.start = 0 iter.pos = 0 } + +func (iter *Iterator[T]) First() T { + if len(iter.data) == 0 { + return iter.data + } + advance, _, err := iter.split(iter.data, true) + if err != nil { + panic(err) + } + if advance <= 0 { + panic("SplitFunc returned a zero or negative advance") + } + if advance > len(iter.data) { + panic("SplitFunc advanced beyond the end of the data") + } + return iter.data[:advance] +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go index 052fc8d32..091061346 100644 --- a/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go +++ b/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go @@ -14,14 +14,14 @@ import "unsafe" type storageBuf [maxRate / 8]uint64 func (b *storageBuf) asBytes() *[maxRate]byte { - return (*[maxRate]byte)(unsafe.Pointer(b)) + return (*[maxRate]byte)(unsafe.Pointer(b)) //nolint:gosec } // xorIn uses unaligned reads and writes to update d.a to contain d.a // XOR buf. func xorIn(d *State, buf []byte) { n := len(buf) - bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8] + bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8] //nolint:gosec if n >= 72 { d.a[0] ^= bw[0] d.a[1] ^= bw[1] @@ -56,6 +56,6 @@ func xorIn(d *State, buf []byte) { } func copyOut(d *State, buf []byte) { - ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0])) + ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0])) //nolint:gosec copy(buf, ab[:]) } diff --git a/vendor/github.com/cloudflare/circl/sign/sign.go b/vendor/github.com/cloudflare/circl/sign/sign.go index 557d6f096..1247f1b62 100644 --- a/vendor/github.com/cloudflare/circl/sign/sign.go +++ b/vendor/github.com/cloudflare/circl/sign/sign.go @@ -38,6 +38,12 @@ type PrivateKey interface { encoding.BinaryMarshaler } +// A private key that retains the seed with which it was generated. +type Seeded interface { + // returns the seed if retained, otherwise nil + Seed() []byte +} + // A Scheme represents a specific instance of a signature scheme. type Scheme interface { // Name of the scheme. diff --git a/vendor/github.com/cyphar/filepath-securejoin/.golangci.yml b/vendor/github.com/cyphar/filepath-securejoin/.golangci.yml index e965034ed..3e8dd99bd 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/.golangci.yml +++ b/vendor/github.com/cyphar/filepath-securejoin/.golangci.yml @@ -9,6 +9,10 @@ version: "2" +run: + build-tags: + - libpathrs + linters: enable: - asasalint diff --git a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md index 6862467c2..6d016d05c 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md +++ b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md @@ -6,6 +6,82 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] ## +## [0.6.1] - 2025-11-19 ## + +> At last up jumped the cunning spider, and fiercely held her fast. + +### Fixed ### +- Our logic for deciding whether to use `openat2(2)` or fallback to an `O_PATH` + resolver would cache the result to avoid doing needless test runs of + `openat2(2)`.
However, this causes issues when `pathrs-lite` is being used by + a program that applies new seccomp-bpf filters onto itself -- if the filter + denies `openat2(2)` then we would return that error rather than falling back + to the `O_PATH` resolver. To resolve this issue, we no longer cache the + result if `openat2(2)` was successful, only if there was an error. +- A file descriptor leak in our `openat2` wrapper (when doing the necessary + `dup` for `RESOLVE_IN_ROOT`) has been removed. + +## [0.5.2] - 2025-11-19 ## + +> "Will you walk into my parlour?" said a spider to a fly. + +### Fixed ### +- Our logic for deciding whether to use `openat2(2)` or fallback to an `O_PATH` + resolver would cache the result to avoid doing needless test runs of + `openat2(2)`. However, this causes issues when `pathrs-lite` is being used by + a program that applies new seccomp-bpf filters onto itself -- if the filter + denies `openat2(2)` then we would return that error rather than falling back + to the `O_PATH` resolver. To resolve this issue, we no longer cache the + result if `openat2(2)` was successful, only if there was an error. +- A file descriptor leak in our `openat2` wrapper (when doing the necessary + `dup` for `RESOLVE_IN_ROOT`) has been removed. + +## [0.6.0] - 2025-11-03 ## + +> By the Power of Greyskull! + +### Breaking ### +- The deprecated `MkdirAll`, `MkdirAllHandle`, `OpenInRoot`, `OpenatInRoot` and + `Reopen` wrappers have been removed. Please switch to using `pathrs-lite` + directly. + +### Added ### +- `pathrs-lite` now has support for using libpathrs as a backend. This is + opt-in and can be enabled at build time with the `libpathrs` build tag. The + intention is to allow for downstream libraries and other projects to make use + of the pure-Go `github.com/cyphar/filepath-securejoin/pathrs-lite` package + and distributors can then opt-in to using `libpathrs` for the entire binary + if they wish. + +## [0.5.1] - 2025-10-31 ## + +> Spooky scary skeletons send shivers down your spine! + +### Changed ### +- `openat2` can return `-EAGAIN` if it detects a possible attack in certain + scenarios (namely if there was a rename or mount while walking a path with a + `..` component). While this is necessary to avoid a denial-of-service in the + kernel, it does require retry loops in userspace. + + In previous versions, `pathrs-lite` would retry `openat2` 32 times before + returning an error, but we've received user reports that this limit can be + hit on systems with very heavy load. In some synthetic benchmarks (testing + the worst-case of an attacker doing renames in a tight loop on every core of + a 16-core machine) we managed to get a ~3% failure rate in runc. We have + improved this situation in two ways: + + * We have now increased this limit to 128, which should be good enough for + most use-cases without becoming a denial-of-service vector (the number of + syscalls called by the `O_PATH` resolver in a typical case is within the + same ballpark). The same benchmarks show a failure rate of ~0.12% which + (while not zero) is probably sufficient for most users. + + * In addition, we now return a `unix.EAGAIN` error that is bubbled up and can + be detected by callers. This means that callers with stricter requirements + to avoid spurious errors can choose to do their own infinite `EAGAIN` retry + loop (though we would strongly recommend users use time-based deadlines in + such retry loops to avoid potentially unbounded denials-of-service). + ## [0.5.0] - 2025-09-26 ## > Let the past die. 
Kill it if you have to. @@ -354,7 +430,11 @@ This is our first release of `github.com/cyphar/filepath-securejoin`, containing a full implementation with a coverage of 93.5% (the only missing cases are the error cases, which are hard to mocktest at the moment). -[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.0...HEAD +[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.6.1...HEAD +[0.6.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.6.0...v0.6.1 +[0.6.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.0...v0.6.0 +[0.5.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.1...v0.5.2 +[0.5.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.0...v0.5.1 [0.5.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...v0.5.0 [0.4.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.0...v0.4.1 [0.4.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...v0.4.0 diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index 8f0916f76..ee6cdce3c 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.5.0 +0.6.1 diff --git a/vendor/github.com/cyphar/filepath-securejoin/deprecated_linux.go b/vendor/github.com/cyphar/filepath-securejoin/deprecated_linux.go deleted file mode 100644 index 3e427b164..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/deprecated_linux.go +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//go:build linux - -// Copyright (C) 2024-2025 Aleksa Sarai -// Copyright (C) 2024-2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -package securejoin - -import ( - "github.com/cyphar/filepath-securejoin/pathrs-lite" -) - -var ( - // MkdirAll is a wrapper around [pathrs.MkdirAll]. - // - // Deprecated: You should use [pathrs.MkdirAll] directly instead. This - // wrapper will be removed in filepath-securejoin v0.6. - MkdirAll = pathrs.MkdirAll - - // MkdirAllHandle is a wrapper around [pathrs.MkdirAllHandle]. - // - // Deprecated: You should use [pathrs.MkdirAllHandle] directly instead. - // This wrapper will be removed in filepath-securejoin v0.6. - MkdirAllHandle = pathrs.MkdirAllHandle - - // OpenInRoot is a wrapper around [pathrs.OpenInRoot]. - // - // Deprecated: You should use [pathrs.OpenInRoot] directly instead. This - // wrapper will be removed in filepath-securejoin v0.6. - OpenInRoot = pathrs.OpenInRoot - - // OpenatInRoot is a wrapper around [pathrs.OpenatInRoot]. - // - // Deprecated: You should use [pathrs.OpenatInRoot] directly instead. This - // wrapper will be removed in filepath-securejoin v0.6. - OpenatInRoot = pathrs.OpenatInRoot - - // Reopen is a wrapper around [pathrs.Reopen]. - // - // Deprecated: You should use [pathrs.Reopen] directly instead. This - // wrapper will be removed in filepath-securejoin v0.6. 
- Reopen = pathrs.Reopen -) diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/README.md b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/README.md deleted file mode 100644 index 1be727e75..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/README.md +++ /dev/null @@ -1,33 +0,0 @@ -## `pathrs-lite` ## - -`github.com/cyphar/filepath-securejoin/pathrs-lite` provides a minimal **pure -Go** implementation of the core bits of [libpathrs][]. This is not intended to -be a complete replacement for libpathrs, instead it is mainly intended to be -useful as a transition tool for existing Go projects. - -The long-term plan for `pathrs-lite` is to provide a build tag that will cause -all `pathrs-lite` operations to call into libpathrs directly, thus removing -code duplication for projects that wish to make use of libpathrs (and providing -the ability for software packagers to opt-in to libpathrs support without -needing to patch upstream). - -[libpathrs]: https://github.com/cyphar/libpathrs - -### License ### - -Most of this subpackage is licensed under the Mozilla Public License (version -2.0). For more information, see the top-level [COPYING.md][] and -[LICENSE.MPL-2.0][] files, as well as the individual license headers for each -file. - -``` -Copyright (C) 2024-2025 Aleksa Sarai -Copyright (C) 2024-2025 SUSE LLC - -This Source Code Form is subject to the terms of the Mozilla Public -License, v. 2.0. If a copy of the MPL was not distributed with this -file, You can obtain one at https://mozilla.org/MPL/2.0/. -``` - -[COPYING.md]: ../COPYING.md -[LICENSE.MPL-2.0]: ../LICENSE.MPL-2.0 diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/doc.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/doc.go deleted file mode 100644 index d3d745175..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//go:build linux - -// Copyright (C) 2024-2025 Aleksa Sarai -// Copyright (C) 2024-2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -// Package pathrs (pathrs-lite) is a less complete pure Go implementation of -// some of the APIs provided by [libpathrs]. -package pathrs diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert/assert.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert/assert.go deleted file mode 100644 index 595dfbf1a..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert/assert.go +++ /dev/null @@ -1,30 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -// Copyright (C) 2025 Aleksa Sarai -// Copyright (C) 2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -// Package assert provides some basic assertion helpers for Go. -package assert - -import ( - "fmt" -) - -// Assert panics if the predicate is false with the provided argument. -func Assert(predicate bool, msg any) { - if !predicate { - panic(msg) - } -} - -// Assertf panics if the predicate is false and formats the message using the -// same formatting as [fmt.Printf]. 
-// -// [fmt.Printf]: https://pkg.go.dev/fmt#Printf -func Assertf(predicate bool, fmtMsg string, args ...any) { - Assert(predicate, fmt.Sprintf(fmtMsg, args...)) -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/errors.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/errors.go deleted file mode 100644 index c26e440e9..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/errors.go +++ /dev/null @@ -1,30 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -// Copyright (C) 2024-2025 Aleksa Sarai -// Copyright (C) 2024-2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -// Package internal contains unexported common code for filepath-securejoin. -package internal - -import ( - "errors" -) - -var ( - // ErrPossibleAttack indicates that some attack was detected. - ErrPossibleAttack = errors.New("possible attack detected") - - // ErrPossibleBreakout indicates that during an operation we ended up in a - // state that could be a breakout but we detected it. - ErrPossibleBreakout = errors.New("possible breakout detected") - - // ErrInvalidDirectory indicates an unlinked directory. - ErrInvalidDirectory = errors.New("wandered into deleted directory") - - // ErrDeletedInode indicates an unlinked file (non-directory). - ErrDeletedInode = errors.New("cannot verify path of deleted inode") -) diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/at_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/at_linux.go deleted file mode 100644 index 091054913..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/at_linux.go +++ /dev/null @@ -1,148 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//go:build linux - -// Copyright (C) 2024-2025 Aleksa Sarai -// Copyright (C) 2024-2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -package fd - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - - "golang.org/x/sys/unix" - - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" -) - -// prepareAtWith returns -EBADF (an invalid fd) if dir is nil, otherwise using -// the dir.Fd(). We use -EBADF because in filepath-securejoin we generally -// don't want to allow relative-to-cwd paths. The returned path is an -// *informational* string that describes a reasonable pathname for the given -// *at(2) arguments. You must not use the full path for any actual filesystem -// operations. -func prepareAt(dir Fd, path string) (dirFd int, unsafeUnmaskedPath string) { - dirFd, dirPath := -int(unix.EBADF), "." - if dir != nil { - dirFd, dirPath = int(dir.Fd()), dir.Name() - } - if !filepath.IsAbs(path) { - // only prepend the dirfd path for relative paths - path = dirPath + "/" + path - } - // NOTE: If path is "." or "", the returned path won't be filepath.Clean, - // but that's okay since this path is either used for errors (in which case - // a trailing "/" or "/." is important information) or will be - // filepath.Clean'd later (in the case of fd.Openat). - return dirFd, path -} - -// Openat is an [Fd]-based wrapper around unix.Openat. 
-func Openat(dir Fd, path string, flags int, mode int) (*os.File, error) { //nolint:unparam // wrapper func - dirFd, fullPath := prepareAt(dir, path) - // Make sure we always set O_CLOEXEC. - flags |= unix.O_CLOEXEC - fd, err := unix.Openat(dirFd, path, flags, uint32(mode)) - if err != nil { - return nil, &os.PathError{Op: "openat", Path: fullPath, Err: err} - } - runtime.KeepAlive(dir) - // openat is only used with lexically-safe paths so we can use - // filepath.Clean here, and also the path itself is not going to be used - // for actual path operations. - fullPath = filepath.Clean(fullPath) - return os.NewFile(uintptr(fd), fullPath), nil -} - -// Fstatat is an [Fd]-based wrapper around unix.Fstatat. -func Fstatat(dir Fd, path string, flags int) (unix.Stat_t, error) { - dirFd, fullPath := prepareAt(dir, path) - var stat unix.Stat_t - if err := unix.Fstatat(dirFd, path, &stat, flags); err != nil { - return stat, &os.PathError{Op: "fstatat", Path: fullPath, Err: err} - } - runtime.KeepAlive(dir) - return stat, nil -} - -// Faccessat is an [Fd]-based wrapper around unix.Faccessat. -func Faccessat(dir Fd, path string, mode uint32, flags int) error { - dirFd, fullPath := prepareAt(dir, path) - err := unix.Faccessat(dirFd, path, mode, flags) - if err != nil { - err = &os.PathError{Op: "faccessat", Path: fullPath, Err: err} - } - runtime.KeepAlive(dir) - return err -} - -// Readlinkat is an [Fd]-based wrapper around unix.Readlinkat. -func Readlinkat(dir Fd, path string) (string, error) { - dirFd, fullPath := prepareAt(dir, path) - size := 4096 - for { - linkBuf := make([]byte, size) - n, err := unix.Readlinkat(dirFd, path, linkBuf) - if err != nil { - return "", &os.PathError{Op: "readlinkat", Path: fullPath, Err: err} - } - runtime.KeepAlive(dir) - if n != size { - return string(linkBuf[:n]), nil - } - // Possible truncation, resize the buffer. - size *= 2 - } -} - -const ( - // STATX_MNT_ID_UNIQUE is provided in golang.org/x/sys@v0.20.0, but in order to - // avoid bumping the requirement for a single constant we can just define it - // ourselves. - _STATX_MNT_ID_UNIQUE = 0x4000 //nolint:revive // unix.* name - - // We don't care which mount ID we get. The kernel will give us the unique - // one if it is supported. If the kernel doesn't support - // STATX_MNT_ID_UNIQUE, the bit is ignored and the returned request mask - // will only contain STATX_MNT_ID (if supported). - wantStatxMntMask = _STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID -) - -var hasStatxMountID = gocompat.SyncOnceValue(func() bool { - var stx unix.Statx_t - err := unix.Statx(-int(unix.EBADF), "/", 0, wantStatxMntMask, &stx) - return err == nil && stx.Mask&wantStatxMntMask != 0 -}) - -// GetMountID gets the mount identifier associated with the fd and path -// combination. It is effectively a wrapper around fetching -// STATX_MNT_ID{,_UNIQUE} with unix.Statx, but with a fallback to 0 if the -// kernel doesn't support the feature. -func GetMountID(dir Fd, path string) (uint64, error) { - // If we don't have statx(STATX_MNT_ID*) support, we can't do anything. - if !hasStatxMountID() { - return 0, nil - } - - dirFd, fullPath := prepareAt(dir, path) - - var stx unix.Statx_t - err := unix.Statx(dirFd, path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, wantStatxMntMask, &stx) - if stx.Mask&wantStatxMntMask == 0 { - // It's not a kernel limitation, for some reason we couldn't get a - // mount ID. Assume it's some kind of attack. 
- err = fmt.Errorf("could not get mount id: %w", err) - } - if err != nil { - return 0, &os.PathError{Op: "statx(STATX_MNT_ID_...)", Path: fullPath, Err: err} - } - runtime.KeepAlive(dir) - return stx.Mnt_id, nil -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd.go deleted file mode 100644 index d2206a386..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd.go +++ /dev/null @@ -1,55 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -// Copyright (C) 2025 Aleksa Sarai -// Copyright (C) 2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -// Package fd provides a drop-in interface-based replacement of [*os.File] that -// allows for things like noop-Close wrappers to be used. -// -// [*os.File]: https://pkg.go.dev/os#File -package fd - -import ( - "io" - "os" -) - -// Fd is an interface that mirrors most of the API of [*os.File], allowing you -// to create wrappers that can be used in place of [*os.File]. -// -// [*os.File]: https://pkg.go.dev/os#File -type Fd interface { - io.Closer - Name() string - Fd() uintptr -} - -// Compile-time interface checks. -var ( - _ Fd = (*os.File)(nil) - _ Fd = noClose{} -) - -type noClose struct{ inner Fd } - -func (f noClose) Name() string { return f.inner.Name() } -func (f noClose) Fd() uintptr { return f.inner.Fd() } - -func (f noClose) Close() error { return nil } - -// NopCloser returns an [*os.File]-like object where the [Close] method is now -// a no-op. -// -// Note that for [*os.File] and similar objects, the Go garbage collector will -// still call [Close] on the underlying file unless you use -// [runtime.SetFinalizer] to disable this behaviour. This is up to the caller -// to do (if necessary). -// -// [*os.File]: https://pkg.go.dev/os#File -// [Close]: https://pkg.go.dev/io#Closer -// [runtime.SetFinalizer]: https://pkg.go.dev/runtime#SetFinalizer -func NopCloser(f Fd) Fd { return noClose{inner: f} } diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd_linux.go deleted file mode 100644 index e1ec3c0b8..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd_linux.go +++ /dev/null @@ -1,78 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//go:build linux - -// Copyright (C) 2024-2025 Aleksa Sarai -// Copyright (C) 2024-2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -package fd - -import ( - "fmt" - "os" - "runtime" - - "golang.org/x/sys/unix" - - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal" -) - -// DupWithName creates a new file descriptor referencing the same underlying -// file, but with the provided name instead of fd.Name(). -func DupWithName(fd Fd, name string) (*os.File, error) { - fd2, err := unix.FcntlInt(fd.Fd(), unix.F_DUPFD_CLOEXEC, 0) - if err != nil { - return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err) - } - runtime.KeepAlive(fd) - return os.NewFile(uintptr(fd2), name), nil -} - -// Dup creates a new file description referencing the same underlying file. 
-func Dup(fd Fd) (*os.File, error) { - return DupWithName(fd, fd.Name()) -} - -// Fstat is an [Fd]-based wrapper around unix.Fstat. -func Fstat(fd Fd) (unix.Stat_t, error) { - var stat unix.Stat_t - if err := unix.Fstat(int(fd.Fd()), &stat); err != nil { - return stat, &os.PathError{Op: "fstat", Path: fd.Name(), Err: err} - } - runtime.KeepAlive(fd) - return stat, nil -} - -// Fstatfs is an [Fd]-based wrapper around unix.Fstatfs. -func Fstatfs(fd Fd) (unix.Statfs_t, error) { - var statfs unix.Statfs_t - if err := unix.Fstatfs(int(fd.Fd()), &statfs); err != nil { - return statfs, &os.PathError{Op: "fstatfs", Path: fd.Name(), Err: err} - } - runtime.KeepAlive(fd) - return statfs, nil -} - -// IsDeadInode detects whether the file has been unlinked from a filesystem and -// is thus a "dead inode" from the kernel's perspective. -func IsDeadInode(file Fd) error { - // If the nlink of a file drops to 0, there is an attacker deleting - // directories during our walk, which could result in weird /proc values. - // It's better to error out in this case. - stat, err := Fstat(file) - if err != nil { - return fmt.Errorf("check for dead inode: %w", err) - } - if stat.Nlink == 0 { - err := internal.ErrDeletedInode - if stat.Mode&unix.S_IFMT == unix.S_IFDIR { - err = internal.ErrInvalidDirectory - } - return fmt.Errorf("%w %q", err, file.Name()) - } - return nil -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/mount_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/mount_linux.go deleted file mode 100644 index 77549c7a9..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/mount_linux.go +++ /dev/null @@ -1,54 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//go:build linux - -// Copyright (C) 2024-2025 Aleksa Sarai -// Copyright (C) 2024-2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -package fd - -import ( - "os" - "runtime" - - "golang.org/x/sys/unix" -) - -// Fsopen is an [Fd]-based wrapper around unix.Fsopen. -func Fsopen(fsName string, flags int) (*os.File, error) { - // Make sure we always set O_CLOEXEC. - flags |= unix.FSOPEN_CLOEXEC - fd, err := unix.Fsopen(fsName, flags) - if err != nil { - return nil, os.NewSyscallError("fsopen "+fsName, err) - } - return os.NewFile(uintptr(fd), "fscontext:"+fsName), nil -} - -// Fsmount is an [Fd]-based wrapper around unix.Fsmount. -func Fsmount(ctx Fd, flags, mountAttrs int) (*os.File, error) { - // Make sure we always set O_CLOEXEC. - flags |= unix.FSMOUNT_CLOEXEC - fd, err := unix.Fsmount(int(ctx.Fd()), flags, mountAttrs) - if err != nil { - return nil, os.NewSyscallError("fsmount "+ctx.Name(), err) - } - return os.NewFile(uintptr(fd), "fsmount:"+ctx.Name()), nil -} - -// OpenTree is an [Fd]-based wrapper around unix.OpenTree. -func OpenTree(dir Fd, path string, flags uint) (*os.File, error) { - dirFd, fullPath := prepareAt(dir, path) - // Make sure we always set O_CLOEXEC. 
- flags |= unix.OPEN_TREE_CLOEXEC - fd, err := unix.OpenTree(dirFd, path, flags) - if err != nil { - return nil, &os.PathError{Op: "open_tree", Path: fullPath, Err: err} - } - runtime.KeepAlive(dir) - return os.NewFile(uintptr(fd), fullPath), nil -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/openat2_linux.go deleted file mode 100644 index 230530835..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/openat2_linux.go +++ /dev/null @@ -1,62 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//go:build linux - -// Copyright (C) 2024-2025 Aleksa Sarai -// Copyright (C) 2024-2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -package fd - -import ( - "errors" - "os" - "runtime" - - "golang.org/x/sys/unix" - - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal" -) - -func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool { - // RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve - // ".." while a mount or rename occurs anywhere on the system. This could - // happen spuriously, or as the result of an attacker trying to mess with - // us during lookup. - // - // In addition, scoped lookups have a "safety check" at the end of - // complete_walk which will return -EXDEV if the final path is not in the - // root. - return how.Resolve&(unix.RESOLVE_IN_ROOT|unix.RESOLVE_BENEATH) != 0 && - (errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EXDEV)) -} - -const scopedLookupMaxRetries = 32 - -// Openat2 is an [Fd]-based wrapper around unix.Openat2, but with some retry -// logic in case of EAGAIN errors. -func Openat2(dir Fd, path string, how *unix.OpenHow) (*os.File, error) { - dirFd, fullPath := prepareAt(dir, path) - // Make sure we always set O_CLOEXEC. - how.Flags |= unix.O_CLOEXEC - var tries int - for tries < scopedLookupMaxRetries { - fd, err := unix.Openat2(dirFd, path, how) - if err != nil { - if scopedLookupShouldRetry(how, err) { - // We retry a couple of times to avoid the spurious errors, and - // if we are being attacked then returning -EAGAIN is the best - // we can do. - tries++ - continue - } - return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: err} - } - runtime.KeepAlive(dir) - return os.NewFile(uintptr(fd), fullPath), nil - } - return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: internal.ErrPossibleAttack} -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/README.md b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/README.md deleted file mode 100644 index 5dcb6ae00..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/README.md +++ /dev/null @@ -1,10 +0,0 @@ -## gocompat ## - -This directory contains backports of stdlib functions from later Go versions so -the filepath-securejoin can continue to be used by projects that are stuck with -Go 1.18 support. Note that often filepath-securejoin is added in security -patches for old releases, so avoiding the need to bump Go compiler requirements -is a huge plus to downstreams. - -The source code is licensed under the same license as the Go stdlib. See the -source files for the precise license information. 
diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/doc.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/doc.go deleted file mode 100644 index 4b1803f58..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -//go:build linux && go1.20 - -// Copyright (C) 2025 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package gocompat includes compatibility shims (backported from future Go -// stdlib versions) to permit filepath-securejoin to be used with older Go -// versions (often filepath-securejoin is added in security patches for old -// releases, so avoiding the need to bump Go compiler requirements is a huge -// plus to downstreams). -package gocompat diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_go120.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_go120.go deleted file mode 100644 index 4a114bd3d..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_go120.go +++ /dev/null @@ -1,19 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -//go:build linux && go1.20 - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gocompat - -import ( - "fmt" -) - -// WrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except -// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap -// is only guaranteed to give you baseErr). -func WrapBaseError(baseErr, extraErr error) error { - return fmt.Errorf("%w: %w", extraErr, baseErr) -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_unsupported.go deleted file mode 100644 index 3061016a6..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_unsupported.go +++ /dev/null @@ -1,40 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux && !go1.20 - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gocompat - -import ( - "fmt" -) - -type wrappedError struct { - inner error - isError error -} - -func (err wrappedError) Is(target error) bool { - return err.isError == target -} - -func (err wrappedError) Unwrap() error { - return err.inner -} - -func (err wrappedError) Error() string { - return fmt.Sprintf("%v: %v", err.isError, err.inner) -} - -// WrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except -// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap -// is only guaranteed to give you baseErr).
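To illustrate the semantics this WrapBaseError comment describes (a stand-alone sketch, independent of the vendored code): on Go 1.20 and later a single fmt.Errorf can wrap two errors, both of which are visible to errors.Is, while errors.Unwrap reports nil for multi-wrapped errors.

    package main

    import (
    	"errors"
    	"fmt"
    )

    var (
    	errBase  = errors.New("underlying failure")
    	errExtra = errors.New("extra context")
    )

    func main() {
    	// Go >= 1.20: both wrapped errors match via errors.Is.
    	err := fmt.Errorf("%w: %w", errExtra, errBase)
    	fmt.Println(errors.Is(err, errExtra)) // true
    	fmt.Println(errors.Is(err, errBase))  // true
    	// errors.Unwrap only understands single-error wrapping, so it
    	// returns nil here -- hence the pre-1.20 fallback that follows
    	// only guarantees errors.Is behaviour.
    	fmt.Println(errors.Unwrap(err)) // <nil>
    }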
-func WrapBaseError(baseErr, extraErr error) error { - return wrappedError{ - inner: baseErr, - isError: extraErr, - } -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_go121.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_go121.go deleted file mode 100644 index d4a938186..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_go121.go +++ /dev/null @@ -1,53 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux && go1.21 - -// Copyright (C) 2024-2025 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gocompat - -import ( - "cmp" - "slices" - "sync" -) - -// SlicesDeleteFunc is equivalent to Go 1.21's slices.DeleteFunc. -func SlicesDeleteFunc[S ~[]E, E any](slice S, delFn func(E) bool) S { - return slices.DeleteFunc(slice, delFn) -} - -// SlicesContains is equivalent to Go 1.21's slices.Contains. -func SlicesContains[S ~[]E, E comparable](slice S, val E) bool { - return slices.Contains(slice, val) -} - -// SlicesClone is equivalent to Go 1.21's slices.Clone. -func SlicesClone[S ~[]E, E any](slice S) S { - return slices.Clone(slice) -} - -// SyncOnceValue is equivalent to Go 1.21's sync.OnceValue. -func SyncOnceValue[T any](f func() T) func() T { - return sync.OnceValue(f) -} - -// SyncOnceValues is equivalent to Go 1.21's sync.OnceValues. -func SyncOnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { - return sync.OnceValues(f) -} - -// CmpOrdered is equivalent to Go 1.21's cmp.Ordered generic type definition. -type CmpOrdered = cmp.Ordered - -// CmpCompare is equivalent to Go 1.21's cmp.Compare. -func CmpCompare[T CmpOrdered](x, y T) int { - return cmp.Compare(x, y) -} - -// Max2 is equivalent to Go 1.21's max builtin (but only for two parameters). -func Max2[T CmpOrdered](x, y T) T { - return max(x, y) -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_unsupported.go deleted file mode 100644 index 0ea6218aa..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_unsupported.go +++ /dev/null @@ -1,187 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux && !go1.21 - -// Copyright (C) 2021, 2022 The Go Authors. All rights reserved. -// Copyright (C) 2024-2025 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.BSD file. - -package gocompat - -import ( - "sync" -) - -// These are very minimal implementations of functions that appear in Go 1.21's -// stdlib, included so that we can build on older Go versions. Most are -// borrowed directly from the stdlib, and a few are modified to be "obviously -// correct" without needing to copy too many other helpers. - -// clearSlice is equivalent to Go 1.21's builtin clear. -// Copied from the Go 1.24 stdlib implementation. -func clearSlice[S ~[]E, E any](slice S) { - var zero E - for i := range slice { - slice[i] = zero - } -} - -// slicesIndexFunc is equivalent to Go 1.21's slices.IndexFunc. -// Copied from the Go 1.24 stdlib implementation. 
-func slicesIndexFunc[S ~[]E, E any](s S, f func(E) bool) int { - for i := range s { - if f(s[i]) { - return i - } - } - return -1 -} - -// SlicesDeleteFunc is equivalent to Go 1.21's slices.DeleteFunc. -// Copied from the Go 1.24 stdlib implementation. -func SlicesDeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { - i := slicesIndexFunc(s, del) - if i == -1 { - return s - } - // Don't start copying elements until we find one to delete. - for j := i + 1; j < len(s); j++ { - if v := s[j]; !del(v) { - s[i] = v - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] -} - -// SlicesContains is equivalent to Go 1.21's slices.Contains. -// Similar to the stdlib slices.Contains, except that we don't have -// slices.Index so we need to use slices.IndexFunc for this non-Func helper. -func SlicesContains[S ~[]E, E comparable](s S, v E) bool { - return slicesIndexFunc(s, func(e E) bool { return e == v }) >= 0 -} - -// SlicesClone is equivalent to Go 1.21's slices.Clone. -// Copied from the Go 1.24 stdlib implementation. -func SlicesClone[S ~[]E, E any](s S) S { - // Preserve nil in case it matters. - if s == nil { - return nil - } - return append(S([]E{}), s...) -} - -// SyncOnceValue is equivalent to Go 1.21's sync.OnceValue. -// Copied from the Go 1.25 stdlib implementation. -func SyncOnceValue[T any](f func() T) func() T { - // Use a struct so that there's a single heap allocation. - d := struct { - f func() T - once sync.Once - valid bool - p any - result T - }{ - f: f, - } - return func() T { - d.once.Do(func() { - defer func() { - d.f = nil - d.p = recover() - if !d.valid { - panic(d.p) - } - }() - d.result = d.f() - d.valid = true - }) - if !d.valid { - panic(d.p) - } - return d.result - } -} - -// SyncOnceValues is equivalent to Go 1.21's sync.OnceValues. -// Copied from the Go 1.25 stdlib implementation. -func SyncOnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { - // Use a struct so that there's a single heap allocation. - d := struct { - f func() (T1, T2) - once sync.Once - valid bool - p any - r1 T1 - r2 T2 - }{ - f: f, - } - return func() (T1, T2) { - d.once.Do(func() { - defer func() { - d.f = nil - d.p = recover() - if !d.valid { - panic(d.p) - } - }() - d.r1, d.r2 = d.f() - d.valid = true - }) - if !d.valid { - panic(d.p) - } - return d.r1, d.r2 - } -} - -// CmpOrdered is equivalent to Go 1.21's cmp.Ordered generic type definition. -// Copied from the Go 1.25 stdlib implementation. -type CmpOrdered interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 | - ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | - ~float32 | ~float64 | - ~string -} - -// isNaN reports whether x is a NaN without requiring the math package. -// This will always return false if T is not floating-point. -// Copied from the Go 1.25 stdlib implementation. -func isNaN[T CmpOrdered](x T) bool { - return x != x -} - -// CmpCompare is equivalent to Go 1.21's cmp.Compare. -// Copied from the Go 1.25 stdlib implementation. -func CmpCompare[T CmpOrdered](x, y T) int { - xNaN := isNaN(x) - yNaN := isNaN(y) - if xNaN { - if yNaN { - return 0 - } - return -1 - } - if yNaN { - return +1 - } - if x < y { - return -1 - } - if x > y { - return +1 - } - return 0 -} - -// Max2 is equivalent to Go 1.21's max builtin for two parameters. 
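Stepping back from the copied stdlib internals for a moment: the reason the SyncOnceValue/SyncOnceValues shims above exist is that the rest of the package leans on them for one-shot feature probes (HasOpenat2, HasNewMountAPI, getProcfsFeatures). A stand-alone sketch of the memoization behaviour on Go 1.21+, with a hypothetical probe closure:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // hasFeature runs the probe closure at most once; every later call
    // returns the cached result. (Hypothetical probe, for illustration.)
    var hasFeature = sync.OnceValue(func() bool {
    	fmt.Println("probing kernel...") // printed exactly once
    	return true
    })

    func main() {
    	fmt.Println(hasFeature()) // probing kernel... then true
    	fmt.Println(hasFeature()) // true (cached)
    }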
-func Max2[T CmpOrdered](x, y T) T { - m := x - if y > m { - m = y - } - return m -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion/kernel_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion/kernel_linux.go deleted file mode 100644 index cb6de4186..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion/kernel_linux.go +++ /dev/null @@ -1,123 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause - -// Copyright (C) 2022 The Go Authors. All rights reserved. -// Copyright (C) 2025 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.BSD file. - -// The parsing logic is very loosely based on the Go stdlib's -// src/internal/syscall/unix/kernel_version_linux.go but with an API that looks -// a bit like runc's libcontainer/system/kernelversion. -// -// TODO(cyphar): This API has been copied around to a lot of different projects -// (Docker, containerd, runc, and now filepath-securejoin) -- maybe we should -// put it in a separate project? - -// Package kernelversion provides a simple mechanism for checking whether the -// running kernel is at least as new as some baseline kernel version. This is -// often useful when checking for features that would be too complicated to -// test support for (or in cases where we know that some kernel features in -// backport-heavy kernels are broken and need to be avoided). -package kernelversion - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" - - "golang.org/x/sys/unix" - - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" -) - -// KernelVersion is a numeric representation of the key numerical elements of a -// kernel version (for instance, "4.1.2-default-1" would be represented as -// KernelVersion{4, 1, 2}). -type KernelVersion []uint64 - -func (kver KernelVersion) String() string { - var str strings.Builder - for idx, elem := range kver { - if idx != 0 { - _, _ = str.WriteRune('.') - } - _, _ = str.WriteString(strconv.FormatUint(elem, 10)) - } - return str.String() -} - -var errInvalidKernelVersion = errors.New("invalid kernel version") - -// parseKernelVersion parses a string and creates a KernelVersion based on it. -func parseKernelVersion(kverStr string) (KernelVersion, error) { - kver := make(KernelVersion, 1, 3) - for idx, ch := range kverStr { - if '0' <= ch && ch <= '9' { - v := &kver[len(kver)-1] - *v = (*v * 10) + uint64(ch-'0') - } else { - if idx == 0 || kverStr[idx-1] < '0' || '9' < kverStr[idx-1] { - // "." must be preceded by a digit while in version section - return nil, fmt.Errorf("%w %q: kernel version has dot(s) followed by non-digit in version section", errInvalidKernelVersion, kverStr) - } - if ch != '.' { - break - } - kver = append(kver, 0) - } - } - if len(kver) < 2 { - return nil, fmt.Errorf("%w %q: kernel versions must contain at least two components", errInvalidKernelVersion, kverStr) - } - return kver, nil -} - -// getKernelVersion gets the current kernel version. -var getKernelVersion = gocompat.SyncOnceValues(func() (KernelVersion, error) { - var uts unix.Utsname - if err := unix.Uname(&uts); err != nil { - return nil, err - } - // Remove the \x00 from the release. - release := uts.Release[:] - return parseKernelVersion(string(release[:bytes.IndexByte(release, 0)])) -}) - -// GreaterEqualThan returns true if the host kernel version is greater than -// or equal to the provided [KernelVersion].
When doing this comparison, any -// non-numerical suffixes of the host kernel version are ignored. -// -// If the number of components provided is not equal to the number of numerical -// components of the host kernel version, any missing components are treated as -// 0. This means that GreaterEqualThan(KernelVersion{4}) will be treated the -// same as GreaterEqualThan(KernelVersion{4, 0, 0, ..., 0, 0}), and that if the -// host kernel version is "4" then GreaterEqualThan(KernelVersion{4, 1}) will -// return false (because the host version will be treated as "4.0"). -func GreaterEqualThan(wantKver KernelVersion) (bool, error) { - hostKver, err := getKernelVersion() - if err != nil { - return false, err - } - - // Pad out the kernel version lengths to match one another. - cmpLen := gocompat.Max2(len(hostKver), len(wantKver)) - hostKver = append(hostKver, make(KernelVersion, cmpLen-len(hostKver))...) - wantKver = append(wantKver, make(KernelVersion, cmpLen-len(wantKver))...) - - for i := 0; i < cmpLen; i++ { - switch gocompat.CmpCompare(hostKver[i], wantKver[i]) { - case -1: - // host < want - return false, nil - case +1: - // host > want - return true, nil - case 0: - continue - } - } - // equal version values - return true, nil -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/doc.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/doc.go deleted file mode 100644 index 4635714f6..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -// Copyright (C) 2024-2025 Aleksa Sarai -// Copyright (C) 2024-2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -// Package linux returns information about what features are supported on the -// running kernel. -package linux diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/mount_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/mount_linux.go deleted file mode 100644 index b29905bff..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/mount_linux.go +++ /dev/null @@ -1,47 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//go:build linux - -// Copyright (C) 2024-2025 Aleksa Sarai -// Copyright (C) 2024-2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -package linux - -import ( - "golang.org/x/sys/unix" - - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion" -) - -// HasNewMountAPI returns whether the new fsopen(2) mount API is supported on -// the running kernel. -var HasNewMountAPI = gocompat.SyncOnceValue(func() bool { - // All of the pieces of the new mount API we use (fsopen, fsconfig, - // fsmount, open_tree) were added together in Linux 5.2[1,2], so we can - // just check for one of the syscalls and the others should also be - // available. - // - // Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE. 
- // This is equivalent to openat(2), but tells us if open_tree is - // available (and thus all of the other basic new mount API syscalls). - // open_tree(2) is the most light-weight syscall to test here. - // - // [1]: merge commit 400913252d09 - // [2]: - fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC) - if err != nil { - return false - } - _ = unix.Close(fd) - - // RHEL 8 has a backport of fsopen(2) that appears to have some very - // difficult to debug performance pathology. As such, it seems prudent to - // simply reject pre-5.2 kernels. - isNotBackport, _ := kernelversion.GreaterEqualThan(kernelversion.KernelVersion{5, 2}) - return isNotBackport -}) diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/openat2_linux.go deleted file mode 100644 index 399609dc3..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/openat2_linux.go +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//go:build linux - -// Copyright (C) 2024-2025 Aleksa Sarai - -// Copyright (C) 2024-2025 SUSE LLC - -// - -// This Source Code Form is subject to the terms of the Mozilla Public - -// License, v. 2.0. If a copy of the MPL was not distributed with this - -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -package linux - -import ( - "golang.org/x/sys/unix" - - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" -) - -// HasOpenat2 returns whether openat2(2) is supported on the running kernel. -var HasOpenat2 = gocompat.SyncOnceValue(func() bool { - fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT, - }) - if err != nil { - return false - } - _ = unix.Close(fd) - return true -}) diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_linux.go deleted file mode 100644 index 21e0a62e8..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_linux.go +++ /dev/null @@ -1,544 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//go:build linux - -// Copyright (C) 2024-2025 Aleksa Sarai - -// Copyright (C) 2024-2025 SUSE LLC - -// - -// This Source Code Form is subject to the terms of the Mozilla Public - -// License, v. 2.0. If a copy of the MPL was not distributed with this - -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -// Package procfs provides a safe API for operating on /proc on Linux. Note -// that this is the *internal* procfs API, mainly needed due to Go's -// restrictions on cyclic dependencies and its incredibly minimal visibility -// system without making a separate internal/ package. -package procfs - -import ( - "errors" - "fmt" - "io" - "os" - "runtime" - "strconv" - - "golang.org/x/sys/unix" - - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal" - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert" - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd" - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux" -) - -// The kernel guarantees that the root inode of a procfs mount has an -f_type of PROC_SUPER_MAGIC and st_ino of PROC_ROOT_INO.
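These two constants are what make procfs handles verifiable at all. A stand-alone sketch of the check they enable, using the equivalent PROC_SUPER_MAGIC constant from x/sys/unix (same value, 0x9fa0) rather than the package-local definitions:

    package main

    import (
    	"fmt"
    	"os"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	f, err := os.Open("/proc")
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	// The handle must be on a filesystem with the procfs magic...
    	var statfs unix.Statfs_t
    	if err := unix.Fstatfs(int(f.Fd()), &statfs); err != nil {
    		panic(err)
    	}
    	// ...and the procfs root always has inode number 1 (PROC_ROOT_INO).
    	var stat unix.Stat_t
    	if err := unix.Fstat(int(f.Fd()), &stat); err != nil {
    		panic(err)
    	}
    	fmt.Println(statfs.Type == unix.PROC_SUPER_MAGIC, stat.Ino == 1)
    }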
-const ( - procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC - procRootIno = 1 // PROC_ROOT_INO -) - -// verifyProcHandle checks that the handle is from a procfs filesystem. -// Contrast this to [verifyProcRoot], which also verifies that the handle is -// the root of a procfs mount. -func verifyProcHandle(procHandle fd.Fd) error { - if statfs, err := fd.Fstatfs(procHandle); err != nil { - return err - } else if statfs.Type != procSuperMagic { - return fmt.Errorf("%w: incorrect procfs root filesystem type 0x%x", errUnsafeProcfs, statfs.Type) - } - return nil -} - -// verifyProcRoot verifies that the handle is the root of a procfs filesystem. -// Contrast this to [verifyProcHandle], which only verifies if the handle is -// some file on procfs (regardless of what file it is). -func verifyProcRoot(procRoot fd.Fd) error { - if err := verifyProcHandle(procRoot); err != nil { - return err - } - if stat, err := fd.Fstat(procRoot); err != nil { - return err - } else if stat.Ino != procRootIno { - return fmt.Errorf("%w: incorrect procfs root inode number %d", errUnsafeProcfs, stat.Ino) - } - return nil -} - -type procfsFeatures struct { - // hasSubsetPid was added in Linux 5.8, along with hidepid=ptraceable (and - // string-based hidepid= values). Before this patchset, it was not really - // safe to try to modify procfs superblock flags because the superblock was - // shared -- so if this feature is not available, **you should not set any - // superblock flags**. - // - // 6814ef2d992a ("proc: add option to mount only a pids subset") - // fa10fed30f25 ("proc: allow to mount many instances of proc in one pid namespace") - // 24a71ce5c47f ("proc: instantiate only pids that we can ptrace on 'hidepid=4' mount option") - // 1c6c4d112e81 ("proc: use human-readable values for hidepid") - // 9ff7258575d5 ("Merge branch 'proc-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace") - hasSubsetPid bool -} - -var getProcfsFeatures = gocompat.SyncOnceValue(func() procfsFeatures { - if !linux.HasNewMountAPI() { - return procfsFeatures{} - } - procfsCtx, err := fd.Fsopen("proc", unix.FSOPEN_CLOEXEC) - if err != nil { - return procfsFeatures{} - } - defer procfsCtx.Close() //nolint:errcheck // close failures aren't critical here - - return procfsFeatures{ - hasSubsetPid: unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid") == nil, - } -}) - -func newPrivateProcMount(subset bool) (_ *Handle, Err error) { - procfsCtx, err := fd.Fsopen("proc", unix.FSOPEN_CLOEXEC) - if err != nil { - return nil, err - } - defer procfsCtx.Close() //nolint:errcheck // close failures aren't critical here - - if subset && getProcfsFeatures().hasSubsetPid { - // Try to configure hidepid=ptraceable,subset=pid if possible, but - // ignore errors. - _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "hidepid", "ptraceable") - _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid") - } - - // Get an actual handle. - if err := unix.FsconfigCreate(int(procfsCtx.Fd())); err != nil { - return nil, os.NewSyscallError("fsconfig create procfs", err) - } - // TODO: Output any information from the fscontext log to debug logs. - procRoot, err := fd.Fsmount(procfsCtx, unix.FSMOUNT_CLOEXEC, unix.MS_NODEV|unix.MS_NOEXEC|unix.MS_NOSUID) - if err != nil { - return nil, err - } - defer func() { - if Err != nil { - _ = procRoot.Close() - } - }() - return newHandle(procRoot) -} - -func clonePrivateProcMount() (_ *Handle, Err error) { - // Try to make a clone without using AT_RECURSIVE if we can. 
If this works, - // we can be sure there are no over-mounts and so if the root is valid then - // we're golden. Otherwise, we have to deal with over-mounts. - procRoot, err := fd.OpenTree(nil, "/proc", unix.OPEN_TREE_CLONE) - if err != nil || hookForcePrivateProcRootOpenTreeAtRecursive(procRoot) { - procRoot, err = fd.OpenTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE) - } - if err != nil { - return nil, fmt.Errorf("creating a detached procfs clone: %w", err) - } - defer func() { - if Err != nil { - _ = procRoot.Close() - } - }() - return newHandle(procRoot) -} - -func privateProcRoot(subset bool) (*Handle, error) { - if !linux.HasNewMountAPI() || hookForceGetProcRootUnsafe() { - return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP) - } - // Try to create a new procfs mount from scratch if we can. This ensures we - // can get a procfs mount even if /proc is fake (for whatever reason). - procRoot, err := newPrivateProcMount(subset) - if err != nil || hookForcePrivateProcRootOpenTree(procRoot) { - // Try to clone /proc then... - procRoot, err = clonePrivateProcMount() - } - return procRoot, err -} - -func unsafeHostProcRoot() (_ *Handle, Err error) { - procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) - if err != nil { - return nil, err - } - defer func() { - if Err != nil { - _ = procRoot.Close() - } - }() - return newHandle(procRoot) -} - -// Handle is a wrapper around an *os.File handle to "/proc", which can be used -// to do further procfs-related operations in a safe way. -type Handle struct { - Inner fd.Fd - // Does this handle have subset=pid set? - isSubset bool -} - -func newHandle(procRoot fd.Fd) (*Handle, error) { - if err := verifyProcRoot(procRoot); err != nil { - // This is only used in methods that - _ = procRoot.Close() - return nil, err - } - proc := &Handle{Inner: procRoot} - // With subset=pid we can be sure that /proc/uptime will not exist. - if err := fd.Faccessat(proc.Inner, "uptime", unix.F_OK, unix.AT_SYMLINK_NOFOLLOW); err != nil { - proc.isSubset = errors.Is(err, os.ErrNotExist) - } - return proc, nil -} - -// Close closes the underlying file for the Handle. -func (proc *Handle) Close() error { return proc.Inner.Close() } - -var getCachedProcRoot = gocompat.SyncOnceValue(func() *Handle { - procRoot, err := getProcRoot(true) - if err != nil { - return nil // just don't cache if we see an error - } - if !procRoot.isSubset { - return nil // we only cache verified subset=pid handles - } - - // Disarm (*Handle).Close() to stop someone from accidentally closing - // the global handle. - procRoot.Inner = fd.NopCloser(procRoot.Inner) - return procRoot -}) - -// OpenProcRoot tries to open a "safer" handle to "/proc". -func OpenProcRoot() (*Handle, error) { - if proc := getCachedProcRoot(); proc != nil { - return proc, nil - } - return getProcRoot(true) -} - -// OpenUnsafeProcRoot opens a handle to "/proc" without any overmounts or -// masked paths (but also without "subset=pid"). -func OpenUnsafeProcRoot() (*Handle, error) { return getProcRoot(false) } - -func getProcRoot(subset bool) (*Handle, error) { - proc, err := privateProcRoot(subset) - if err != nil { - // Fall back to using a /proc handle if making a private mount failed. - // If we have openat2, at least we can avoid some kinds of over-mount - // attacks, but without openat2 there's not much we can do. 
- proc, err = unsafeHostProcRoot() - } - return proc, err -} - -var hasProcThreadSelf = gocompat.SyncOnceValue(func() bool { - return unix.Access("/proc/thread-self/", unix.F_OK) == nil -}) - -var errUnsafeProcfs = errors.New("unsafe procfs detected") - -// lookup is a very minimal wrapper around [procfsLookupInRoot] which is -// intended to be called from the external API. -func (proc *Handle) lookup(subpath string) (*os.File, error) { - handle, err := procfsLookupInRoot(proc.Inner, subpath) - if err != nil { - return nil, err - } - return handle, nil -} - -// procfsBase is an enum indicating the prefix of a subpath in operations -// involving [Handle]s. -type procfsBase string - -const ( - // ProcRoot refers to the root of the procfs (i.e., "/proc/"). - ProcRoot procfsBase = "/proc" - // ProcSelf refers to the current process' subdirectory (i.e., - // "/proc/self/"). - ProcSelf procfsBase = "/proc/self" - // ProcThreadSelf refers to the current thread's subdirectory (i.e., - // "/proc/thread-self/"). In multi-threaded programs (i.e., all Go - // programs) where one thread has a different CLONE_FS, it is possible for - // "/proc/self" to point to the wrong thread and so "/proc/thread-self" may be - // necessary. Note that on pre-3.17 kernels, "/proc/thread-self" doesn't - // exist and so a fallback will be used in that case. - ProcThreadSelf procfsBase = "/proc/thread-self" - // TODO: Switch to an interface setup so we can have a more type-safe - // version of ProcPid and remove the need to worry about invalid string - // values. -) - -// prefix returns a prefix that can be used with the given [Handle]. -func (base procfsBase) prefix(proc *Handle) (string, error) { - switch base { - case ProcRoot: - return ".", nil - case ProcSelf: - return "self", nil - case ProcThreadSelf: - threadSelf := "thread-self" - if !hasProcThreadSelf() || hookForceProcSelfTask() { - // Pre-3.17 kernels don't have /proc/thread-self, so do it - // manually. - threadSelf = "self/task/" + strconv.Itoa(unix.Gettid()) - if err := fd.Faccessat(proc.Inner, threadSelf, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW); err != nil || hookForceProcSelf() { - // In this case, we are running in a pid namespace that doesn't - // match the /proc mount we have. This can happen inside runc. - // - // Unfortunately, there is no nice way to get the correct TID - // to use here because of the age of the kernel, so we have to - // just use /proc/self and hope that it works. - threadSelf = "self" - } - } - return threadSelf, nil - } - return "", fmt.Errorf("invalid procfs base %q", base) -} - -// ProcThreadSelfCloser is a callback that needs to be called when you are done -// operating on an [os.File] fetched using [ProcThreadSelf]. -// -// [os.File]: https://pkg.go.dev/os#File -type ProcThreadSelfCloser func() - -// open is the core lookup operation for [Handle]. It returns a handle to -// "/proc/<base>/<subpath>". If the returned [ProcThreadSelfCloser] is non-nil, -// you should call it after you are done interacting with the returned handle. -// -// In general you should prefer to use the other helpers, as they remove -// the need to interact with [procfsBase] and do not return a nil -// [ProcThreadSelfCloser] for [procfsBase] values other than [ProcThreadSelf] -// where it is necessary.
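Given the doc comment above, it is worth seeing why ProcThreadSelf is the one case that hands back a non-nil closer. A minimal stand-alone sketch (assuming a Linux 3.17+ kernel with /proc/thread-self): the goroutine must stay pinned to its OS thread for as long as the handle is in use, which is exactly what the returned runtime.UnlockOSThread closer undoes.

    package main

    import (
    	"fmt"
    	"os"
    	"runtime"
    )

    func main() {
    	// Pin this goroutine to its OS thread so that /proc/thread-self
    	// keeps referring to the same task while we hold the handle.
    	runtime.LockOSThread()
    	defer runtime.UnlockOSThread()

    	f, err := os.Open("/proc/thread-self/status")
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()
    	fmt.Println("opened", f.Name())
    }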
-func (proc *Handle) open(base procfsBase, subpath string) (_ *os.File, closer ProcThreadSelfCloser, Err error) { - prefix, err := base.prefix(proc) - if err != nil { - return nil, nil, err - } - subpath = prefix + "/" + subpath - - switch base { - case ProcRoot: - file, err := proc.lookup(subpath) - if errors.Is(err, os.ErrNotExist) { - // The Handle in use might be a subset=pid one, which will - // result in spurious errors. In this case, just open a temporary - // unmasked procfs handle for this operation. - proc, err2 := OpenUnsafeProcRoot() // !subset=pid - if err2 != nil { - return nil, nil, err - } - defer proc.Close() //nolint:errcheck // close failures aren't critical here - - file, err = proc.lookup(subpath) - } - return file, nil, err - - case ProcSelf: - file, err := proc.lookup(subpath) - return file, nil, err - - case ProcThreadSelf: - // We need to lock our thread until the caller is done with the handle - // because between getting the handle and using it we could get - // interrupted by the Go runtime and hit the case where the underlying - // thread is swapped out and the original thread is killed, resulting - // in pull-your-hair-out-hard-to-debug issues in the caller. - runtime.LockOSThread() - defer func() { - if Err != nil { - runtime.UnlockOSThread() - closer = nil - } - }() - - file, err := proc.lookup(subpath) - return file, runtime.UnlockOSThread, err - } - // should never be reached - return nil, nil, fmt.Errorf("[internal error] invalid procfs base %q", base) -} - -// OpenThreadSelf returns a handle to "/proc/thread-self/<subpath>" (or an -// equivalent handle on older kernels where "/proc/thread-self" doesn't exist). -// Once finished with the handle, you must call the returned closer function -// (runtime.UnlockOSThread). You must not pass the returned *os.File to other -// Go threads or use the handle after calling the closer. -func (proc *Handle) OpenThreadSelf(subpath string) (_ *os.File, _ ProcThreadSelfCloser, Err error) { - return proc.open(ProcThreadSelf, subpath) -} - -// OpenSelf returns a handle to /proc/self/<subpath>. -func (proc *Handle) OpenSelf(subpath string) (*os.File, error) { - file, closer, err := proc.open(ProcSelf, subpath) - assert.Assert(closer == nil, "closer for ProcSelf must be nil") - return file, err -} - -// OpenRoot returns a handle to /proc/<subpath>. -func (proc *Handle) OpenRoot(subpath string) (*os.File, error) { - file, closer, err := proc.open(ProcRoot, subpath) - assert.Assert(closer == nil, "closer for ProcRoot must be nil") - return file, err -} - -// OpenPid returns a handle to /proc/$pid/<subpath> (pid can be a pid or tid). -// This is mainly intended for usage when operating on other processes. -func (proc *Handle) OpenPid(pid int, subpath string) (*os.File, error) { - return proc.OpenRoot(strconv.Itoa(pid) + "/" + subpath) -} - -// checkSubpathOvermount checks if the dirfd and path combination is on the -// same mount as the given root. -func checkSubpathOvermount(root, dir fd.Fd, path string) error { - // Get the mntID of our procfs handle. - expectedMountID, err := fd.GetMountID(root, "") - if err != nil { - return fmt.Errorf("get root mount id: %w", err) - } - // Get the mntID of the target magic-link. - gotMountID, err := fd.GetMountID(dir, path) - if err != nil { - return fmt.Errorf("get subpath mount id: %w", err) - } - // As long as the directory mount is alive, even with wrapping mount IDs, - // we would expect to see a different mount ID here.
(Of course, if we're - // using unsafeHostProcRoot() then an attacker could change this after we - // did this check.) - if expectedMountID != gotMountID { - return fmt.Errorf("%w: subpath %s/%s has an overmount obscuring the real path (mount ids do not match %d != %d)", - errUnsafeProcfs, dir.Name(), path, expectedMountID, gotMountID) - } - return nil -} - -// Readlink performs a readlink operation on "/proc/<base>/<subpath>" in a way -// that should be free from race attacks. This is most commonly used to get the -// real path of a file by looking at "/proc/self/fd/$n", with the same safety -// protections as [Open] (as well as some additional checks against -// overmounts). -func (proc *Handle) Readlink(base procfsBase, subpath string) (string, error) { - link, closer, err := proc.open(base, subpath) - if closer != nil { - defer closer() - } - if err != nil { - return "", fmt.Errorf("get safe %s/%s handle: %w", base, subpath, err) - } - defer link.Close() //nolint:errcheck // close failures aren't critical here - - // Try to detect if there is a mount on top of the magic-link. This should - // be safe in general (a mount on top of the path afterwards would not - // affect the handle itself) and will definitely be safe if we are using - // privateProcRoot() (at least since Linux 5.12[1], when anonymous mount - // namespaces were completely isolated from external mounts including mount - // propagation events). - // - // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts - // onto targets that reside on shared mounts"). - if err := checkSubpathOvermount(proc.Inner, link, ""); err != nil { - return "", fmt.Errorf("check safety of %s/%s magiclink: %w", base, subpath, err) - } - - // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See Linux commit - // 65cfc6722361 ("readlinkat(), fchownat() and fstatat() with empty - // relative pathnames"). - return fd.Readlinkat(link, "") -} - -// ProcSelfFdReadlink gets the real path of the given file by looking at -// readlink(/proc/thread-self/fd/$n). -// -// This is just a wrapper around [Handle.Readlink]. -func ProcSelfFdReadlink(fd fd.Fd) (string, error) { - procRoot, err := OpenProcRoot() // subset=pid - if err != nil { - return "", err - } - defer procRoot.Close() //nolint:errcheck // close failures aren't critical here - - fdPath := "fd/" + strconv.Itoa(int(fd.Fd())) - return procRoot.Readlink(ProcThreadSelf, fdPath) -} - -// CheckProcSelfFdPath returns whether the given file handle matches the -// expected path. (This is inherently racy.) -func CheckProcSelfFdPath(path string, file fd.Fd) error { - if err := fd.IsDeadInode(file); err != nil { - return err - } - actualPath, err := ProcSelfFdReadlink(file) - if err != nil { - return fmt.Errorf("get path of handle: %w", err) - } - if actualPath != path { - return fmt.Errorf("%w: handle path %q doesn't match expected path %q", internal.ErrPossibleBreakout, actualPath, path) - } - return nil -} - -// ReopenFd takes an existing file descriptor and "re-opens" it through -// /proc/thread-self/fd/<fd>. This allows for O_PATH file descriptors to be -// upgraded to regular file descriptors, as well as changing the open mode of a -// regular file descriptor. Some filesystems have unique handling of open(2) -// which makes this incredibly useful (such as /dev/ptmx).
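The re-open trick described here is standard Linux practice and easy to demonstrate outside the library. A simplified sketch (using /proc/self/fd directly and skipping the overmount checks that ReopenFd performs):

    package main

    import (
    	"fmt"
    	"os"
    	"strconv"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// An O_PATH descriptor cannot be read from directly...
    	pathFd, err := unix.Open("/etc/hostname", unix.O_PATH|unix.O_CLOEXEC, 0)
    	if err != nil {
    		panic(err)
    	}
    	defer unix.Close(pathFd)

    	// ...but re-opening it through the /proc magic-link upgrades it
    	// to a regular, readable descriptor.
    	f, err := os.Open("/proc/self/fd/" + strconv.Itoa(pathFd))
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	buf := make([]byte, 64)
    	n, _ := f.Read(buf)
    	fmt.Printf("%s", buf[:n])
    }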
-func ReopenFd(handle fd.Fd, flags int) (*os.File, error) { - procRoot, err := OpenProcRoot() // subset=pid - if err != nil { - return nil, err - } - defer procRoot.Close() //nolint:errcheck // close failures aren't critical here - - // We can't operate on /proc/thread-self/fd/$n directly when doing a - // re-open, so we need to open /proc/thread-self/fd and then open a single - // final component. - procFdDir, closer, err := procRoot.OpenThreadSelf("fd/") - if err != nil { - return nil, fmt.Errorf("get safe /proc/thread-self/fd handle: %w", err) - } - defer procFdDir.Close() //nolint:errcheck // close failures aren't critical here - defer closer() - - // Try to detect if there is a mount on top of the magic-link we are about - // to open. If we are using unsafeHostProcRoot(), this could change after - // we check it (and there's nothing we can do about that) but for - // privateProcRoot() this should be guaranteed to be safe (at least since - // Linux 5.12[1], when anonymous mount namespaces were completely isolated - // from external mounts including mount propagation events). - // - // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts - // onto targets that reside on shared mounts"). - fdStr := strconv.Itoa(int(handle.Fd())) - if err := checkSubpathOvermount(procRoot.Inner, procFdDir, fdStr); err != nil { - return nil, fmt.Errorf("check safety of /proc/thread-self/fd/%s magiclink: %w", fdStr, err) - } - - flags |= unix.O_CLOEXEC - // Rather than just wrapping fd.Openat, open-code it so we can copy - // handle.Name(). - reopenFd, err := unix.Openat(int(procFdDir.Fd()), fdStr, flags, 0) - if err != nil { - return nil, fmt.Errorf("reopen fd %d: %w", handle.Fd(), err) - } - return os.NewFile(uintptr(reopenFd), handle.Name()), nil -} - -// Test hooks used in the procfs tests to verify that the fallback logic works. -// See testing_mocks_linux_test.go and procfs_linux_test.go for more details. -var ( - hookForcePrivateProcRootOpenTree = hookDummyFile - hookForcePrivateProcRootOpenTreeAtRecursive = hookDummyFile - hookForceGetProcRootUnsafe = hookDummy - - hookForceProcSelfTask = hookDummy - hookForceProcSelf = hookDummy -) - -func hookDummy() bool { return false } -func hookDummyFile(_ io.Closer) bool { return false } diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_lookup_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_lookup_linux.go deleted file mode 100644 index 1ad1f18ee..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_lookup_linux.go +++ /dev/null @@ -1,222 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//go:build linux - -// Copyright (C) 2024-2025 Aleksa Sarai -// Copyright (C) 2024-2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -// This code is adapted to be a minimal version of the libpathrs proc resolver -// . -// As we only need O_PATH|O_NOFOLLOW support, this is not too much to port. 
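The resolver defined in this file walks the path one component at a time instead of handing the kernel a full string. A stand-alone sketch of the splitting idiom it uses (hypothetical input; empty components are treated as "." exactly as in the resolver):

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	remaining := "self//fd/0"
    	for remaining != "" {
    		var part string
    		if i := strings.IndexByte(remaining, '/'); i == -1 {
    			part, remaining = remaining, ""
    		} else {
    			part, remaining = remaining[:i], remaining[i+1:]
    		}
    		if part == "" {
    			part = "." // empty components behave like "."
    		}
    		fmt.Printf("%q\n", part) // "self", ".", "fd", "0"
    	}
    }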
- -package procfs - -import ( - "fmt" - "os" - "path" - "path/filepath" - "strings" - - "golang.org/x/sys/unix" - - "github.com/cyphar/filepath-securejoin/internal/consts" - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal" - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd" - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux" -) - -// procfsLookupInRoot is a stripped down version of completeLookupInRoot, -// entirely designed to support the very small set of features necessary to -// make procfs handling work. Unlike completeLookupInRoot, we always have -// O_PATH|O_NOFOLLOW behaviour for trailing symlinks. -// -// The main restrictions are: -// -// - ".." is not supported (as it requires either os.Root-style replays, -// which is more bug-prone; or procfs verification, which is not possible -// due to re-entrancy issues). -// - Absolute symlinks for the same reason (and all absolute symlinks in -// procfs are magic-links, which we want to skip anyway). -// - If statx is supported (checkSymlinkOvermount), any mount-point crossings -// (which is the main attack of concern against /proc). -// - Partial lookups are not supported, so the symlink stack is not needed. -// - Trailing slash special handling is not necessary in most cases (if we are -// operating on procfs, it's usually with programmer-controlled strings -// that will then be re-opened), so we skip it since whatever re-opens it -// can deal with it. It's a creature comfort anyway. -// -// If the system supports openat2(), this is implemented using equivalent flags -// (RESOLVE_BENEATH | RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS). -func procfsLookupInRoot(procRoot fd.Fd, unsafePath string) (Handle *os.File, _ error) { - unsafePath = filepath.ToSlash(unsafePath) // noop - - // Make sure that an empty unsafe path still returns something sane, even - // with openat2 (which doesn't have AT_EMPTY_PATH semantics yet). - if unsafePath == "" { - unsafePath = "." - } - - // This is already checked by getProcRoot, but make sure here since the - // core security of this lookup is based on this assumption. - if err := verifyProcRoot(procRoot); err != nil { - return nil, err - } - - if linux.HasOpenat2() { - // We prefer being able to use RESOLVE_NO_XDEV if we can, to be - // absolutely sure we are operating on a clean /proc handle that - // doesn't have any cheeky overmounts that could trick us (including - // symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't - // strictly needed, but just use it since we have it. - // - // NOTE: /proc/self is technically a magic-link (the contents of the - // symlink are generated dynamically), but it doesn't use - // nd_jump_link() so RESOLVE_NO_MAGICLINKS allows it. - // - // TODO: It would be nice to have RESOLVE_NO_DOTDOT, purely for - // self-consistency with the backup O_PATH resolver. - handle, err := fd.Openat2(procRoot, unsafePath, &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_NOFOLLOW | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS, - }) - if err != nil { - // TODO: Once we bump the minimum Go version to 1.20, we can use - // multiple %w verbs for this wrapping. For now we need to use a - // compatibility shim for older Go versions.
- // err = fmt.Errorf("%w: %w", errUnsafeProcfs, err) - return nil, gocompat.WrapBaseError(err, errUnsafeProcfs) - } - return handle, nil - } - - // To mirror openat2(RESOLVE_BENEATH), we need to return an error if the - // path is absolute. - if path.IsAbs(unsafePath) { - return nil, fmt.Errorf("%w: cannot resolve absolute paths in procfs resolver", internal.ErrPossibleBreakout) - } - - currentDir, err := fd.Dup(procRoot) - if err != nil { - return nil, fmt.Errorf("clone root fd: %w", err) - } - defer func() { - // If a handle is not returned, close the internal handle. - if Handle == nil { - _ = currentDir.Close() - } - }() - - var ( - linksWalked int - currentPath string - remainingPath = unsafePath - ) - for remainingPath != "" { - // Get the next path component. - var part string - if i := strings.IndexByte(remainingPath, '/'); i == -1 { - part, remainingPath = remainingPath, "" - } else { - part, remainingPath = remainingPath[:i], remainingPath[i+1:] - } - if part == "" { - // no-op component, but treat it the same as "." - part = "." - } - if part == ".." { - // not permitted - return nil, fmt.Errorf("%w: cannot walk into '..' in procfs resolver", internal.ErrPossibleBreakout) - } - - // Apply the component lexically to the path we are building. - // currentPath does not contain any symlinks, and we are lexically - // dealing with a single component, so it's okay to do a filepath.Clean - // here. (Not to mention that ".." isn't allowed.) - nextPath := path.Join("/", currentPath, part) - // If we logically hit the root, just clone the root rather than - // opening the part and doing all of the other checks. - if nextPath == "/" { - // Jump to root. - rootClone, err := fd.Dup(procRoot) - if err != nil { - return nil, fmt.Errorf("clone root fd: %w", err) - } - _ = currentDir.Close() - currentDir = rootClone - currentPath = nextPath - continue - } - - // Try to open the next component. - nextDir, err := fd.Openat(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) - if err != nil { - return nil, err - } - - // Make sure we are still on procfs and haven't crossed mounts. - if err := verifyProcHandle(nextDir); err != nil { - _ = nextDir.Close() - return nil, fmt.Errorf("check %q component is on procfs: %w", part, err) - } - if err := checkSubpathOvermount(procRoot, nextDir, ""); err != nil { - _ = nextDir.Close() - return nil, fmt.Errorf("check %q component is not overmounted: %w", part, err) - } - - // We are emulating O_PATH|O_NOFOLLOW, so we only need to traverse into - // trailing symlinks if we are not the final component. Otherwise we - // can just return the currentDir. - if remainingPath != "" { - st, err := nextDir.Stat() - if err != nil { - _ = nextDir.Close() - return nil, fmt.Errorf("stat component %q: %w", part, err) - } - - if st.Mode()&os.ModeType == os.ModeSymlink { - // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See - // Linux commit 65cfc6722361 ("readlinkat(), fchownat() and - // fstatat() with empty relative pathnames"). - linkDest, err := fd.Readlinkat(nextDir, "") - // We don't need the handle anymore. - _ = nextDir.Close() - if err != nil { - return nil, err - } - - linksWalked++ - if linksWalked > consts.MaxSymlinkLimit { - return nil, &os.PathError{Op: "securejoin.procfsLookupInRoot", Path: "/proc/" + unsafePath, Err: unix.ELOOP} - } - - // Update our logical remaining path. - remainingPath = linkDest + "/" + remainingPath - // Absolute symlinks are probably magiclinks, we reject them. 
- if path.IsAbs(linkDest) { - return nil, fmt.Errorf("%w: cannot jump to / in procfs resolver -- possible magiclink", internal.ErrPossibleBreakout) - } - continue - } - } - - // Walk into the next component. - _ = currentDir.Close() - currentDir = nextDir - currentPath = nextPath - } - - // One final sanity-check. - if err := verifyProcHandle(currentDir); err != nil { - return nil, fmt.Errorf("check final handle is on procfs: %w", err) - } - if err := checkSubpathOvermount(procRoot, currentDir, ""); err != nil { - return nil, fmt.Errorf("check final handle is not overmounted: %w", err) - } - return currentDir, nil -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/lookup_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/lookup_linux.go deleted file mode 100644 index f47504e66..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/lookup_linux.go +++ /dev/null @@ -1,399 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//go:build linux - -// Copyright (C) 2024-2025 Aleksa Sarai -// Copyright (C) 2024-2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -package pathrs - -import ( - "errors" - "fmt" - "os" - "path" - "path/filepath" - "strings" - - "golang.org/x/sys/unix" - - "github.com/cyphar/filepath-securejoin/internal/consts" - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd" - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux" - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs" -) - -type symlinkStackEntry struct { - // (dir, remainingPath) is what we would've returned if the link didn't - // exist. This matches what openat2(RESOLVE_IN_ROOT) would return in - // this case. - dir *os.File - remainingPath string - // linkUnwalked is the remaining path components from the original - // Readlink which we have yet to walk. When this slice is empty, we - // drop the link from the stack. - linkUnwalked []string -} - -func (se symlinkStackEntry) String() string { - return fmt.Sprintf("<%s>/%s [->%s]", se.dir.Name(), se.remainingPath, strings.Join(se.linkUnwalked, "/")) -} - -func (se symlinkStackEntry) Close() { - _ = se.dir.Close() -} - -type symlinkStack []*symlinkStackEntry - -func (s *symlinkStack) IsEmpty() bool { - return s == nil || len(*s) == 0 -} - -func (s *symlinkStack) Close() { - if s != nil { - for _, link := range *s { - link.Close() - } - // TODO: Switch to clear once we switch to Go 1.21. - *s = nil - } -} - -var ( - errEmptyStack = errors.New("[internal] stack is empty") - errBrokenSymlinkStack = errors.New("[internal error] broken symlink stack") -) - -func (s *symlinkStack) popPart(part string) error { - if s == nil || s.IsEmpty() { - // If there is nothing in the symlink stack, then the part was from the - // real path provided by the user, and this is a no-op. - return errEmptyStack - } - if part == "." { - // "." components are no-ops -- we drop them when doing SwapLink. - return nil - } - - tailEntry := (*s)[len(*s)-1] - - // Double-check that we are popping the component we expect. 
- if len(tailEntry.linkUnwalked) == 0 { - return fmt.Errorf("%w: trying to pop component %q of empty stack entry %s", errBrokenSymlinkStack, part, tailEntry) - } - headPart := tailEntry.linkUnwalked[0] - if headPart != part { - return fmt.Errorf("%w: trying to pop component %q but the last stack entry is %s (%q)", errBrokenSymlinkStack, part, tailEntry, headPart) - } - - // Drop the component, but keep the entry around in case we are dealing - // with a "tail-chained" symlink. - tailEntry.linkUnwalked = tailEntry.linkUnwalked[1:] - return nil -} - -func (s *symlinkStack) PopPart(part string) error { - if err := s.popPart(part); err != nil { - if errors.Is(err, errEmptyStack) { - // Skip empty stacks. - err = nil - } - return err - } - - // Clean up any of the trailing stack entries that are empty. - for lastGood := len(*s) - 1; lastGood >= 0; lastGood-- { - entry := (*s)[lastGood] - if len(entry.linkUnwalked) > 0 { - break - } - entry.Close() - (*s) = (*s)[:lastGood] - } - return nil -} - -func (s *symlinkStack) push(dir *os.File, remainingPath, linkTarget string) error { - if s == nil { - return nil - } - // Split the link target and clean up any "" parts. - linkTargetParts := gocompat.SlicesDeleteFunc( - strings.Split(linkTarget, "/"), - func(part string) bool { return part == "" || part == "." }) - - // Copy the directory so the caller doesn't close our copy. - dirCopy, err := fd.Dup(dir) - if err != nil { - return err - } - - // Add to the stack. - *s = append(*s, &symlinkStackEntry{ - dir: dirCopy, - remainingPath: remainingPath, - linkUnwalked: linkTargetParts, - }) - return nil -} - -func (s *symlinkStack) SwapLink(linkPart string, dir *os.File, remainingPath, linkTarget string) error { - // If we are currently inside a symlink resolution, remove the symlink - // component from the last symlink entry, but don't remove the entry even - // if it's empty. If we are a "tail-chained" symlink (a trailing symlink we - // hit during a symlink resolution) we need to keep the old symlink until - // we finish the resolution. - if err := s.popPart(linkPart); err != nil { - if !errors.Is(err, errEmptyStack) { - return err - } - // Push the component regardless of whether the stack was empty. - } - return s.push(dir, remainingPath, linkTarget) -} - -func (s *symlinkStack) PopTopSymlink() (*os.File, string, bool) { - if s == nil || s.IsEmpty() { - return nil, "", false - } - tailEntry := (*s)[0] - *s = (*s)[1:] - return tailEntry.dir, tailEntry.remainingPath, true -} - -// partialLookupInRoot tries to look up as much of the requested path as possible -// within the provided root (à la RESOLVE_IN_ROOT) and opens the final existing -// component of the requested path, returning a file handle to the final -// existing component and a string containing the remaining path components. -func partialLookupInRoot(root fd.Fd, unsafePath string) (*os.File, string, error) { - return lookupInRoot(root, unsafePath, true) -} - -func completeLookupInRoot(root fd.Fd, unsafePath string) (*os.File, error) { - handle, remainingPath, err := lookupInRoot(root, unsafePath, false) - if remainingPath != "" && err == nil { - // should never happen - err = fmt.Errorf("[bug] non-empty remaining path when doing a non-partial lookup: %q", remainingPath) - } - // lookupInRoot(partial=false) will always close the handle if an error is - // returned, so no need to double-check here.
- return handle, err -} - -func lookupInRoot(root fd.Fd, unsafePath string, partial bool) (Handle *os.File, _ string, _ error) { - unsafePath = filepath.ToSlash(unsafePath) // noop - - // This is very similar to SecureJoin, except that we operate on the - // components using file descriptors. We then return the last component we - // managed to open, along with the remaining path components not opened. - - // Try to use openat2 if possible. - if linux.HasOpenat2() { - return lookupOpenat2(root, unsafePath, partial) - } - - // Get the "actual" root path from /proc/self/fd. This is necessary if the - // root is some magic-link like /proc/$pid/root, in which case we want to - // make sure when we do procfs.CheckProcSelfFdPath that we are using the - // correct root path. - logicalRootPath, err := procfs.ProcSelfFdReadlink(root) - if err != nil { - return nil, "", fmt.Errorf("get real root path: %w", err) - } - - currentDir, err := fd.Dup(root) - if err != nil { - return nil, "", fmt.Errorf("clone root fd: %w", err) - } - defer func() { - // If a handle is not returned, close the internal handle. - if Handle == nil { - _ = currentDir.Close() - } - }() - - // symlinkStack is used to emulate how openat2(RESOLVE_IN_ROOT) treats - // dangling symlinks. If we hit a non-existent path while resolving a - // symlink, we need to return the (dir, remainingPath) that we had when we - // hit the symlink (treating the symlink as though it were a regular file). - // The set of (dir, remainingPath) pairs is stored within the symlinkStack - // and we add and remove parts when we hit symlink and non-symlink - // components respectively. We need a stack because of recursive symlinks - // (symlinks that contain symlink components in their target). - // - // Note that the stack is ONLY used for book-keeping. All of the actual - // path walking logic is still based on currentPath/remainingPath and - // currentDir (as in SecureJoin). - var symStack *symlinkStack - if partial { - symStack = new(symlinkStack) - defer symStack.Close() - } - - var ( - linksWalked int - currentPath string - remainingPath = unsafePath - ) - for remainingPath != "" { - // Save the current remaining path so if the part is not real we can - // return the path including the component. - oldRemainingPath := remainingPath - - // Get the next path component. - var part string - if i := strings.IndexByte(remainingPath, '/'); i == -1 { - part, remainingPath = remainingPath, "" - } else { - part, remainingPath = remainingPath[:i], remainingPath[i+1:] - } - // If we hit an empty component, we need to treat it as though it is - // "." so that trailing "/" and "//" components on a non-directory - // correctly return the right error code. - if part == "" { - part = "." - } - - // Apply the component lexically to the path we are building. - // currentPath does not contain any symlinks, and we are lexically - // dealing with a single component, so it's okay to do a filepath.Clean - // here. - nextPath := path.Join("/", currentPath, part) - // If we logically hit the root, just clone the root rather than - // opening the part and doing all of the other checks. - if nextPath == "/" { - if err := symStack.PopPart(part); err != nil { - return nil, "", fmt.Errorf("walking into root with part %q failed: %w", part, err) - } - // Jump to root.
- rootClone, err := fd.Dup(root) - if err != nil { - return nil, "", fmt.Errorf("clone root fd: %w", err) - } - _ = currentDir.Close() - currentDir = rootClone - currentPath = nextPath - continue - } - - // Try to open the next component. - nextDir, err := fd.Openat(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) - switch err { - case nil: - st, err := nextDir.Stat() - if err != nil { - _ = nextDir.Close() - return nil, "", fmt.Errorf("stat component %q: %w", part, err) - } - - switch st.Mode() & os.ModeType { //nolint:exhaustive // just a glorified if statement - case os.ModeSymlink: - // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See - // Linux commit 65cfc6722361 ("readlinkat(), fchownat() and - // fstatat() with empty relative pathnames"). - linkDest, err := fd.Readlinkat(nextDir, "") - // We don't need the handle anymore. - _ = nextDir.Close() - if err != nil { - return nil, "", err - } - - linksWalked++ - if linksWalked > consts.MaxSymlinkLimit { - return nil, "", &os.PathError{Op: "securejoin.lookupInRoot", Path: logicalRootPath + "/" + unsafePath, Err: unix.ELOOP} - } - - // Swap out the symlink's component for the link entry itself. - if err := symStack.SwapLink(part, currentDir, oldRemainingPath, linkDest); err != nil { - return nil, "", fmt.Errorf("walking into symlink %q failed: push symlink: %w", part, err) - } - - // Update our logical remaining path. - remainingPath = linkDest + "/" + remainingPath - // Absolute symlinks reset any work we've already done. - if path.IsAbs(linkDest) { - // Jump to root. - rootClone, err := fd.Dup(root) - if err != nil { - return nil, "", fmt.Errorf("clone root fd: %w", err) - } - _ = currentDir.Close() - currentDir = rootClone - currentPath = "/" - } - - default: - // If we are dealing with a directory, simply walk into it. - _ = currentDir.Close() - currentDir = nextDir - currentPath = nextPath - - // The part was real, so drop it from the symlink stack. - if err := symStack.PopPart(part); err != nil { - return nil, "", fmt.Errorf("walking into directory %q failed: %w", part, err) - } - - // If we are operating on a .., make sure we haven't escaped. - // We only have to check for ".." here because walking down - // into a regular component cannot cause you to - // escape. This mirrors the logic in RESOLVE_IN_ROOT, except we - // have to check every ".." rather than only checking after a - // rename or mount on the system. - if part == ".." { - // Make sure the root hasn't moved. - if err := procfs.CheckProcSelfFdPath(logicalRootPath, root); err != nil { - return nil, "", fmt.Errorf("root path moved during lookup: %w", err) - } - // Make sure the path is what we expect. - fullPath := logicalRootPath + nextPath - if err := procfs.CheckProcSelfFdPath(fullPath, currentDir); err != nil { - return nil, "", fmt.Errorf("walking into %q had unexpected result: %w", part, err) - } - } - } - - default: - if !partial { - return nil, "", err - } - // If there are any remaining components in the symlink stack, we - // are still within a symlink resolution and thus we hit a dangling - // symlink. So pretend that the first symlink in the stack we hit - // was an ENOENT (to match openat2). - if oldDir, remainingPath, ok := symStack.PopTopSymlink(); ok { - _ = currentDir.Close() - return oldDir, remainingPath, err - } - // We have hit a final component that doesn't exist, so we have our - // partial open result. Note that we have to use the OLD remaining - // path, since the lookup failed.
- return currentDir, oldRemainingPath, err - } - } - - // If the unsafePath had a trailing slash, we need to make sure we try to - // do a relative "." open so that we will correctly return an error when - // the final component is a non-directory (to match openat2). In the - // context of openat2, a trailing slash and a trailing "/." are completely - // equivalent. - if strings.HasSuffix(unsafePath, "/") { - nextDir, err := fd.Openat(currentDir, ".", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) - if err != nil { - if !partial { - _ = currentDir.Close() - currentDir = nil - } - return currentDir, "", err - } - _ = currentDir.Close() - currentDir = nextDir - } - - // All of the components existed! - return currentDir, "", nil -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/mkdir_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/mkdir_linux.go deleted file mode 100644 index f3c62b0da..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/mkdir_linux.go +++ /dev/null @@ -1,246 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//go:build linux - -// Copyright (C) 2024-2025 Aleksa Sarai -// Copyright (C) 2024-2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -package pathrs - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "golang.org/x/sys/unix" - - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd" - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux" -) - -var errInvalidMode = errors.New("invalid permission mode") - -// modePermExt is like os.ModePerm except that it also includes the set[ug]id -// and sticky bits. -const modePermExt = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky - -//nolint:cyclop // this function needs to handle a lot of cases -func toUnixMode(mode os.FileMode) (uint32, error) { - sysMode := uint32(mode.Perm()) - if mode&os.ModeSetuid != 0 { - sysMode |= unix.S_ISUID - } - if mode&os.ModeSetgid != 0 { - sysMode |= unix.S_ISGID - } - if mode&os.ModeSticky != 0 { - sysMode |= unix.S_ISVTX - } - // We don't allow file type bits. - if mode&os.ModeType != 0 { - return 0, fmt.Errorf("%w %+.3o (%s): type bits not permitted", errInvalidMode, mode, mode) - } - // We don't allow other unknown modes. - if mode&^modePermExt != 0 || sysMode&unix.S_IFMT != 0 { - return 0, fmt.Errorf("%w %+.3o (%s): unknown mode bits", errInvalidMode, mode, mode) - } - return sysMode, nil -} - -// MkdirAllHandle is equivalent to [MkdirAll], except that it is safer to use -// in two respects: -// -// - The caller provides the root directory as an *[os.File] (preferably O_PATH) -// handle. This means that the caller can be sure which root directory is -// being used. Note that this can be emulated by using /proc/self/fd/... as -// the root path with [os.MkdirAll]. -// -// - Once all of the directories have been created, an *[os.File] O_PATH handle -// to the directory at unsafePath is returned to the caller. This is done in -// an effectively-race-free way (an attacker would only be able to swap the -// final directory component), which is not possible to emulate with -// [MkdirAll]. 
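// ---- Editor's example (not part of the upstream diff) ----
// A caller-side sketch of the os.FileMode -> unix mode-bit mapping that
// toUnixMode above implements: permission bits pass through, while the
// setuid/setgid/sticky FileMode flags map onto unix.S_IS{UID,GID,VTX}.
// Constants come from golang.org/x/sys/unix; the mode value is made up.
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	mode := os.FileMode(0o755) | os.ModeSetgid

	sys := uint32(mode.Perm())
	if mode&os.ModeSetuid != 0 {
		sys |= unix.S_ISUID
	}
	if mode&os.ModeSetgid != 0 {
		sys |= unix.S_ISGID
	}
	if mode&os.ModeSticky != 0 {
		sys |= unix.S_ISVTX
	}
	fmt.Printf("%#o\n", sys) // 02755
}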
-// -// In addition, the returned handle is obtained far more efficiently than doing -// a brand new lookup of unsafePath (such as with [SecureJoin] or openat2) after -// doing [MkdirAll]. If you intend to open the directory after creating it, you -// should use MkdirAllHandle. -// -// [SecureJoin]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin#SecureJoin -func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.File, Err error) { - unixMode, err := toUnixMode(mode) - if err != nil { - return nil, err - } - // On Linux, mkdirat(2) (and os.Mkdir) silently ignore the suid and sgid - // bits. We could also silently ignore them but since we have very few - // users it seems more prudent to return an error so users notice that - // these bits will not be set. - if unixMode&^0o1777 != 0 { - return nil, fmt.Errorf("%w for mkdir %+.3o: suid and sgid are ignored by mkdir", errInvalidMode, mode) - } - - // Try to open as much of the path as possible. - currentDir, remainingPath, err := partialLookupInRoot(root, unsafePath) - defer func() { - if Err != nil { - _ = currentDir.Close() - } - }() - if err != nil && !errors.Is(err, unix.ENOENT) { - return nil, fmt.Errorf("find existing subpath of %q: %w", unsafePath, err) - } - - // If there is an attacker deleting directories as we walk into them, - // detect this proactively. Note this is guaranteed to detect if the - // attacker deleted any part of the tree up to currentDir. - // - // Once we walk into a dead directory, partialLookupInRoot would not be - // able to walk further down the tree (directories must be empty before - // they are deleted), and if the attacker has removed the entire tree we - // can be sure that anything that was originally inside a dead directory - // must also be deleted and thus is a dead directory in its own right. - // - // This is mostly a quality-of-life check, because mkdir will simply fail - // later if the attacker deletes the tree after this check. - if err := fd.IsDeadInode(currentDir); err != nil { - return nil, fmt.Errorf("finding existing subpath of %q: %w", unsafePath, err) - } - - // Re-open the path to match the O_DIRECTORY reopen loop later (so that we - // always return a non-O_PATH handle). We also check that we actually got a - // directory. - if reopenDir, err := Reopen(currentDir, unix.O_DIRECTORY|unix.O_CLOEXEC); errors.Is(err, unix.ENOTDIR) { - return nil, fmt.Errorf("cannot create subdirectories in %q: %w", currentDir.Name(), unix.ENOTDIR) - } else if err != nil { - return nil, fmt.Errorf("re-opening handle to %q: %w", currentDir.Name(), err) - } else { //nolint:revive // indent-error-flow lint doesn't make sense here - _ = currentDir.Close() - currentDir = reopenDir - } - - remainingParts := strings.Split(remainingPath, string(filepath.Separator)) - if gocompat.SlicesContains(remainingParts, "..") { - // The path contained ".." components after the end of the "real" - // components. We could try to safely resolve ".." here but that would - // add a bunch of extra logic for something that it's not clear even - // needs to be supported. So just return an error. - // - // If we do filepath.Clean(remainingPath) then we end up with the - // problem that ".." can erase a trailing dangling symlink and produce - // a path that doesn't quite match what the user asked for. - return nil, fmt.Errorf("%w: yet-to-be-created path %q contains '..' components", unix.ENOENT, remainingPath) - } - - // Create the remaining components. 
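// ---- Editor's example (not part of the upstream diff) ----
// Hypothetical usage of MkdirAllHandle as documented above: open an O_PATH
// handle to a trusted root, create nested directories beneath it, and get a
// handle to the leaf back. The root path and subpath here are invented, and
// error handling is abbreviated to panics for brevity.
package main

import (
	"fmt"
	"os"

	pathrs "github.com/cyphar/filepath-securejoin/pathrs-lite"
	"golang.org/x/sys/unix"
)

func main() {
	root, err := os.OpenFile("/srv/data", unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
	if err != nil {
		panic(err)
	}
	defer root.Close()

	// Creates a/b/c under the root; symlink components cannot escape it.
	leaf, err := pathrs.MkdirAllHandle(root, "a/b/c", 0o755)
	if err != nil {
		panic(err)
	}
	defer leaf.Close()
	fmt.Println("created:", leaf.Name())
}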
- for _, part := range remainingParts { - switch part { - case "", ".": - // Skip over no-op paths. - continue - } - - // NOTE: mkdir(2) will not follow trailing symlinks, so we can safely - // create the final component without worrying about symlink-exchange - // attacks. - // - // If we get -EEXIST, it's possible that another program created the - // directory at the same time as us. In that case, just continue on as - // if we created it (if the created inode is not a directory, the - // following open call will fail). - if err := unix.Mkdirat(int(currentDir.Fd()), part, unixMode); err != nil && !errors.Is(err, unix.EEXIST) { - err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err} - // Make the error a bit nicer if the directory is dead. - if deadErr := fd.IsDeadInode(currentDir); deadErr != nil { - // TODO: Once we bump the minimum Go version to 1.20, we can use - // multiple %w verbs for this wrapping. For now we need to use a - // compatibility shim for older Go versions. - // err = fmt.Errorf("%w (%w)", err, deadErr) - err = gocompat.WrapBaseError(err, deadErr) - } - return nil, err - } - - // Get a handle to the next component. O_DIRECTORY means we don't need - // to use O_PATH. - var nextDir *os.File - if linux.HasOpenat2() { - nextDir, err = openat2(currentDir, part, &unix.OpenHow{ - Flags: unix.O_NOFOLLOW | unix.O_DIRECTORY | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_NO_XDEV, - }) - } else { - nextDir, err = fd.Openat(currentDir, part, unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) - } - if err != nil { - return nil, err - } - _ = currentDir.Close() - currentDir = nextDir - - // It's possible that the directory we just opened was swapped by an - // attacker. Unfortunately there isn't much we can do to protect - // against this, and MkdirAll's behaviour is that we will reuse - // existing directories anyway so the need to protect against this is - // incredibly limited (and arguably doesn't even deserve mention here). - // - // Ideally we might want to check that the owner and mode match what we - // would've created -- unfortunately, it is non-trivial to verify that - // the owner and mode of the created directory match. While plain Unix - // DAC rules seem simple enough to emulate, there are a bunch of other - // factors that can change the mode or owner of created directories - // (default POSIX ACLs, mount options like uid=1,gid=2,umask=0 on - // filesystems like vfat, etc etc). We used to try to verify this but - // it just led to a series of spurious errors. - // - // We could also check that the directory is non-empty, but - // unfortunately some pseudofilesystems (like cgroupfs) create - // non-empty directories, which would result in different spurious - // errors. - } - return currentDir, nil -} - -// MkdirAll is a race-safe alternative to the [os.MkdirAll] function, -// where the new directory is guaranteed to be within the root directory (if an -// attacker can move directories from inside the root to outside the root, the -// created directory tree might be outside of the root but the key constraint -// is that at no point will we walk outside of the directory tree we are -// creating). -// -// Effectively, MkdirAll(root, unsafePath, mode) is equivalent to -// -// path, _ := securejoin.SecureJoin(root, unsafePath) -// err := os.MkdirAll(path, mode) -// -// But is much safer.
The above implementation is unsafe because if an attacker -// can modify the filesystem tree between [SecureJoin] and [os.MkdirAll], it is -// possible for MkdirAll to resolve unsafe symlink components and create -// directories outside of the root. -// -// If you plan to open the directory after you have created it or want to use -// an open directory handle as the root, you should use [MkdirAllHandle] instead. -// This function is a wrapper around [MkdirAllHandle]. -// -// [SecureJoin]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin#SecureJoin -func MkdirAll(root, unsafePath string, mode os.FileMode) error { - rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) - if err != nil { - return err - } - defer rootDir.Close() //nolint:errcheck // close failures aren't critical here - - f, err := MkdirAllHandle(rootDir, unsafePath, mode) - if err != nil { - return err - } - _ = f.Close() - return nil -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/open_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/open_linux.go deleted file mode 100644 index 7492d8cfa..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/open_linux.go +++ /dev/null @@ -1,74 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//go:build linux - -// Copyright (C) 2024-2025 Aleksa Sarai -// Copyright (C) 2024-2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -package pathrs - -import ( - "os" - - "golang.org/x/sys/unix" - - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs" -) - -// OpenatInRoot is equivalent to [OpenInRoot], except that the root is provided -// using an *[os.File] handle, to ensure that the correct root directory is used. -func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) { - handle, err := completeLookupInRoot(root, unsafePath) - if err != nil { - return nil, &os.PathError{Op: "securejoin.OpenInRoot", Path: unsafePath, Err: err} - } - return handle, nil -} - -// OpenInRoot safely opens the provided unsafePath within the root. -// Effectively, OpenInRoot(root, unsafePath) is equivalent to -// -// path, _ := securejoin.SecureJoin(root, unsafePath) -// handle, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC) -// -// But is much safer. The above implementation is unsafe because if an attacker -// can modify the filesystem tree between [SecureJoin] and [os.OpenFile], it is -// possible for the returned file to be outside of the root. -// -// Note that the returned handle is an O_PATH handle, meaning that only a very -// limited set of operations will work on the handle. This is done to avoid -// accidentally opening an untrusted file that could cause issues (such as a -// disconnected TTY that could cause a DoS, or some other issue). In order to -// use the returned handle, you can "upgrade" it to a proper handle using -// [Reopen]. 
-// -// [SecureJoin]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin#SecureJoin -func OpenInRoot(root, unsafePath string) (*os.File, error) { - rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) - if err != nil { - return nil, err - } - defer rootDir.Close() //nolint:errcheck // close failures aren't critical here - return OpenatInRoot(rootDir, unsafePath) -} - -// Reopen takes an *[os.File] handle and re-opens it through /proc/self/fd. -// Reopen(file, flags) is effectively equivalent to -// -// fdPath := fmt.Sprintf("/proc/self/fd/%d", file.Fd()) -// os.OpenFile(fdPath, flags|unix.O_CLOEXEC) -// -// But with some extra hardenings to ensure that we are not tricked by a -// maliciously-configured /proc mount. While this attack scenario is not -// common, in container runtimes it is possible for higher-level runtimes to be -// tricked into configuring an unsafe /proc that can be used to attack file -// operations. See [CVE-2019-19921] for more details. -// -// [CVE-2019-19921]: https://github.com/advisories/GHSA-fh74-hm69-rqjw -func Reopen(handle *os.File, flags int) (*os.File, error) { - return procfs.ReopenFd(handle, flags) -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/openat2_linux.go deleted file mode 100644 index 937bc435f..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/openat2_linux.go +++ /dev/null @@ -1,101 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//go:build linux - -// Copyright (C) 2024-2025 Aleksa Sarai -// Copyright (C) 2024-2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -package pathrs - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "golang.org/x/sys/unix" - - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd" - "github.com/cyphar/filepath-securejoin/pathrs-lite/procfs" -) - -func openat2(dir fd.Fd, path string, how *unix.OpenHow) (*os.File, error) { - file, err := fd.Openat2(dir, path, how) - if err != nil { - return nil, err - } - // If we are using RESOLVE_IN_ROOT, the name we generated may be wrong. - if how.Resolve&unix.RESOLVE_IN_ROOT == unix.RESOLVE_IN_ROOT { - if actualPath, err := procfs.ProcSelfFdReadlink(file); err == nil { - // TODO: Ideally we would not need to dup the fd, but you cannot - // easily just swap an *os.File with one from the same fd - // (the GC will close the old one, and you cannot clear the - // finaliser easily because it is associated with an internal - // field of *os.File not *os.File itself). - newFile, err := fd.DupWithName(file, actualPath) - if err != nil { - return nil, err - } - file = newFile - } - } - return file, nil -} - -func lookupOpenat2(root fd.Fd, unsafePath string, partial bool) (*os.File, string, error) { - if !partial { - file, err := openat2(root, unsafePath, &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, - }) - return file, "", err - } - return partialLookupOpenat2(root, unsafePath) -} - -// partialLookupOpenat2 is an alternative implementation of -// partialLookupInRoot, using openat2(RESOLVE_IN_ROOT) to more safely get a -// handle to the deepest existing child of the requested path within the root. 
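// ---- Editor's example (not part of the upstream diff) ----
// The raw openat2(2) call that the helpers above wrap, for reference.
// RESOLVE_IN_ROOT scopes all symlink and ".." traversal to the dirfd, which
// is what lookupOpenat2 relies on. Requires Linux 5.6+; the paths are
// invented.
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	root, err := os.Open("/srv/data")
	if err != nil {
		panic(err)
	}
	defer root.Close()

	fd, err := unix.Openat2(int(root.Fd()), "a/b/c", &unix.OpenHow{
		Flags:   unix.O_PATH | unix.O_CLOEXEC,
		Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS,
	})
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)
	fmt.Println("opened fd", fd)
}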
-func partialLookupOpenat2(root fd.Fd, unsafePath string) (*os.File, string, error) { - // TODO: Implement this as a git-bisect-like binary search. - - unsafePath = filepath.ToSlash(unsafePath) // noop - endIdx := len(unsafePath) - var lastError error - for endIdx > 0 { - subpath := unsafePath[:endIdx] - - handle, err := openat2(root, subpath, &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, - }) - if err == nil { - // Jump over the slash if we have a non-"" remainingPath. - if endIdx < len(unsafePath) { - endIdx++ - } - // We found a subpath! - return handle, unsafePath[endIdx:], lastError - } - if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENOTDIR) { - // That path doesn't exist, let's try the next directory up. - endIdx = strings.LastIndexByte(subpath, '/') - lastError = err - continue - } - return nil, "", fmt.Errorf("open subpath: %w", err) - } - // If we couldn't open anything, the whole subpath is missing. Return a - // copy of the root fd so that the caller doesn't close this one by - // accident. - rootClone, err := fd.Dup(root) - if err != nil { - return nil, "", err - } - return rootClone, unsafePath, lastError -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/procfs/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/procfs/procfs_linux.go deleted file mode 100644 index ec187a414..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/procfs/procfs_linux.go +++ /dev/null @@ -1,157 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//go:build linux - -// Copyright (C) 2024-2025 Aleksa Sarai -// Copyright (C) 2024-2025 SUSE LLC -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -// Package procfs provides a safe API for operating on /proc on Linux. -package procfs - -import ( - "os" - - "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs" -) - -// This package mostly just wraps internal/procfs APIs. This is necessary -// because we are forced to export some things from internal/procfs in order to -// avoid some dependency cycle issues, but we don't want users to see or use -// them. - -// ProcThreadSelfCloser is a callback that needs to be called when you are done -// operating on an [os.File] fetched using [Handle.OpenThreadSelf]. -// -// [os.File]: https://pkg.go.dev/os#File -type ProcThreadSelfCloser = procfs.ProcThreadSelfCloser - -// Handle is a wrapper around an *os.File handle to "/proc", which can be used -// to do further procfs-related operations in a safe way. -type Handle struct { - inner *procfs.Handle -} - -// Close closes the resources associated with this [Handle]. Note that if this -// [Handle] was created with [OpenProcRoot], on some kernels the underlying -// procfs handle is cached and so this Close operation may be a no-op. However, -// you should always call Close on [Handle]s once you are done with them. -func (proc *Handle) Close() error { return proc.inner.Close() } - -// OpenProcRoot tries to open a "safer" handle to "/proc" (i.e., one with the -// "subset=pid" mount option applied, available from Linux 5.8). Unless you -// plan to do many [Handle.OpenRoot] operations, you should prefer to use -// this over [OpenUnsafeProcRoot], which is far more dangerous to keep open.
-// -// If a safe handle cannot be opened, OpenProcRoot will fall back to opening a -// regular "/proc" handle. -// -// Note that using [Handle.OpenRoot] will still work with handles returned by -// this function. If a subpath cannot be operated on with a safe "/proc" -// handle, then [OpenUnsafeProcRoot] will be called internally and a temporary -// unsafe handle will be used. -func OpenProcRoot() (*Handle, error) { - proc, err := procfs.OpenProcRoot() - if err != nil { - return nil, err - } - return &Handle{inner: proc}, nil -} - -// OpenUnsafeProcRoot opens a handle to "/proc" without any overmounts or -// masked paths. You must be extremely careful to make sure this handle is -// never leaked to a container and that your program cannot be tricked into -// writing to arbitrary paths within it. -// -// This is not necessary if you just wish to use [Handle.OpenRoot], as handles -// returned by [OpenProcRoot] will fall back to using a *temporary* unsafe -// handle in that case. You should only really use this if you need to do many -// operations with [Handle.OpenRoot] and the performance overhead of making -// many procfs handles is an issue. If you do use OpenUnsafeProcRoot, you -// should make sure to close the handle as soon as possible to avoid -// known-fd-number attacks. -func OpenUnsafeProcRoot() (*Handle, error) { - proc, err := procfs.OpenUnsafeProcRoot() - if err != nil { - return nil, err - } - return &Handle{inner: proc}, nil -} - -// OpenThreadSelf returns a handle to "/proc/thread-self/<subpath>" (or an -// equivalent handle on older kernels where "/proc/thread-self" doesn't exist). -// Once finished with the handle, you must call the returned closer function -// ([runtime.UnlockOSThread]). You must not pass the returned *os.File to other -// Go threads or use the handle after calling the closer. -// -// [runtime.UnlockOSThread]: https://pkg.go.dev/runtime#UnlockOSThread -func (proc *Handle) OpenThreadSelf(subpath string) (*os.File, ProcThreadSelfCloser, error) { - return proc.inner.OpenThreadSelf(subpath) -} - -// OpenSelf returns a handle to /proc/self/<subpath>. -// -// Note that in Go programs with non-homogenous threads, this may result in -// spurious errors. If you are monkeying around with APIs that are -// thread-specific, you probably want to use [Handle.OpenThreadSelf] instead -// which will guarantee that the handle refers to the same thread as the caller -// is executing on. -func (proc *Handle) OpenSelf(subpath string) (*os.File, error) { - return proc.inner.OpenSelf(subpath) -} - -// OpenRoot returns a handle to /proc/<subpath>. -// -// You should only use this when you need to operate on global procfs files -// (such as sysctls in /proc/sys). Unlike [Handle.OpenThreadSelf], -// [Handle.OpenSelf], and [Handle.OpenPid], the procfs handle used internally -// for this operation will never use "subset=pid", which makes it a more juicy -// target for [CVE-2024-21626]-style attacks (and doing something like opening -// a directory with OpenRoot effectively leaks [OpenUnsafeProcRoot] as long as -// the file descriptor is open). -// -// [CVE-2024-21626]: https://github.com/opencontainers/runc/security/advisories/GHSA-xr7r-f8xq-vfvv -func (proc *Handle) OpenRoot(subpath string) (*os.File, error) { - return proc.inner.OpenRoot(subpath) -} - -// OpenPid returns a handle to /proc/$pid/<subpath> (pid can be a pid or tid). -// This is mainly intended for usage when operating on other processes.
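// ---- Editor's example (not part of the upstream diff) ----
// The /proc/self/fd readlink trick that ProcSelfFdReadlink (documented just
// below) is built on: resolving an open file descriptor back to a pathname.
// The vendored procfs package wraps this with anti-overmount hardening that
// this standard-library sketch deliberately omits.
package main

import (
	"fmt"
	"os"
)

func main() {
	f, err := os.Open("/etc/hostname")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Each entry in /proc/self/fd is a magic symlink to the opened file.
	path, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", f.Fd()))
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // /etc/hostname
}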
-// -// You should not use this for the current thread, as special handling is -// needed for /proc/thread-self (or /proc/self/task/<tid>) when dealing with -// goroutine scheduling -- use [Handle.OpenThreadSelf] instead. -// -// To refer to the current thread-group, you should prefer -// [Handle.OpenSelf] over passing os.Getpid as the pid argument. -func (proc *Handle) OpenPid(pid int, subpath string) (*os.File, error) { - return proc.inner.OpenPid(pid, subpath) -} - -// ProcSelfFdReadlink gets the real path of the given file by looking at -// /proc/self/fd/<fd> with [readlink]. It is effectively just shorthand for -// something along the lines of: -// -// proc, err := procfs.OpenProcRoot() -// if err != nil { -// return err -// } -// link, err := proc.OpenThreadSelf(fmt.Sprintf("fd/%d", f.Fd())) -// if err != nil { -// return err -// } -// defer link.Close() -// var buf [4096]byte -// n, err := unix.Readlinkat(int(link.Fd()), "", buf[:]) -// if err != nil { -// return err -// } -// pathname := buf[:n] -// -// [readlink]: https://pkg.go.dev/golang.org/x/sys/unix#Readlinkat -func ProcSelfFdReadlink(f *os.File) (string, error) { - return procfs.ProcSelfFdReadlink(f) -} diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS index c5a480b5e..57af08b20 100644 --- a/vendor/github.com/docker/cli/AUTHORS +++ b/vendor/github.com/docker/cli/AUTHORS @@ -63,6 +63,7 @@ Andreas Köhler Andres G. Aragoneses Andres Leon Rangel Andrew France +Andrew He Andrew Hsu Andrew Macpherson Andrew McDonnell @@ -86,11 +87,12 @@ Archimedes Trajano Arko Dasgupta Arnaud Porterie Arnaud Rebillout +Arthur Flageul Arthur Peka Ashly Mathew Ashwini Oruganti Aslam Ahemad -Austin Vazquez +Austin Vazquez Azat Khuyiyakhmetov Bardia Keyoumarsi Barnaby Gray @@ -135,10 +137,12 @@ Cao Weiwei Carlo Mion Carlos Alexandro Becker Carlos de Paula +carsontham Carston Schilds Casey Korver Ce Gao Cedric Davies +Cesar Talledo Cezar Sa Espinola Chad Faragher Chao Wang @@ -220,7 +224,7 @@ David Alvarez David Beitey David Calavera David Cramer -David Dooling +David Dooling David Gageot David Karlsson David le Blanc @@ -265,6 +269,7 @@ Eli Uriegas Eli Uriegas Elias Faxö Elliot Luo <956941328@qq.com> +Eng Zer Jun Eric Bode Eric Curtin Eric Engestrom @@ -345,6 +350,7 @@ Henning Sprang Henry N Hernan Garcia Hongbin Lu +Hossein Abbasi <16090309+hsnabszhdn@users.noreply.github.com> Hu Keping Huayi Zhang Hugo Chastel @@ -595,6 +601,7 @@ Michael Prokop Michael Scharf Michael Spetsiotis Michael Steinert +Michael Tews Michael West Michal Minář Michał Czeraszkiewicz @@ -896,6 +903,7 @@ Wenlong Zhang Wenzhi Liang Wes Morgan Wewang Xiaorenfine +Will Wang William Henry Xianglin Gao Xiaodong Liu diff --git a/vendor/github.com/docker/cli/cli-plugins/metadata/metadata.go b/vendor/github.com/docker/cli/cli-plugins/metadata/metadata.go index 9d408c00b..7061486a7 100644 --- a/vendor/github.com/docker/cli/cli-plugins/metadata/metadata.go +++ b/vendor/github.com/docker/cli/cli-plugins/metadata/metadata.go @@ -33,4 +33,6 @@ type Metadata struct { ShortDescription string `json:",omitempty"` // URL is a pointer to the plugin's homepage. URL string `json:",omitempty"` + // Hidden hides the plugin in completion and help message output.
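// ---- Editor's example (not part of the upstream diff) ----
// What a plugin's metadata JSON might look like with the new Hidden field
// set. The struct is re-declared locally so this sketch is self-contained,
// and all values are invented.
package main

import (
	"encoding/json"
	"fmt"
)

type Metadata struct {
	SchemaVersion    string `json:",omitempty"`
	Vendor           string `json:",omitempty"`
	Version          string `json:",omitempty"`
	ShortDescription string `json:",omitempty"`
	URL              string `json:",omitempty"`
	Hidden           bool   `json:",omitempty"`
}

func main() {
	out, _ := json.MarshalIndent(Metadata{
		SchemaVersion:    "0.1.0",
		Vendor:           "Example, Inc.",
		Version:          "v0.0.1",
		ShortDescription: "An internal helper plugin",
		Hidden:           true, // omitted from help and completion output
	}, "", "  ")
	fmt.Println(string(out))
}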
+ Hidden bool `json:",omitempty"` } diff --git a/vendor/github.com/docker/cli/cli/cobra.go b/vendor/github.com/docker/cli/cli/cobra.go index a75a66e66..4ec721f88 100644 --- a/vendor/github.com/docker/cli/cli/cobra.go +++ b/vendor/github.com/docker/cli/cli/cobra.go @@ -12,7 +12,6 @@ import ( "github.com/fvbommel/sortorder" "github.com/moby/term" "github.com/morikuni/aec" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" ) @@ -167,31 +166,6 @@ func (tcmd *TopLevelCommand) Initialize(ops ...command.CLIOption) error { return tcmd.dockerCli.Initialize(tcmd.opts, ops...) } -// VisitAll will traverse all commands from the root. -// -// Deprecated: this utility was only used internally and will be removed in the next release. -func VisitAll(root *cobra.Command, fn func(*cobra.Command)) { - visitAll(root, fn) -} - -func visitAll(root *cobra.Command, fn func(*cobra.Command)) { - for _, cmd := range root.Commands() { - visitAll(cmd, fn) - } - fn(root) -} - -// DisableFlagsInUseLine sets the DisableFlagsInUseLine flag on all -// commands within the tree rooted at cmd. -// -// Deprecated: this utility was only used internally and will be removed in the next release. -func DisableFlagsInUseLine(cmd *cobra.Command) { - visitAll(cmd, func(ccmd *cobra.Command) { - // do not add a `[flags]` to the end of the usage line. - ccmd.DisableFlagsInUseLine = true - }) -} - var helpCommand = &cobra.Command{ Use: "help [command]", Short: "Help about the command", @@ -200,7 +174,7 @@ var helpCommand = &cobra.Command{ RunE: func(c *cobra.Command, args []string) error { cmd, args, e := c.Root().Find(args) if cmd == nil || e != nil || len(args) > 0 { - return errors.Errorf("unknown help topic: %v", strings.Join(args, " ")) + return fmt.Errorf("unknown help topic: %v", strings.Join(args, " ")) } helpFunc := cmd.HelpFunc() helpFunc(cmd, args) @@ -276,11 +250,12 @@ func commandAliases(cmd *cobra.Command) string { if cmd.HasParent() { parentPath = cmd.Parent().CommandPath() + " " } - aliases := cmd.CommandPath() + var aliases strings.Builder + aliases.WriteString(cmd.CommandPath()) for _, alias := range cmd.Aliases { - aliases += ", " + parentPath + alias + aliases.WriteString(", " + parentPath + alias) } - return aliases + return aliases.String() } func topCommands(cmd *cobra.Command) []*cobra.Command { @@ -376,13 +351,10 @@ func orchestratorSubCommands(cmd *cobra.Command) []*cobra.Command { func allManagementSubCommands(cmd *cobra.Command) []*cobra.Command { cmds := []*cobra.Command{} for _, sub := range cmd.Commands() { - if isPlugin(sub) { - if invalidPluginReason(sub) == "" { - cmds = append(cmds, sub) - } + if invalidPluginReason(sub) != "" { continue } - if sub.IsAvailableCommand() && sub.HasSubCommands() { + if sub.IsAvailableCommand() && (isPlugin(sub) || sub.HasSubCommands()) { cmds = append(cmds, sub) } } diff --git a/vendor/github.com/docker/cli/cli/command/cli.go b/vendor/github.com/docker/cli/cli/command/cli.go index 85fac5650..b1e945334 100644 --- a/vendor/github.com/docker/cli/cli/command/cli.go +++ b/vendor/github.com/docker/cli/cli/command/cli.go @@ -1,10 +1,11 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package command import ( "context" + "errors" "fmt" "io" "os" @@ -23,11 +24,8 @@ import ( "github.com/docker/cli/cli/streams" "github.com/docker/cli/cli/version" dopts "github.com/docker/cli/opts" - "github.com/docker/docker/api" - 
"github.com/docker/docker/api/types/build" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/client" - "github.com/pkg/errors" + "github.com/moby/moby/api/types/build" + "github.com/moby/moby/client" "github.com/spf13/cobra" ) @@ -45,12 +43,9 @@ type Cli interface { Client() client.APIClient Streams SetIn(in *streams.In) - Apply(ops ...CLIOption) error config.Provider ServerInfo() ServerInfo - DefaultVersion() string CurrentVersion() string - ContentTrustEnabled() bool BuildKitEnabled() (bool, error) ContextStore() store.Store CurrentContext() string @@ -70,7 +65,6 @@ type DockerCli struct { err *streams.Out client client.APIClient serverInfo ServerInfo - contentTrust bool contextStore store.Store currentContext string init sync.Once @@ -78,6 +72,7 @@ type DockerCli struct { dockerEndpoint docker.Endpoint contextStoreConfig *store.Config initTimeout time.Duration + userAgent string res telemetryResource // baseCtx is the base context used for internal operations. In the future @@ -88,17 +83,12 @@ type DockerCli struct { enableGlobalMeter, enableGlobalTracer bool } -// DefaultVersion returns [api.DefaultVersion]. -func (*DockerCli) DefaultVersion() string { - return api.DefaultVersion -} - // CurrentVersion returns the API version currently negotiated, or the default // version otherwise. func (cli *DockerCli) CurrentVersion() string { _ = cli.initialize() if cli.client == nil { - return api.DefaultVersion + return client.MaxAPIVersion } return cli.client.ClientVersion() } @@ -157,19 +147,13 @@ func (cli *DockerCli) ServerInfo() ServerInfo { return cli.serverInfo } -// ContentTrustEnabled returns whether content trust has been enabled by an -// environment variable. -func (cli *DockerCli) ContentTrustEnabled() bool { - return cli.contentTrust -} - // BuildKitEnabled returns buildkit is enabled or not. func (cli *DockerCli) BuildKitEnabled() (bool, error) { // use DOCKER_BUILDKIT env var value if set and not empty if v := os.Getenv("DOCKER_BUILDKIT"); v != "" { enabled, err := strconv.ParseBool(v) if err != nil { - return false, errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value") + return false, fmt.Errorf("DOCKER_BUILDKIT environment variable expects boolean value: %w", err) } return enabled, nil } @@ -269,7 +253,7 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...CLIOption) cli.contextStore = &ContextStoreWithDefault{ Store: store.New(config.ContextStoreDir(), *cli.contextStoreConfig), Resolver: func() (*DefaultContext, error) { - return ResolveDefaultContext(cli.options, *cli.contextStoreConfig) + return resolveDefaultContext(cli.options, *cli.contextStoreConfig) }, } @@ -306,17 +290,17 @@ func NewAPIClientFromFlags(opts *cliflags.ClientOptions, configFile *configfile. 
contextStore := &ContextStoreWithDefault{ Store: store.New(config.ContextStoreDir(), storeConfig), Resolver: func() (*DefaultContext, error) { - return ResolveDefaultContext(opts, storeConfig) + return resolveDefaultContext(opts, storeConfig) }, } endpoint, err := resolveDockerEndpoint(contextStore, resolveContextName(opts, configFile)) if err != nil { - return nil, errors.Wrap(err, "unable to resolve docker endpoint") + return nil, fmt.Errorf("unable to resolve docker endpoint: %w", err) } - return newAPIClientFromEndpoint(endpoint, configFile) + return newAPIClientFromEndpoint(endpoint, configFile, client.WithUserAgent(UserAgent())) } -func newAPIClientFromEndpoint(ep docker.Endpoint, configFile *configfile.ConfigFile) (client.APIClient, error) { +func newAPIClientFromEndpoint(ep docker.Endpoint, configFile *configfile.ConfigFile, extraOpts ...client.Opt) (client.APIClient, error) { opts, err := ep.ClientOpts() if err != nil { return nil, err @@ -324,8 +308,15 @@ func newAPIClientFromEndpoint(ep docker.Endpoint, configFile *configfile.ConfigF if len(configFile.HTTPHeaders) > 0 { opts = append(opts, client.WithHTTPHeaders(configFile.HTTPHeaders)) } - opts = append(opts, withCustomHeadersFromEnv(), client.WithUserAgent(UserAgent())) - return client.NewClientWithOpts(opts...) + withCustomHeaders, err := withCustomHeadersFromEnv() + if err != nil { + return nil, err + } + if withCustomHeaders != nil { + opts = append(opts, withCustomHeaders) + } + opts = append(opts, extraOpts...) + return client.New(opts...) } func resolveDockerEndpoint(s store.Reader, contextName string) (docker.Endpoint, error) { @@ -386,24 +377,21 @@ func (cli *DockerCli) initializeFromClient() { ctx, cancel := context.WithTimeout(cli.baseCtx, cli.getInitTimeout()) defer cancel() - ping, err := cli.client.Ping(ctx) + ping, err := cli.client.Ping(ctx, client.PingOptions{ + NegotiateAPIVersion: true, + ForceNegotiate: true, + }) if err != nil { // Default to true if we fail to connect to daemon cli.serverInfo = ServerInfo{HasExperimental: true} - - if ping.APIVersion != "" { - cli.client.NegotiateAPIVersionPing(ping) - } return } - cli.serverInfo = ServerInfo{ HasExperimental: ping.Experimental, OSType: ping.OSType, BuildkitVersion: ping.BuilderVersion, SwarmStatus: ping.SwarmStatus, } - cli.client.NegotiateAPIVersionPing(ping) } // ContextStore returns the ContextStore @@ -541,11 +529,12 @@ func (cli *DockerCli) initialize() error { cli.init.Do(func() { cli.dockerEndpoint, cli.initErr = cli.getDockerEndPoint() if cli.initErr != nil { - cli.initErr = errors.Wrap(cli.initErr, "unable to resolve docker endpoint") + cli.initErr = fmt.Errorf("unable to resolve docker endpoint: %w", cli.initErr) return } if cli.client == nil { - if cli.client, cli.initErr = newAPIClientFromEndpoint(cli.dockerEndpoint, cli.configFile); cli.initErr != nil { + ops := []client.Opt{client.WithUserAgent(cli.userAgent)} + if cli.client, cli.initErr = newAPIClientFromEndpoint(cli.dockerEndpoint, cli.configFile, ops...); cli.initErr != nil { return } } @@ -557,16 +546,6 @@ func (cli *DockerCli) initialize() error { return cli.initErr } -// Apply all the operations on the cli -func (cli *DockerCli) Apply(ops ...CLIOption) error { - for _, op := range ops { - if err := op(cli); err != nil { - return err - } - } - return nil -} - // ServerInfo stores details about the supported features and platform of the // server type ServerInfo struct { @@ -581,7 +560,7 @@ type ServerInfo struct { // in the ping response, or if an error occurred, in which case the
client // should use other ways to get the current swarm status, such as the /swarm // endpoint. - SwarmStatus *swarm.Status + SwarmStatus *client.SwarmStatus } // NewDockerCli returns a DockerCli instance with all operators applied on it. @@ -589,15 +568,17 @@ type ServerInfo struct { // environment. func NewDockerCli(ops ...CLIOption) (*DockerCli, error) { defaultOps := []CLIOption{ - WithContentTrustFromEnv(), WithDefaultContextStoreConfig(), WithStandardStreams(), + WithUserAgent(UserAgent()), } ops = append(defaultOps, ops...) cli := &DockerCli{baseCtx: context.Background()} - if err := cli.Apply(ops...); err != nil { - return nil, err + for _, op := range ops { + if err := op(cli); err != nil { + return nil, err + } } return cli, nil } @@ -609,11 +590,11 @@ func getServerHost(hosts []string, defaultToTLS bool) (string, error) { case 1: return dopts.ParseHost(defaultToTLS, hosts[0]) default: - return "", errors.New("Specify only one -H") + return "", errors.New("specify only one -H") } } -// UserAgent returns the user agent string used for making API requests +// UserAgent returns the default user agent string used for making API requests. func UserAgent() string { return "Docker-Client/" + version.Version + " (" + runtime.GOOS + ")" } diff --git a/vendor/github.com/docker/cli/cli/command/cli_options.go b/vendor/github.com/docker/cli/cli/command/cli_options.go index dd3c94733..f787956c5 100644 --- a/vendor/github.com/docker/cli/cli/command/cli_options.go +++ b/vendor/github.com/docker/cli/cli/command/cli_options.go @@ -3,16 +3,16 @@ package command import ( "context" "encoding/csv" + "errors" + "fmt" "io" "net/http" "os" - "strconv" "strings" "github.com/docker/cli/cli/streams" - "github.com/docker/docker/client" + "github.com/moby/moby/client" "github.com/moby/term" - "github.com/pkg/errors" ) // CLIOption is a functional argument to apply options to a [DockerCli]. These @@ -75,28 +75,6 @@ func WithErrorStream(err io.Writer) CLIOption { } } -// WithContentTrustFromEnv enables content trust on a cli from environment variable DOCKER_CONTENT_TRUST value. -func WithContentTrustFromEnv() CLIOption { - return func(cli *DockerCli) error { - cli.contentTrust = false - if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { - if t, err := strconv.ParseBool(e); t || err != nil { - // treat any other value as true - cli.contentTrust = true - } - } - return nil - } -} - -// WithContentTrust enables content trust on a cli. -func WithContentTrust(enabled bool) CLIOption { - return func(cli *DockerCli) error { - cli.contentTrust = enabled - return nil - } -} - // WithDefaultContextStoreConfig configures the cli to use the default context store configuration. func WithDefaultContextStoreConfig() CLIOption { return func(cli *DockerCli) error { @@ -180,61 +158,70 @@ const envOverrideHTTPHeaders = "DOCKER_CUSTOM_HEADERS" // override headers with the same name). // // TODO(thaJeztah): this is a client Option, and should be moved to the client. It is non-exported for that reason. 
-func withCustomHeadersFromEnv() client.Opt { - return func(apiClient *client.Client) error { - value := os.Getenv(envOverrideHTTPHeaders) - if value == "" { - return nil - } - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return invalidParameter(errors.Errorf( - "failed to parse custom headers from %s environment variable: value must be formatted as comma-separated key=value pairs", - envOverrideHTTPHeaders, +func withCustomHeadersFromEnv() (client.Opt, error) { + value := os.Getenv(envOverrideHTTPHeaders) + if value == "" { + return nil, nil + } + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return nil, invalidParameter(fmt.Errorf( + "failed to parse custom headers from %s environment variable: value must be formatted as comma-separated key=value pairs", + envOverrideHTTPHeaders, + )) + } + if len(fields) == 0 { + return nil, nil + } + + env := map[string]string{} + for _, kv := range fields { + k, v, hasValue := strings.Cut(kv, "=") + + // Only strip whitespace in keys; preserve whitespace in values. + k = strings.TrimSpace(k) + + if k == "" { + return nil, invalidParameter(fmt.Errorf( + `failed to set custom headers from %s environment variable: value contains a key=value pair with an empty key: '%s'`, + envOverrideHTTPHeaders, kv, )) } - if len(fields) == 0 { - return nil + + // We don't currently allow empty key=value pairs, and produce an error. + // This is something we could allow in future (e.g. to read value + // from an environment variable with the same name). In the meantime, + // produce an error to prevent users from depending on this. + if !hasValue { + return nil, invalidParameter(fmt.Errorf( + `failed to set custom headers from %s environment variable: missing "=" in key=value pair: '%s'`, + envOverrideHTTPHeaders, kv, + )) } - env := map[string]string{} - for _, kv := range fields { - k, v, hasValue := strings.Cut(kv, "=") + env[http.CanonicalHeaderKey(k)] = v + } - // Only strip whitespace in keys; preserve whitespace in values. - k = strings.TrimSpace(k) + if len(env) == 0 { + // We should probably not hit this case, as we don't skip values + // (only return errors), but we don't want to discard existing + // headers with an empty set. + return nil, nil + } - if k == "" { - return invalidParameter(errors.Errorf( - `failed to set custom headers from %s environment variable: value contains a key=value pair with an empty key: '%s'`, - envOverrideHTTPHeaders, kv, - )) - } + // TODO(thaJeztah): add a client.WithExtraHTTPHeaders() function to allow these headers to be _added_ to existing ones, instead of _replacing_ + // see https://github.com/docker/cli/pull/5098#issuecomment-2147403871 (when updating, also update the WARNING in the function and env-var GoDoc) + return client.WithHTTPHeaders(env), nil +} - // We don't currently allow empty key=value pairs, and produce an error. - // This is something we could allow in future (e.g. to read value - // from an environment variable with the same name). In the meantime, - // produce an error to prevent users from depending on this. - if !hasValue { - return invalidParameter(errors.Errorf( - `failed to set custom headers from %s environment variable: missing "=" in key=value pair: '%s'`, - envOverrideHTTPHeaders, kv, - )) - } - - env[http.CanonicalHeaderKey(k)] = v +// WithUserAgent configures the User-Agent string for cli HTTP requests. 
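// ---- Editor's example (not part of the upstream diff) ----
// The functional-option shape that CLIOption and the WithUserAgent option in
// the next hunk follow: an option is a func that mutates the target and may
// reject bad input, applied in a constructor loop. Types here are renamed to
// hypothetical ones so the sketch stands alone.
package main

import (
	"errors"
	"fmt"
)

type clientish struct{ userAgent string }

type option func(*clientish) error

func withUserAgent(ua string) option {
	return func(c *clientish) error {
		if ua == "" {
			return errors.New("user agent cannot be blank")
		}
		c.userAgent = ua
		return nil
	}
}

func newClientish(opts ...option) (*clientish, error) {
	c := &clientish{}
	for _, op := range opts {
		if err := op(c); err != nil {
			return nil, err
		}
	}
	return c, nil
}

func main() {
	c, err := newClientish(withUserAgent("Docker-Client/29.0 (linux)"))
	if err != nil {
		panic(err)
	}
	fmt.Println(c.userAgent)
}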
+func WithUserAgent(userAgent string) CLIOption { + return func(cli *DockerCli) error { + if userAgent == "" { + return errors.New("user agent cannot be blank") } - - if len(env) == 0 { - // We should probably not hit this case, as we don't skip values - // (only return errors), but we don't want to discard existing - // headers with an empty set. - return nil - } - - // TODO(thaJeztah): add a client.WithExtraHTTPHeaders() function to allow these headers to be _added_ to existing ones, instead of _replacing_ - // see https://github.com/docker/cli/pull/5098#issuecomment-2147403871 (when updating, also update the WARNING in the function and env-var GoDoc) - return client.WithHTTPHeaders(env)(apiClient) + cli.userAgent = userAgent + return nil } } diff --git a/vendor/github.com/docker/cli/cli/command/context.go b/vendor/github.com/docker/cli/cli/command/context.go index 64e88e449..2ca82a5a8 100644 --- a/vendor/github.com/docker/cli/cli/command/context.go +++ b/vendor/github.com/docker/cli/cli/command/context.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package command diff --git a/vendor/github.com/docker/cli/cli/command/defaultcontextstore.go b/vendor/github.com/docker/cli/cli/command/defaultcontextstore.go index 9b49b3af2..e6315b8e8 100644 --- a/vendor/github.com/docker/cli/cli/command/defaultcontextstore.go +++ b/vendor/github.com/docker/cli/cli/command/defaultcontextstore.go @@ -1,13 +1,15 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package command import ( + "errors" + "fmt" + "github.com/docker/cli/cli/context/docker" "github.com/docker/cli/cli/context/store" cliflags "github.com/docker/cli/cli/flags" - "github.com/pkg/errors" ) const ( @@ -51,8 +53,8 @@ type EndpointDefaultResolver interface { ResolveDefault() (any, *store.EndpointTLSData, error) } -// ResolveDefaultContext creates a Metadata for the current CLI invocation parameters -func ResolveDefaultContext(opts *cliflags.ClientOptions, config store.Config) (*DefaultContext, error) { +// resolveDefaultContext creates a Metadata for the current CLI invocation parameters +func resolveDefaultContext(opts *cliflags.ClientOptions, config store.Config) (*DefaultContext, error) { contextTLSData := store.ContextTLSData{ Endpoints: make(map[string]store.EndpointTLSData), } @@ -185,7 +187,7 @@ func (s *ContextStoreWithDefault) GetTLSData(contextName, endpointName, fileName return nil, err } if defaultContext.TLS.Endpoints[endpointName].Files[fileName] == nil { - return nil, notFound(errors.Errorf("TLS data for %s/%s/%s does not exist", DefaultContextName, endpointName, fileName)) + return nil, notFound(fmt.Errorf("TLS data for %s/%s/%s does not exist", DefaultContextName, endpointName, fileName)) } return defaultContext.TLS.Endpoints[endpointName].Files[fileName], nil } diff --git a/vendor/github.com/docker/cli/cli/command/formatter/buildcache.go b/vendor/github.com/docker/cli/cli/command/formatter/buildcache.go index ade5de73f..3a3c34998 100644 --- a/vendor/github.com/docker/cli/cli/command/formatter/buildcache.go +++ b/vendor/github.com/docker/cli/cli/command/formatter/buildcache.go @@ -6,8 +6,8 @@ import ( "strings" "time" - "github.com/docker/docker/api/types/build" "github.com/docker/go-units" + "github.com/moby/moby/api/types/build" ) const ( @@ -51,7 +51,7 @@ shared: 
{{.Shared}} return Format(source) } -func buildCacheSort(buildCache []*build.CacheRecord) { +func buildCacheSort(buildCache []build.CacheRecord) { sort.Slice(buildCache, func(i, j int) bool { lui, luj := buildCache[i].LastUsedAt, buildCache[j].LastUsedAt switch { @@ -70,7 +70,7 @@ func buildCacheSort(buildCache []*build.CacheRecord) { } // BuildCacheWrite renders the context for a list of build cache records -func BuildCacheWrite(ctx Context, buildCaches []*build.CacheRecord) error { +func BuildCacheWrite(ctx Context, buildCaches []build.CacheRecord) error { render := func(format func(subContext SubContext) error) error { buildCacheSort(buildCaches) for _, bc := range buildCaches { @@ -87,7 +87,7 @@ func BuildCacheWrite(ctx Context, buildCaches []*build.CacheRecord) error { type buildCacheContext struct { HeaderContext trunc bool - v *build.CacheRecord + v build.CacheRecord } func newBuildCacheContext() *buildCacheContext { @@ -126,8 +126,6 @@ func (c *buildCacheContext) Parent() string { var parent string if len(c.v.Parents) > 0 { parent = strings.Join(c.v.Parents, ", ") - } else { - parent = c.v.Parent //nolint:staticcheck // Ignore SA1019: Field was deprecated in API v1.42, but kept for backward compatibility } if c.trunc { return TruncateID(parent) diff --git a/vendor/github.com/docker/cli/cli/command/formatter/container.go b/vendor/github.com/docker/cli/cli/command/formatter/container.go index 0a5c587af..979d3eb82 100644 --- a/vendor/github.com/docker/cli/cli/command/formatter/container.go +++ b/vendor/github.com/docker/cli/cli/command/formatter/container.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package formatter @@ -13,8 +13,8 @@ import ( "github.com/containerd/platforms" "github.com/distribution/reference" - "github.com/docker/docker/api/types/container" "github.com/docker/go-units" + "github.com/moby/moby/api/types/container" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -170,27 +170,33 @@ func (c *ContainerContext) Image() string { if c.c.Image == "" { return "" } - if c.trunc { - if trunc := TruncateID(c.c.ImageID); trunc == TruncateID(c.c.Image) { - return trunc + if !c.trunc { + return c.c.Image + } + if trunc := TruncateID(c.c.ImageID); trunc == TruncateID(c.c.Image) { + return trunc + } + ref, err := reference.ParseNormalizedNamed(c.c.Image) + if err != nil { + return c.c.Image + } + + if _, ok := ref.(reference.Digested); ok { + // strip the digest, but preserve the tag (if any) + var tag string + if t, ok := ref.(reference.Tagged); ok { + tag = t.Tag() } - // truncate digest if no-trunc option was not selected - ref, err := reference.ParseNormalizedNamed(c.c.Image) - if err == nil { - if nt, ok := ref.(reference.NamedTagged); ok { - // case for when a tag is provided - if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil { - return reference.FamiliarString(namedTagged) - } - } else { - // case for when a tag is not provided - named := reference.TrimNamed(ref) - return reference.FamiliarString(named) + ref = reference.TrimNamed(ref) + if tag != "" { + if out, err := reference.WithTag(ref, tag); err == nil { + ref = out } } } - return c.c.Image + // Format as "familiar" name with "docker.io[/library]" trimmed. + return reference.FamiliarString(ref) } // Command returns the container's command.
If the trunc option is set, the @@ -241,7 +247,7 @@ func (c *ContainerContext) Ports() string { // State returns the container's current state (e.g. "running" or "paused"). // Refer to [container.ContainerState] for possible states. func (c *ContainerContext) State() string { - return c.c.State + return string(c.c.State) } // Status returns the container's status in a human readable form (for example, @@ -338,7 +344,7 @@ func (c *ContainerContext) Networks() string { // DisplayablePorts returns formatted string representing open ports of container // e.g. "0.0.0.0:80->9090/tcp, 9988/tcp" // it's used by command 'docker ps' -func DisplayablePorts(ports []container.Port) string { +func DisplayablePorts(ports []container.PortSummary) string { type portGroup struct { first uint16 last uint16 @@ -354,13 +360,13 @@ func DisplayablePorts(ports []container.Port) string { for _, port := range ports { current := port.PrivatePort portKey := port.Type - if port.IP != "" { + if port.IP.IsValid() { if port.PublicPort != current { - hAddrPort := net.JoinHostPort(port.IP, strconv.Itoa(int(port.PublicPort))) + hAddrPort := net.JoinHostPort(port.IP.String(), strconv.Itoa(int(port.PublicPort))) hostMappings = append(hostMappings, fmt.Sprintf("%s->%d/%s", hAddrPort, port.PrivatePort, port.Type)) continue } - portKey = port.IP + "/" + port.Type + portKey = port.IP.String() + "/" + port.Type } group := groupMap[portKey] @@ -404,13 +410,13 @@ func formGroup(key string, start, last uint16) string { return group + "/" + groupType } -func comparePorts(i, j container.Port) bool { +func comparePorts(i, j container.PortSummary) bool { if i.PrivatePort != j.PrivatePort { return i.PrivatePort < j.PrivatePort } if i.IP != j.IP { - return i.IP < j.IP + return i.IP.String() < j.IP.String() } if i.PublicPort != j.PublicPort { diff --git a/vendor/github.com/docker/cli/cli/command/formatter/custom.go b/vendor/github.com/docker/cli/cli/command/formatter/custom.go index c2b9cb2c5..27931af76 100644 --- a/vendor/github.com/docker/cli/cli/command/formatter/custom.go +++ b/vendor/github.com/docker/cli/cli/command/formatter/custom.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package formatter diff --git a/vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go b/vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go index b663c59b2..b14afc1a0 100644 --- a/vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go +++ b/vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go @@ -7,19 +7,20 @@ import ( "text/template" "github.com/distribution/reference" - "github.com/docker/docker/api/types/build" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/volume" "github.com/docker/go-units" + "github.com/moby/moby/api/types/build" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/image" + "github.com/moby/moby/api/types/volume" + "github.com/moby/moby/client" ) const ( - defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}}\t{{.Size}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}" - defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}}\t{{.Status}}\t{{.Names}}" - defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}" - 
defaultDiskUsageBuildCacheTableFormat = "table {{.ID}}\t{{.CacheType}}\t{{.Size}}\t{{.CreatedSince}}\t{{.LastUsedSince}}\t{{.UsageCount}}\t{{.Shared}}" - defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}" + defaultDiskUsageImageTableFormat Format = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}}\t{{.Size}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}" + defaultDiskUsageContainerTableFormat Format = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}}\t{{.Status}}\t{{.Names}}" + defaultDiskUsageVolumeTableFormat Format = "table {{.Name}}\t{{.Links}}\t{{.Size}}" + defaultDiskUsageBuildCacheTableFormat Format = "table {{.ID}}\t{{.CacheType}}\t{{.Size}}\t{{.CreatedSince}}\t{{.LastUsedSince}}\t{{.UsageCount}}\t{{.Shared}}" + defaultDiskUsageTableFormat Format = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}" typeHeader = "TYPE" totalHeader = "TOTAL" @@ -33,19 +34,18 @@ const ( // DiskUsageContext contains disk usage specific information required by the formatter, encapsulate a Context struct. type DiskUsageContext struct { Context - Verbose bool - LayersSize int64 - Images []*image.Summary - Containers []*container.Summary - Volumes []*volume.Volume - BuildCache []*build.CacheRecord - BuilderSize int64 + Verbose bool + + ImageDiskUsage client.ImagesDiskUsage + BuildCacheDiskUsage client.BuildCacheDiskUsage + ContainerDiskUsage client.ContainersDiskUsage + VolumeDiskUsage client.VolumesDiskUsage } -func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) { +func (ctx *DiskUsageContext) startSubsection(format Format) (*template.Template, error) { ctx.buffer = &bytes.Buffer{} ctx.header = "" - ctx.Format = Format(format) + ctx.Format = format ctx.preFormat() return ctx.parseFormat() @@ -69,7 +69,7 @@ func NewDiskUsageFormat(source string, verbose bool) Format { {{end -}}` return format case !verbose && source == TableFormatKey: - return Format(defaultDiskUsageTableFormat) + return defaultDiskUsageTableFormat case !verbose && source == RawFormatKey: format := `type: {{.Type}} total: {{.TotalCount}} @@ -96,35 +96,49 @@ func (ctx *DiskUsageContext) Write() (err error) { } err = ctx.contextFormat(tmpl, &diskUsageImagesContext{ - totalSize: ctx.LayersSize, - images: ctx.Images, + totalCount: ctx.ImageDiskUsage.TotalCount, + activeCount: ctx.ImageDiskUsage.ActiveCount, + totalSize: ctx.ImageDiskUsage.TotalSize, + reclaimable: ctx.ImageDiskUsage.Reclaimable, + images: ctx.ImageDiskUsage.Items, }) if err != nil { return err } err = ctx.contextFormat(tmpl, &diskUsageContainersContext{ - containers: ctx.Containers, + totalCount: ctx.ContainerDiskUsage.TotalCount, + activeCount: ctx.ContainerDiskUsage.ActiveCount, + totalSize: ctx.ContainerDiskUsage.TotalSize, + reclaimable: ctx.ContainerDiskUsage.Reclaimable, + containers: ctx.ContainerDiskUsage.Items, }) if err != nil { return err } err = ctx.contextFormat(tmpl, &diskUsageVolumesContext{ - volumes: ctx.Volumes, + totalCount: ctx.VolumeDiskUsage.TotalCount, + activeCount: ctx.VolumeDiskUsage.ActiveCount, + totalSize: ctx.VolumeDiskUsage.TotalSize, + reclaimable: ctx.VolumeDiskUsage.Reclaimable, + volumes: ctx.VolumeDiskUsage.Items, }) if err != nil { return err } err = ctx.contextFormat(tmpl, &diskUsageBuilderContext{ - builderSize: ctx.BuilderSize, - buildCache: ctx.BuildCache, + totalCount: ctx.BuildCacheDiskUsage.TotalCount, + activeCount: ctx.BuildCacheDiskUsage.ActiveCount, + 
builderSize: ctx.BuildCacheDiskUsage.TotalSize, + reclaimable: ctx.BuildCacheDiskUsage.Reclaimable, + buildCache: ctx.BuildCacheDiskUsage.Items, }) if err != nil { return err } - diskUsageContainersCtx := diskUsageContainersContext{containers: []*container.Summary{}} + diskUsageContainersCtx := diskUsageContainersContext{containers: []container.Summary{}} diskUsageContainersCtx.Header = SubHeaderContext{ "Type": typeHeader, "TotalCount": totalHeader, @@ -146,18 +160,18 @@ type diskUsageContext struct { func (ctx *DiskUsageContext) verboseWrite() error { duc := &diskUsageContext{ - Images: make([]*imageContext, 0, len(ctx.Images)), - Containers: make([]*ContainerContext, 0, len(ctx.Containers)), - Volumes: make([]*volumeContext, 0, len(ctx.Volumes)), - BuildCache: make([]*buildCacheContext, 0, len(ctx.BuildCache)), + Images: make([]*imageContext, 0, len(ctx.ImageDiskUsage.Items)), + Containers: make([]*ContainerContext, 0, len(ctx.ContainerDiskUsage.Items)), + Volumes: make([]*volumeContext, 0, len(ctx.VolumeDiskUsage.Items)), + BuildCache: make([]*buildCacheContext, 0, len(ctx.BuildCacheDiskUsage.Items)), } trunc := ctx.Format.IsTable() // First images - for _, i := range ctx.Images { + for _, i := range ctx.ImageDiskUsage.Items { repo := "" tag := "" - if len(i.RepoTags) > 0 && !isDangling(*i) { + if len(i.RepoTags) > 0 && !isDangling(i) { // Only show the first tag ref, err := reference.ParseNormalizedNamed(i.RepoTags[0]) if err != nil { @@ -173,25 +187,25 @@ func (ctx *DiskUsageContext) verboseWrite() error { repo: repo, tag: tag, trunc: trunc, - i: *i, + i: i, }) } // Now containers - for _, c := range ctx.Containers { + for _, c := range ctx.ContainerDiskUsage.Items { // Don't display the virtual size c.SizeRootFs = 0 - duc.Containers = append(duc.Containers, &ContainerContext{trunc: trunc, c: *c}) + duc.Containers = append(duc.Containers, &ContainerContext{trunc: trunc, c: c}) } // And volumes - for _, v := range ctx.Volumes { - duc.Volumes = append(duc.Volumes, &volumeContext{v: *v}) + for _, v := range ctx.VolumeDiskUsage.Items { + duc.Volumes = append(duc.Volumes, &volumeContext{v: v}) } // And build cache - buildCacheSort(ctx.BuildCache) - for _, v := range ctx.BuildCache { + buildCacheSort(ctx.BuildCacheDiskUsage.Items) + for _, v := range ctx.BuildCacheDiskUsage.Items { duc.BuildCache = append(duc.BuildCache, &buildCacheContext{v: v, trunc: trunc}) } @@ -212,7 +226,7 @@ func (ctx *DiskUsageContext) verboseWriteTable(duc *diskUsageContext) error { if err != nil { return err } - ctx.Output.Write([]byte("Images space usage:\n\n")) + _, _ = ctx.Output.Write([]byte("Images space usage:\n\n")) for _, img := range duc.Images { if err := ctx.contextFormat(tmpl, img); err != nil { return err @@ -224,7 +238,7 @@ func (ctx *DiskUsageContext) verboseWriteTable(duc *diskUsageContext) error { if err != nil { return err } - ctx.Output.Write([]byte("\nContainers space usage:\n\n")) + _, _ = ctx.Output.Write([]byte("\nContainers space usage:\n\n")) for _, c := range duc.Containers { if err := ctx.contextFormat(tmpl, c); err != nil { return err @@ -248,7 +262,7 @@ func (ctx *DiskUsageContext) verboseWriteTable(duc *diskUsageContext) error { if err != nil { return err } - _, _ = fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize))) + _, _ = fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuildCacheDiskUsage.TotalSize))) for _, v := range duc.BuildCache { if err := ctx.contextFormat(tmpl, v); err != nil { return err @@ 
-261,8 +275,11 @@ func (ctx *DiskUsageContext) verboseWriteTable(duc *diskUsageContext) error { type diskUsageImagesContext struct { HeaderContext - totalSize int64 - images []*image.Summary + totalSize int64 + reclaimable int64 + totalCount int64 + activeCount int64 + images []image.Summary } func (c *diskUsageImagesContext) MarshalJSON() ([]byte, error) { @@ -274,18 +291,11 @@ func (*diskUsageImagesContext) Type() string { } func (c *diskUsageImagesContext) TotalCount() string { - return strconv.Itoa(len(c.images)) + return strconv.FormatInt(c.totalCount, 10) } func (c *diskUsageImagesContext) Active() string { - used := 0 - for _, i := range c.images { - if i.Containers > 0 { - used++ - } - } - - return strconv.Itoa(used) + return strconv.FormatInt(c.activeCount, 10) } func (c *diskUsageImagesContext) Size() string { @@ -293,27 +303,19 @@ func (c *diskUsageImagesContext) Size() string { } func (c *diskUsageImagesContext) Reclaimable() string { - var used int64 - - for _, i := range c.images { - if i.Containers != 0 { - if i.Size == -1 || i.SharedSize == -1 { - continue - } - used += i.Size - i.SharedSize - } - } - - reclaimable := c.totalSize - used if c.totalSize > 0 { - return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/c.totalSize) + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(c.reclaimable)), (c.reclaimable*100)/c.totalSize) } - return units.HumanSize(float64(reclaimable)) + return units.HumanSize(float64(c.reclaimable)) } type diskUsageContainersContext struct { HeaderContext - containers []*container.Summary + totalCount int64 + activeCount int64 + totalSize int64 + reclaimable int64 + containers []container.Summary } func (c *diskUsageContainersContext) MarshalJSON() ([]byte, error) { @@ -325,62 +327,32 @@ func (*diskUsageContainersContext) Type() string { } func (c *diskUsageContainersContext) TotalCount() string { - return strconv.Itoa(len(c.containers)) -} - -func (*diskUsageContainersContext) isActive(ctr container.Summary) bool { - switch ctr.State { - case container.StateRunning, container.StatePaused, container.StateRestarting: - return true - case container.StateCreated, container.StateRemoving, container.StateExited, container.StateDead: - return false - default: - // Unknown state (should never happen). 
- return false - } + return strconv.FormatInt(c.totalCount, 10) } func (c *diskUsageContainersContext) Active() string { - used := 0 - for _, ctr := range c.containers { - if c.isActive(*ctr) { - used++ - } - } - - return strconv.Itoa(used) + return strconv.FormatInt(c.activeCount, 10) } func (c *diskUsageContainersContext) Size() string { - var size int64 - - for _, ctr := range c.containers { - size += ctr.SizeRw - } - - return units.HumanSize(float64(size)) + return units.HumanSize(float64(c.totalSize)) } func (c *diskUsageContainersContext) Reclaimable() string { - var reclaimable, totalSize int64 - - for _, ctr := range c.containers { - if !c.isActive(*ctr) { - reclaimable += ctr.SizeRw - } - totalSize += ctr.SizeRw + if c.totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(c.reclaimable)), (c.reclaimable*100)/c.totalSize) } - if totalSize > 0 { - return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) - } - - return units.HumanSize(float64(reclaimable)) + return units.HumanSize(float64(c.reclaimable)) } type diskUsageVolumesContext struct { HeaderContext - volumes []*volume.Volume + totalCount int64 + activeCount int64 + totalSize int64 + reclaimable int64 + volumes []volume.Volume } func (c *diskUsageVolumesContext) MarshalJSON() ([]byte, error) { @@ -392,56 +364,32 @@ func (*diskUsageVolumesContext) Type() string { } func (c *diskUsageVolumesContext) TotalCount() string { - return strconv.Itoa(len(c.volumes)) + return strconv.FormatInt(c.totalCount, 10) } func (c *diskUsageVolumesContext) Active() string { - used := 0 - for _, v := range c.volumes { - if v.UsageData.RefCount > 0 { - used++ - } - } - - return strconv.Itoa(used) + return strconv.FormatInt(c.activeCount, 10) } func (c *diskUsageVolumesContext) Size() string { - var size int64 - - for _, v := range c.volumes { - if v.UsageData.Size != -1 { - size += v.UsageData.Size - } - } - - return units.HumanSize(float64(size)) + return units.HumanSize(float64(c.totalSize)) } func (c *diskUsageVolumesContext) Reclaimable() string { - var reclaimable int64 - var totalSize int64 - - for _, v := range c.volumes { - if v.UsageData.Size != -1 { - if v.UsageData.RefCount == 0 { - reclaimable += v.UsageData.Size - } - totalSize += v.UsageData.Size - } + if c.totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(c.reclaimable)), (c.reclaimable*100)/c.totalSize) } - if totalSize > 0 { - return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) - } - - return units.HumanSize(float64(reclaimable)) + return units.HumanSize(float64(c.reclaimable)) } type diskUsageBuilderContext struct { HeaderContext + totalCount int64 + activeCount int64 builderSize int64 - buildCache []*build.CacheRecord + reclaimable int64 + buildCache []build.CacheRecord } func (c *diskUsageBuilderContext) MarshalJSON() ([]byte, error) { @@ -453,17 +401,11 @@ func (*diskUsageBuilderContext) Type() string { } func (c *diskUsageBuilderContext) TotalCount() string { - return strconv.Itoa(len(c.buildCache)) + return strconv.FormatInt(c.totalCount, 10) } func (c *diskUsageBuilderContext) Active() string { - numActive := 0 - for _, bc := range c.buildCache { - if bc.InUse { - numActive++ - } - } - return strconv.Itoa(numActive) + return strconv.FormatInt(c.activeCount, 10) } func (c *diskUsageBuilderContext) Size() string { @@ -471,12 +413,5 @@ func (c *diskUsageBuilderContext) Size() string { } func (c *diskUsageBuilderContext) Reclaimable() string { - 
var inUseBytes int64 - for _, bc := range c.buildCache { - if bc.InUse && !bc.Shared { - inUseBytes += bc.Size - } - } - - return units.HumanSize(float64(c.builderSize - inUseBytes)) + return units.HumanSize(float64(c.reclaimable)) } diff --git a/vendor/github.com/docker/cli/cli/command/formatter/displayutils.go b/vendor/github.com/docker/cli/cli/command/formatter/displayutils.go index b062c3391..7b0bd687f 100644 --- a/vendor/github.com/docker/cli/cli/command/formatter/displayutils.go +++ b/vendor/github.com/docker/cli/cli/command/formatter/displayutils.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package formatter @@ -8,6 +8,7 @@ import ( "strings" "unicode/utf8" + "github.com/moby/moby/client/pkg/stringid" "golang.org/x/text/width" ) @@ -27,23 +28,12 @@ func charWidth(r rune) int { } } -const shortLen = 12 - // TruncateID returns a shorthand version of a string identifier for presentation, // after trimming digest algorithm prefix (if any). // -// This function is a copy of [stringid.TruncateID] for presentation / formatting -// purposes. -// -// [stringid.TruncateID]: https://github.com/moby/moby/blob/v28.3.2/pkg/stringid/stringid.go#L19 +// This function is a wrapper for [stringid.TruncateID] for convenience. func TruncateID(id string) string { - if i := strings.IndexRune(id, ':'); i >= 0 { - id = id[i+1:] - } - if len(id) > shortLen { - id = id[:shortLen] - } - return id + return stringid.TruncateID(id) } // Ellipsis truncates a string to fit within maxDisplayWidth, and appends ellipsis (…). diff --git a/vendor/github.com/docker/cli/cli/command/formatter/formatter.go b/vendor/github.com/docker/cli/cli/command/formatter/formatter.go index 7803cabe4..88905cd1b 100644 --- a/vendor/github.com/docker/cli/cli/command/formatter/formatter.go +++ b/vendor/github.com/docker/cli/cli/command/formatter/formatter.go @@ -1,17 +1,17 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package formatter import ( "bytes" + "fmt" "io" "strings" "text/template" "github.com/docker/cli/cli/command/formatter/tabwriter" "github.com/docker/cli/templates" - "github.com/pkg/errors" ) // Format keys used to specify certain kinds of output formats @@ -76,7 +76,7 @@ func (c *Context) preFormat() { func (c *Context) parseFormat() (*template.Template, error) { tmpl, err := templates.Parse(c.finalFormat) if err != nil { - return nil, errors.Wrap(err, "template parsing error") + return nil, fmt.Errorf("template parsing error: %w", err) } return tmpl, nil } @@ -100,7 +100,7 @@ func (c *Context) postFormat(tmpl *template.Template, subContext SubContext) { func (c *Context) contextFormat(tmpl *template.Template, subContext SubContext) error { if err := tmpl.Execute(c.buffer, subContext); err != nil { - return errors.Wrap(err, "template parsing error") + return fmt.Errorf("template parsing error: %w", err) } if c.Format.IsTable() && c.header != nil { c.header = subContext.FullHeader() diff --git a/vendor/github.com/docker/cli/cli/command/formatter/image.go b/vendor/github.com/docker/cli/cli/command/formatter/image.go index 74c2fe758..d24bf5094 100644 --- a/vendor/github.com/docker/cli/cli/command/formatter/image.go +++ b/vendor/github.com/docker/cli/cli/command/formatter/image.go @@ -5,8 +5,8 @@ import ( "time" "github.com/distribution/reference" - 
"github.com/docker/docker/api/types/image" "github.com/docker/go-units" + "github.com/moby/moby/api/types/image" ) const ( @@ -202,7 +202,6 @@ func newImageContext() *imageContext { "CreatedAt": CreatedAtHeader, "Size": SizeHeader, "Containers": containersHeader, - "VirtualSize": SizeHeader, // Deprecated: VirtualSize is deprecated, and equivalent to Size. "SharedSize": sharedSizeHeader, "UniqueSize": uniqueSizeHeader, } @@ -257,15 +256,6 @@ func (c *imageContext) Containers() string { return strconv.FormatInt(c.i.Containers, 10) } -// VirtualSize shows the virtual size of the image and all of its parent -// images. Starting with docker 1.10, images are self-contained, and -// the VirtualSize is identical to Size. -// -// Deprecated: VirtualSize is deprecated, and equivalent to [imageContext.Size]. -func (c *imageContext) VirtualSize() string { - return units.HumanSize(float64(c.i.Size)) -} - func (c *imageContext) SharedSize() string { if c.i.SharedSize == -1 { return "N/A" diff --git a/vendor/github.com/docker/cli/cli/command/formatter/reflect.go b/vendor/github.com/docker/cli/cli/command/formatter/reflect.go index 316583376..89da07b8a 100644 --- a/vendor/github.com/docker/cli/cli/command/formatter/reflect.go +++ b/vendor/github.com/docker/cli/cli/command/formatter/reflect.go @@ -1,14 +1,14 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package formatter import ( "encoding/json" + "errors" + "fmt" "reflect" "unicode" - - "github.com/pkg/errors" ) // MarshalJSON marshals x into json @@ -25,14 +25,14 @@ func MarshalJSON(x any) ([]byte, error) { func marshalMap(x any) (map[string]any, error) { val := reflect.ValueOf(x) if val.Kind() != reflect.Ptr { - return nil, errors.Errorf("expected a pointer to a struct, got %v", val.Kind()) + return nil, fmt.Errorf("expected a pointer to a struct, got %v", val.Kind()) } if val.IsNil() { - return nil, errors.Errorf("expected a pointer to a struct, got nil pointer") + return nil, errors.New("expected a pointer to a struct, got nil pointer") } valElem := val.Elem() if valElem.Kind() != reflect.Struct { - return nil, errors.Errorf("expected a pointer to a struct, got a pointer to %v", valElem.Kind()) + return nil, fmt.Errorf("expected a pointer to a struct, got a pointer to %v", valElem.Kind()) } typ := val.Type() m := make(map[string]any) @@ -54,7 +54,7 @@ var unmarshallableNames = map[string]struct{}{"FullHeader": {}} // It returns ("", nil, nil) for valid but non-marshallable parameter. (e.g. 
"unexportedFunc()") func marshalForMethod(typ reflect.Method, val reflect.Value) (string, any, error) { if val.Kind() != reflect.Func { - return "", nil, errors.Errorf("expected func, got %v", val.Kind()) + return "", nil, fmt.Errorf("expected func, got %v", val.Kind()) } name, numIn, numOut := typ.Name, val.Type().NumIn(), val.Type().NumOut() _, blackListed := unmarshallableNames[name] diff --git a/vendor/github.com/docker/cli/cli/command/formatter/volume.go b/vendor/github.com/docker/cli/cli/command/formatter/volume.go index bf9ea5d44..e3d4b1922 100644 --- a/vendor/github.com/docker/cli/cli/command/formatter/volume.go +++ b/vendor/github.com/docker/cli/cli/command/formatter/volume.go @@ -5,8 +5,8 @@ import ( "strconv" "strings" - "github.com/docker/docker/api/types/volume" "github.com/docker/go-units" + "github.com/moby/moby/api/types/volume" ) const ( @@ -40,10 +40,10 @@ func NewVolumeFormat(source string, quiet bool) Format { } // VolumeWrite writes formatted volumes using the Context -func VolumeWrite(ctx Context, volumes []*volume.Volume) error { +func VolumeWrite(ctx Context, volumes []volume.Volume) error { render := func(format func(subContext SubContext) error) error { for _, vol := range volumes { - if err := format(&volumeContext{v: *vol}); err != nil { + if err := format(&volumeContext{v: vol}); err != nil { return err } } diff --git a/vendor/github.com/docker/cli/cli/command/registry.go b/vendor/github.com/docker/cli/cli/command/registry.go index dd6e74d4d..87c81c6da 100644 --- a/vendor/github.com/docker/cli/cli/command/registry.go +++ b/vendor/github.com/docker/cli/cli/command/registry.go @@ -2,6 +2,7 @@ package command import ( "context" + "errors" "fmt" "os" "runtime" @@ -15,9 +16,9 @@ import ( "github.com/docker/cli/cli/streams" "github.com/docker/cli/internal/prompt" "github.com/docker/cli/internal/tui" - registrytypes "github.com/docker/docker/api/types/registry" + "github.com/moby/moby/api/pkg/authconfig" + registrytypes "github.com/moby/moby/api/types/registry" "github.com/morikuni/aec" - "github.com/pkg/errors" ) const ( @@ -34,42 +35,11 @@ const ( // [registry.IndexServer]: https://pkg.go.dev/github.com/docker/docker@v28.3.3+incompatible/registry#IndexServer const authConfigKey = "https://index.docker.io/v1/" -// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info -// for the given command to prompt the user for username and password. -// -// Deprecated: this function is no longer used and will be removed in the next release. -func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) registrytypes.RequestAuthConfig { - configKey := getAuthConfigKey(index.Name) - isDefaultRegistry := configKey == authConfigKey || index.Official - return func(ctx context.Context) (string, error) { - _, _ = fmt.Fprintf(cli.Out(), "\nLogin prior to %s:\n", cmdName) - authConfig, err := GetDefaultAuthConfig(cli.ConfigFile(), true, configKey, isDefaultRegistry) - if err != nil { - _, _ = fmt.Fprintf(cli.Err(), "Unable to retrieve stored credentials for %s, error: %s.\n", configKey, err) - } - - select { - case <-ctx.Done(): - return "", ctx.Err() - default: - } - - authConfig, err = PromptUserForCredentials(ctx, cli, "", "", authConfig.Username, configKey) - if err != nil { - return "", err - } - return registrytypes.EncodeAuthConfig(authConfig) - } -} - // ResolveAuthConfig returns auth-config for the given registry from the // credential-store. 
It returns an empty AuthConfig if no credentials were // found. // -// It is similar to [registry.ResolveAuthConfig], but uses the credentials- -// store, instead of looking up credentials from a map. -// -// [registry.ResolveAuthConfig]: https://pkg.go.dev/github.com/docker/docker@v28.3.3+incompatible/registry#ResolveAuthConfig +// Deprecated: this function is no longer used, and will be removed in the next release. func ResolveAuthConfig(cfg *configfile.ConfigFile, index *registrytypes.IndexInfo) registrytypes.AuthConfig { configKey := index.Name if index.Official { @@ -77,7 +47,16 @@ func ResolveAuthConfig(cfg *configfile.ConfigFile, index *registrytypes.IndexInf } a, _ := cfg.GetAuthConfig(configKey) - return registrytypes.AuthConfig(a) + return registrytypes.AuthConfig{ + Username: a.Username, + Password: a.Password, + ServerAddress: a.ServerAddress, + + // TODO(thaJeztah): Are these expected to be included? + Auth: a.Auth, + IdentityToken: a.IdentityToken, + RegistryToken: a.RegistryToken, + } } // GetDefaultAuthConfig gets the default auth config given a serverAddress @@ -86,19 +65,27 @@ func GetDefaultAuthConfig(cfg *configfile.ConfigFile, checkCredStore bool, serve if !isDefaultRegistry { serverAddress = credentials.ConvertToHostname(serverAddress) } - authconfig := configtypes.AuthConfig{} + authCfg := configtypes.AuthConfig{} var err error if checkCredStore { - authconfig, err = cfg.GetAuthConfig(serverAddress) + authCfg, err = cfg.GetAuthConfig(serverAddress) if err != nil { return registrytypes.AuthConfig{ ServerAddress: serverAddress, }, err } } - authconfig.ServerAddress = serverAddress - authconfig.IdentityToken = "" - return registrytypes.AuthConfig(authconfig), nil + + return registrytypes.AuthConfig{ + Username: authCfg.Username, + Password: authCfg.Password, + ServerAddress: serverAddress, + + // TODO(thaJeztah): Are these expected to be included? + Auth: authCfg.Auth, + IdentityToken: "", + RegistryToken: authCfg.RegistryToken, + }, nil } // PromptUserForCredentials handles the CLI prompt for the user to input @@ -153,7 +140,7 @@ func PromptUserForCredentials(ctx context.Context, cli Cli, argUser, argPassword argUser = defaultUsername } if argUser == "" { - return registrytypes.AuthConfig{}, errors.Errorf("Error: Non-null Username Required") + return registrytypes.AuthConfig{}, errors.New("error: username is required") } } @@ -185,7 +172,7 @@ func PromptUserForCredentials(ctx context.Context, cli Cli, argUser, argPassword } _, _ = fmt.Fprintln(cli.Out()) if argPassword == "" { - return registrytypes.AuthConfig{}, errors.Errorf("Error: Password Required") + return registrytypes.AuthConfig{}, errors.New("error: password is required") } } @@ -213,7 +200,16 @@ func RetrieveAuthTokenFromImage(cfg *configfile.ConfigFile, image string) (strin return "", err } - encodedAuth, err := registrytypes.EncodeAuthConfig(registrytypes.AuthConfig(authConfig)) + encodedAuth, err := authconfig.Encode(registrytypes.AuthConfig{ + Username: authConfig.Username, + Password: authConfig.Password, + ServerAddress: authConfig.ServerAddress, + + // TODO(thaJeztah): Are these expected to be included? 
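
The field-by-field copies here (and in GetDefaultAuthConfig and RetrieveAuthTokenFromImage below) replace the previous one-shot conversion registrytypes.AuthConfig(a): Go only permits converting between struct types whose field sets match exactly, and with the registry types now coming from the moby/moby/api module that is no longer guaranteed. A reduced illustration with stand-in types (the field sets are invented for the example, not the real definitions):

    package main

    import "fmt"

    // Stand-ins for the CLI-config and API AuthConfig types; the extra
    // Email field is invented to show why the direct conversion fails.
    type cliAuthConfig struct {
        Username, Password, ServerAddress, Email string
    }

    type apiAuthConfig struct {
        Username, Password, ServerAddress string
    }

    func main() {
        a := cliAuthConfig{Username: "u", Password: "p", ServerAddress: "registry.example.com"}
        // _ = apiAuthConfig(a) // compile error: field sets differ
        b := apiAuthConfig{
            Username:      a.Username,
            Password:      a.Password,
            ServerAddress: a.ServerAddress,
        }
        fmt.Printf("%+v\n", b)
    }
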
+ Auth: authConfig.Auth, + IdentityToken: authConfig.IdentityToken, + RegistryToken: authConfig.RegistryToken, + }) if err != nil { return "", err } diff --git a/vendor/github.com/docker/cli/cli/command/service/progress/progress.go b/vendor/github.com/docker/cli/cli/command/service/progress/progress.go index 5f87e2912..a4de39b8f 100644 --- a/vendor/github.com/docker/cli/cli/command/service/progress/progress.go +++ b/vendor/github.com/docker/cli/cli/command/service/progress/progress.go @@ -12,11 +12,10 @@ import ( "time" "github.com/docker/cli/cli/command/formatter" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" + "github.com/moby/moby/api/types/swarm" + "github.com/moby/moby/client" + "github.com/moby/moby/client/pkg/progress" + "github.com/moby/moby/client/pkg/streamformatter" ) var ( @@ -88,24 +87,24 @@ func ServiceProgress(ctx context.Context, apiClient client.APIClient, serviceID ) for { - service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID, swarm.ServiceInspectOptions{}) + res, err := apiClient.ServiceInspect(ctx, serviceID, client.ServiceInspectOptions{}) if err != nil { return err } - if service.Spec.UpdateConfig != nil && service.Spec.UpdateConfig.Monitor != 0 { - monitor = service.Spec.UpdateConfig.Monitor + if res.Service.Spec.UpdateConfig != nil && res.Service.Spec.UpdateConfig.Monitor != 0 { + monitor = res.Service.Spec.UpdateConfig.Monitor } if updater == nil { - updater, err = initializeUpdater(service, progressOut) + updater, err = initializeUpdater(res.Service, progressOut) if err != nil { return err } } - if service.UpdateStatus != nil { - switch service.UpdateStatus.State { + if res.Service.UpdateStatus != nil { + switch res.Service.UpdateStatus.State { case swarm.UpdateStateUpdating: rollback = false case swarm.UpdateStateCompleted: @@ -113,39 +112,38 @@ func ServiceProgress(ctx context.Context, apiClient client.APIClient, serviceID return nil } case swarm.UpdateStatePaused: - return fmt.Errorf("service update paused: %s", service.UpdateStatus.Message) + return fmt.Errorf("service update paused: %s", res.Service.UpdateStatus.Message) case swarm.UpdateStateRollbackStarted: - if !rollback && service.UpdateStatus.Message != "" { + if !rollback && res.Service.UpdateStatus.Message != "" { progressOut.WriteProgress(progress.Progress{ ID: "rollback", - Action: service.UpdateStatus.Message, + Action: res.Service.UpdateStatus.Message, }) } rollback = true case swarm.UpdateStateRollbackPaused: - return fmt.Errorf("service rollback paused: %s", service.UpdateStatus.Message) + return fmt.Errorf("service rollback paused: %s", res.Service.UpdateStatus.Message) case swarm.UpdateStateRollbackCompleted: if !converged { - message = &progress.Progress{ID: "rollback", Message: service.UpdateStatus.Message} + message = &progress.Progress{ID: "rollback", Message: res.Service.UpdateStatus.Message} } rollback = true } } if converged && time.Since(convergedAt) >= monitor { - progressOut.WriteProgress(progress.Progress{ + _ = progressOut.WriteProgress(progress.Progress{ ID: "verify", Action: fmt.Sprintf("Service %s converged", serviceID), }) if message != nil { - progressOut.WriteProgress(*message) + _ = progressOut.WriteProgress(*message) } return nil } - tasks, err := apiClient.TaskList(ctx, swarm.TaskListOptions{Filters: filters.NewArgs( - filters.KeyValuePair{Key: "service", Value: service.ID}, - 
filters.KeyValuePair{Key: "_up-to-date", Value: "true"}, - )}) + tasks, err := apiClient.TaskList(ctx, client.TaskListOptions{ + Filters: make(client.Filters).Add("service", res.Service.ID).Add("_up-to-date", "true"), + }) if err != nil { return err } @@ -155,7 +153,7 @@ func ServiceProgress(ctx context.Context, apiClient client.APIClient, serviceID return err } - converged, err = updater.update(service, tasks, activeNodes, rollback) + converged, err = updater.update(res.Service, tasks.Items, activeNodes, rollback) if err != nil { return err } @@ -167,7 +165,7 @@ func ServiceProgress(ctx context.Context, apiClient client.APIClient, serviceID // only job services have a non-nil job status, which means we can // use the presence of this field to check if the service is a job // here. - if service.JobStatus != nil { + if res.Service.JobStatus != nil { progress.Message(progressOut, "", "job complete") return nil } @@ -177,7 +175,7 @@ func ServiceProgress(ctx context.Context, apiClient client.APIClient, serviceID } wait := monitor - time.Since(convergedAt) if wait >= 0 { - progressOut.WriteProgress(progress.Progress{ + _ = progressOut.WriteProgress(progress.Progress{ // Ideally this would have no ID, but // the progress rendering code behaves // poorly on an "action" with no ID. It @@ -192,7 +190,7 @@ func ServiceProgress(ctx context.Context, apiClient client.APIClient, serviceID } } else { if !convergedAt.IsZero() { - progressOut.WriteProgress(progress.Progress{ + _ = progressOut.WriteProgress(progress.Progress{ ID: "verify", Action: "Detected task failure", }) @@ -216,13 +214,13 @@ func ServiceProgress(ctx context.Context, apiClient client.APIClient, serviceID // // TODO(thaJeztah): this should really be a filter on [apiClient.NodeList] instead of being filtered on the client side. 
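
The TaskList and NodeList calls above move from filters.Args/swarm option structs to the request types that now live in github.com/moby/moby/client. Two properties of the new client.Filters type are relied on in this diff: Add is chainable (make(client.Filters).Add(...).Add(...)), and in PruneFilters below values are membership-tested by plain map indexing. A minimal stand-in type reproducing just that observed behaviour (the real definition lives in the moby client package):

    package main

    import "fmt"

    // Filters is a stand-in for client.Filters, illustrative only: a map
    // keyed by filter name whose values act as string sets.
    type Filters map[string]map[string]bool

    // Add registers value under key and returns the receiver for chaining.
    func (f Filters) Add(key, value string) Filters {
        if f[key] == nil {
            f[key] = make(map[string]bool)
        }
        f[key][value] = true
        return f
    }

    func main() {
        f := make(Filters).Add("service", "web").Add("_up-to-date", "true")
        fmt.Println(f["service"]["web"]) // true
        fmt.Println(f["label!"]["x"])    // false: absent keys read as unset
    }
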
func getActiveNodes(ctx context.Context, apiClient client.NodeAPIClient) (map[string]struct{}, error) { - nodes, err := apiClient.NodeList(ctx, swarm.NodeListOptions{}) + res, err := apiClient.NodeList(ctx, client.NodeListOptions{}) if err != nil { return nil, err } activeNodes := make(map[string]struct{}) - for _, n := range nodes { + for _, n := range res.Items { if n.Status.State != swarm.NodeStateDown { activeNodes[n.ID] = struct{}{} } @@ -652,7 +650,7 @@ func (u *replicatedJobProgressUpdater) update(_ swarm.Service, tasks []swarm.Tas } func (u *replicatedJobProgressUpdater) writeOverallProgress(active, completed int) { - u.progressOut.WriteProgress(progress.Progress{ + _ = u.progressOut.WriteProgress(progress.Progress{ ID: "job progress", Action: fmt.Sprintf( // * means "use the next positional arg to compute padding" @@ -669,7 +667,7 @@ func (u *replicatedJobProgressUpdater) writeOverallProgress(active, completed in actualDesired = u.concurrent } - u.progressOut.WriteProgress(progress.Progress{ + _ = u.progressOut.WriteProgress(progress.Progress{ ID: "active tasks", Action: fmt.Sprintf( // [n] notation lets us select a specific argument, 1-indexed @@ -692,14 +690,14 @@ func (u *replicatedJobProgressUpdater) writeTaskProgress(task swarm.Task) { } if task.Status.Err != "" { - u.progressOut.WriteProgress(progress.Progress{ + _ = u.progressOut.WriteProgress(progress.Progress{ ID: fmt.Sprintf("%d/%d", task.Slot+1, u.total), Action: truncError(task.Status.Err), }) return } - u.progressOut.WriteProgress(progress.Progress{ + _ = u.progressOut.WriteProgress(progress.Progress{ ID: fmt.Sprintf("%d/%d", task.Slot+1, u.total), Action: fmt.Sprintf("%-*s", longestState, task.Status.State), Current: numberedStates[task.Status.State], @@ -732,7 +730,7 @@ func (u *globalJobProgressUpdater) update(service swarm.Service, tasks []swarm.T if !u.initialized { // if there are not yet tasks, then return early. if len(tasks) == 0 && len(activeNodes) != 0 { - u.progressOut.WriteProgress(progress.Progress{ + _ = u.progressOut.WriteProgress(progress.Progress{ ID: "job progress", Action: "waiting for tasks", }) @@ -810,14 +808,14 @@ func (u *globalJobProgressUpdater) writeTaskProgress(task swarm.Task) { } if task.Status.Err != "" { - u.progressOut.WriteProgress(progress.Progress{ + _ = u.progressOut.WriteProgress(progress.Progress{ ID: task.NodeID, Action: truncError(task.Status.Err), }) return } - u.progressOut.WriteProgress(progress.Progress{ + _ = u.progressOut.WriteProgress(progress.Progress{ ID: task.NodeID, Action: fmt.Sprintf("%-*s", longestState, task.Status.State), Current: numberedStates[task.Status.State], @@ -829,7 +827,7 @@ func (u *globalJobProgressUpdater) writeTaskProgress(task swarm.Task) { func (u *globalJobProgressUpdater) writeOverallProgress(complete int) { // all tasks for a global job are active at once, so we only write out the // total progress. - u.progressOut.WriteProgress(progress.Progress{ + _ = u.progressOut.WriteProgress(progress.Progress{ // see (*replicatedJobProgressUpdater).writeOverallProgress for an // explanation of the advanced fmt use in this function. 
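
The padding comments referenced in these progress updaters compress two less common fmt verbs. For reference, what they do:

    package main

    import "fmt"

    func main() {
        // "*" consumes the next argument as the field width, and "-"
        // left-justifies, so task states line up in a fixed-width column.
        fmt.Printf("%-*s|\n", 9, "running") // "running  |"

        // "[n]" selects the n-th (1-indexed) argument explicitly, letting
        // one argument be reused or reordered.
        fmt.Printf("%[2]d of %[1]d tasks\n", 5, 3) // "3 of 5 tasks"
    }
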
ID: "job progress", diff --git a/vendor/github.com/docker/cli/cli/command/stack/formatter/formatter.go b/vendor/github.com/docker/cli/cli/command/stack/formatter/formatter.go deleted file mode 100644 index 7cfe8a89a..000000000 --- a/vendor/github.com/docker/cli/cli/command/stack/formatter/formatter.go +++ /dev/null @@ -1,82 +0,0 @@ -package formatter - -import ( - "strconv" - - "github.com/docker/cli/cli/command/formatter" -) - -const ( - // SwarmStackTableFormat is the default Swarm stack format - // - // Deprecated: this type was for internal use and will be removed in the next release. - SwarmStackTableFormat formatter.Format = "table {{.Name}}\t{{.Services}}" - - stackServicesHeader = "SERVICES" - - // TableFormatKey is an alias for formatter.TableFormatKey - // - // Deprecated: this type was for internal use and will be removed in the next release. - TableFormatKey = formatter.TableFormatKey -) - -// Context is an alias for formatter.Context -// -// Deprecated: this type was for internal use and will be removed in the next release. -type Context = formatter.Context - -// Format is an alias for formatter.Format -// -// Deprecated: this type was for internal use and will be removed in the next release. -type Format = formatter.Format - -// Stack contains deployed stack information. -// -// Deprecated: this type was for internal use and will be removed in the next release. -type Stack struct { - // Name is the name of the stack - Name string - // Services is the number of the services - Services int -} - -// StackWrite writes formatted stacks using the Context -// -// Deprecated: this function was for internal use and will be removed in the next release. -func StackWrite(ctx formatter.Context, stacks []*Stack) error { - render := func(format func(subContext formatter.SubContext) error) error { - for _, stack := range stacks { - if err := format(&stackContext{s: stack}); err != nil { - return err - } - } - return nil - } - return ctx.Write(newStackContext(), render) -} - -type stackContext struct { - formatter.HeaderContext - s *Stack -} - -func newStackContext() *stackContext { - stackCtx := stackContext{} - stackCtx.Header = formatter.SubHeaderContext{ - "Name": formatter.NameHeader, - "Services": stackServicesHeader, - } - return &stackCtx -} - -func (s *stackContext) MarshalJSON() ([]byte, error) { - return formatter.MarshalJSON(s) -} - -func (s *stackContext) Name() string { - return s.s.Name -} - -func (s *stackContext) Services() string { - return strconv.Itoa(s.s.Services) -} diff --git a/vendor/github.com/docker/cli/cli/command/telemetry.go b/vendor/github.com/docker/cli/cli/command/telemetry.go index e8e6296b5..93f0ed27a 100644 --- a/vendor/github.com/docker/cli/cli/command/telemetry.go +++ b/vendor/github.com/docker/cli/cli/command/telemetry.go @@ -11,11 +11,12 @@ import ( "github.com/google/uuid" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" + otelsdk "go.opentelemetry.io/otel/sdk" sdkmetric "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" ) @@ -146,7 +147,7 @@ func defaultResourceOptions() []resource.Option { semconv.ServiceInstanceID(uuid.NewString()), ), resource.WithFromEnv(), - resource.WithTelemetrySDK(), + resource.WithDetectors(telemetrySDK{}), } } @@ -157,7 +158,10 @@ func (r *telemetryResource) 
AppendOptions(opts ...resource.Option) { r.opts = append(r.opts, opts...) } -type serviceNameDetector struct{} +type ( + serviceNameDetector struct{} + telemetrySDK struct{} +) func (serviceNameDetector) Detect(ctx context.Context) (*resource.Resource, error) { return resource.StringDetector( @@ -169,6 +173,16 @@ func (serviceNameDetector) Detect(ctx context.Context) (*resource.Resource, erro ).Detect(ctx) } +// Detect returns a *Resource that describes the OpenTelemetry SDK used. +func (telemetrySDK) Detect(context.Context) (*resource.Resource, error) { + return resource.NewWithAttributes( + semconv.SchemaURL, + semconv.TelemetrySDKName("opentelemetry"), + semconv.TelemetrySDKLanguageGo, + semconv.TelemetrySDKVersion(otelsdk.Version()), + ), nil +} + // cliReader is an implementation of Reader that will automatically // report to a designated Exporter when Shutdown is called. type cliReader struct { diff --git a/vendor/github.com/docker/cli/cli/command/telemetry_docker.go b/vendor/github.com/docker/cli/cli/command/telemetry_docker.go index 6598997d6..f0a43a425 100644 --- a/vendor/github.com/docker/cli/cli/command/telemetry_docker.go +++ b/vendor/github.com/docker/cli/cli/command/telemetry_docker.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package command @@ -14,7 +14,6 @@ import ( "strings" "unicode" - "github.com/pkg/errors" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" @@ -48,7 +47,7 @@ func dockerExporterOTLPEndpoint(cli Cli) (endpoint string, secure bool) { if otelCfg != nil { otelMap, ok := otelCfg.(map[string]any) if !ok { - otel.Handle(errors.Errorf( + otel.Handle(fmt.Errorf( "unexpected type for field %q: %T (expected: %T)", otelContextFieldName, otelCfg, @@ -76,7 +75,7 @@ func dockerExporterOTLPEndpoint(cli Cli) (endpoint string, secure bool) { // We pretend we're the same as the environment reader. 
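
Swapping resource.WithTelemetrySDK() for the hand-rolled telemetrySDK detector keeps the emitted attributes under the CLI's control, pinned to the v1.37.0 semconv schema. The resource.Detector interface is small; a hypothetical detector of the same shape (the service name and version values here are invented):

    package main

    import (
        "context"
        "fmt"

        "go.opentelemetry.io/otel/sdk/resource"
        semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
    )

    // exampleDetector follows the same pattern as telemetrySDK above:
    // Detect returns a fixed set of semconv attributes.
    type exampleDetector struct{}

    func (exampleDetector) Detect(context.Context) (*resource.Resource, error) {
        return resource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.ServiceName("example-cli"),      // invented value
            semconv.ServiceVersion("0.0.0-example"), // invented value
        ), nil
    }

    func main() {
        res, err := resource.New(context.Background(), resource.WithDetectors(exampleDetector{}))
        if err != nil {
            fmt.Println("detect:", err)
            return
        }
        fmt.Println(res.Attributes())
    }
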
u, err := url.Parse(endpoint) if err != nil { - otel.Handle(errors.Errorf("docker otel endpoint is invalid: %s", err)) + otel.Handle(fmt.Errorf("docker otel endpoint is invalid: %s", err)) return "", false } diff --git a/vendor/github.com/docker/cli/cli/command/telemetry_utils.go b/vendor/github.com/docker/cli/cli/command/telemetry_utils.go index 680415b63..dadbd13fd 100644 --- a/vendor/github.com/docker/cli/cli/command/telemetry_utils.go +++ b/vendor/github.com/docker/cli/cli/command/telemetry_utils.go @@ -2,12 +2,12 @@ package command import ( "context" + "errors" "fmt" "strings" "time" "github.com/docker/cli/cli/version" - "github.com/pkg/errors" "github.com/spf13/cobra" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" diff --git a/vendor/github.com/docker/cli/cli/command/utils.go b/vendor/github.com/docker/cli/cli/command/utils.go index fdedc2a14..dcbeed2f7 100644 --- a/vendor/github.com/docker/cli/cli/command/utils.go +++ b/vendor/github.com/docker/cli/cli/command/utils.go @@ -1,74 +1,30 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package command import ( - "context" - "io" + "errors" + "fmt" "os" "path/filepath" "strings" "github.com/docker/cli/cli/config" - "github.com/docker/cli/cli/streams" - "github.com/docker/cli/internal/prompt" - "github.com/docker/docker/api/types/filters" - "github.com/pkg/errors" + "github.com/moby/moby/client" ) -// ErrPromptTerminated is returned if the user terminated the prompt. -// -// Deprecated: this error is for internal use and will be removed in the next release. -const ErrPromptTerminated = prompt.ErrTerminated - -// DisableInputEcho disables input echo on the provided streams.In. -// This is useful when the user provides sensitive information like passwords. -// The function returns a restore function that should be called to restore the -// terminal state. -// -// Deprecated: this function is for internal use and will be removed in the next release. -func DisableInputEcho(ins *streams.In) (restore func() error, err error) { - return prompt.DisableInputEcho(ins) -} - -// PromptForInput requests input from the user. -// -// If the user terminates the CLI with SIGINT or SIGTERM while the prompt is -// active, the prompt will return an empty string ("") with an ErrPromptTerminated error. -// When the prompt returns an error, the caller should propagate the error up -// the stack and close the io.Reader used for the prompt which will prevent the -// background goroutine from blocking indefinitely. -// -// Deprecated: this function is for internal use and will be removed in the next release. -func PromptForInput(ctx context.Context, in io.Reader, out io.Writer, message string) (string, error) { - return prompt.ReadInput(ctx, in, out, message) -} - -// PromptForConfirmation requests and checks confirmation from the user. -// This will display the provided message followed by ' [y/N] '. If the user -// input 'y' or 'Y' it returns true otherwise false. If no message is provided, -// "Are you sure you want to proceed? [y/N] " will be used instead. -// -// If the user terminates the CLI with SIGINT or SIGTERM while the prompt is -// active, the prompt will return false with an ErrPromptTerminated error. -// When the prompt returns an error, the caller should propagate the error up -// the stack and close the io.Reader used for the prompt which will prevent the -// background goroutine from blocking indefinitely. 
-// -// Deprecated: this function is for internal use and will be removed in the next release. -func PromptForConfirmation(ctx context.Context, ins io.Reader, outs io.Writer, message string) (bool, error) { - return prompt.Confirm(ctx, ins, outs, message) -} - // PruneFilters merges prune filters specified in config.json with those specified -// as command-line flags. +// as command-line flags. It returns a deep copy of filters to prevent mutating +// the original. // // CLI label filters have precedence over those specified in config.json. If a // label filter specified as flag conflicts with a label defined in config.json // (i.e., "label=some-value" conflicts with "label!=some-value", and vice versa), // then the filter defined in config.json is omitted. -func PruneFilters(dockerCLI config.Provider, pruneFilters filters.Args) filters.Args { +func PruneFilters(dockerCLI config.Provider, filters client.Filters) client.Filters { + pruneFilters := filters.Clone() + cfg := dockerCLI.ConfigFile() if cfg == nil { return pruneFilters @@ -84,13 +40,13 @@ func PruneFilters(dockerCLI config.Provider, pruneFilters filters.Args) filters. switch k { case "label": // "label != some-value" conflicts with "label = some-value" - if pruneFilters.ExactMatch("label!", v) { + if pruneFilters["label!"][v] { continue } pruneFilters.Add(k, v) case "label!": // "label != some-value" conflicts with "label = some-value" - if pruneFilters.ExactMatch("label", v) { + if pruneFilters["label"][v] { continue } pruneFilters.Add(k, v) @@ -107,7 +63,7 @@ func ValidateOutputPath(path string) error { dir := filepath.Dir(filepath.Clean(path)) if dir != "" && dir != "." { if _, err := os.Stat(dir); os.IsNotExist(err) { - return errors.Errorf("invalid output path: directory %q does not exist", dir) + return fmt.Errorf("invalid output path: directory %q does not exist", dir) } } // check whether `path` points to a regular file @@ -122,7 +78,7 @@ func ValidateOutputPath(path string) error { } if err := ValidateOutputPathFileMode(fileInfo.Mode()); err != nil { - return errors.Wrapf(err, "invalid output path: %q must be a directory or a regular file", path) + return fmt.Errorf("invalid output path: %q must be a directory or a regular file: %w", path, err) } } return nil diff --git a/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go b/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go index c7bee693a..25fafdd44 100644 --- a/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go +++ b/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go @@ -1,14 +1,14 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package interpolation import ( + "fmt" "os" "strings" "github.com/docker/cli/cli/compose/template" - "github.com/pkg/errors" ) // Options supported by Interpolate @@ -68,7 +68,7 @@ func recursiveInterpolate(value any, path Path, opts Options) (any, error) { } casted, err := caster(newValue) if err != nil { - return casted, newPathError(path, errors.Wrap(err, "failed to cast to expected type")) + return casted, newPathError(path, fmt.Errorf("failed to cast to expected type: %w", err)) } return casted, nil @@ -104,11 +104,11 @@ func newPathError(path Path, err error) error { case nil: return nil case *template.InvalidTemplateError: - return errors.Errorf( + return fmt.Errorf( "invalid interpolation format for %s: %#v; you may need to escape 
any $ with another $", path, err.Template) default: - return errors.Wrapf(err, "error while interpolating %s", path) + return fmt.Errorf("error while interpolating %s: %w", path, err) } } diff --git a/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go b/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go index 93ac9d83d..edacd4f83 100644 --- a/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go +++ b/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go @@ -1,14 +1,14 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package loader import ( + "fmt" "strconv" "strings" interp "github.com/docker/cli/cli/compose/interpolation" - "github.com/pkg/errors" ) var interpolateTypeCastMapping = map[interp.Path]interp.Cast{ @@ -67,7 +67,7 @@ func toBoolean(value string) (any, error) { case "n", "no", "false", "off": return false, nil default: - return nil, errors.Errorf("invalid boolean: %s", value) + return nil, fmt.Errorf("invalid boolean: %s", value) } } diff --git a/vendor/github.com/docker/cli/cli/compose/loader/loader.go b/vendor/github.com/docker/cli/cli/compose/loader/loader.go index 3b788ca04..dda62f862 100644 --- a/vendor/github.com/docker/cli/cli/compose/loader/loader.go +++ b/vendor/github.com/docker/cli/cli/compose/loader/loader.go @@ -1,9 +1,10 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package loader import ( + "errors" "fmt" "path" "path/filepath" @@ -20,14 +21,14 @@ import ( "github.com/docker/cli/internal/volumespec" "github.com/docker/cli/opts" "github.com/docker/cli/opts/swarmopts" - "github.com/docker/docker/api/types/versions" "github.com/docker/go-connections/nat" "github.com/docker/go-units" "github.com/go-viper/mapstructure/v2" "github.com/google/shlex" - "github.com/pkg/errors" + "github.com/moby/moby/api/types/network" + "github.com/moby/moby/client/pkg/versions" "github.com/sirupsen/logrus" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v3" ) // Options supported by Load @@ -64,7 +65,7 @@ func ParseYAML(source []byte) (map[string]any, error) { } _, ok := cfg.(map[string]any) if !ok { - return nil, errors.Errorf("top-level object must be a mapping") + return nil, errors.New("top-level object must be a mapping") } converted, err := convertToStringKeysRecursive(cfg, "") if err != nil { @@ -76,7 +77,7 @@ func ParseYAML(source []byte) (map[string]any, error) { // Load reads a ConfigDetails and returns a fully loaded configuration func Load(configDetails types.ConfigDetails, opt ...func(*Options)) (*types.Config, error) { if len(configDetails.ConfigFiles) < 1 { - return nil, errors.Errorf("No files specified") + return nil, errors.New("no files specified") } options := &Options{ @@ -101,7 +102,7 @@ func Load(configDetails types.ConfigDetails, opt ...func(*Options)) (*types.Conf configDetails.Version = version } if configDetails.Version != version { - return nil, errors.Errorf("version mismatched between two composefiles : %v and %v", configDetails.Version, version) + return nil, fmt.Errorf("version mismatched between two composefiles : %v and %v", configDetails.Version, version) } if err := validateForbidden(configDict); err != nil { @@ -532,7 +533,7 @@ func transformUlimits(data any) (any, error) { ulimit.Hard = value["hard"].(int) return ulimit, nil default: - return data, errors.Errorf("invalid 
type %T for ulimits", value) + return data, fmt.Errorf("invalid type %T for ulimits", value) } } @@ -544,33 +545,31 @@ func LoadNetworks(source map[string]any, version string) (map[string]types.Netwo if err != nil { return networks, err } - for name, network := range networks { - if !network.External.External { + for name, nw := range networks { + if !nw.External.External { continue } switch { - case network.External.Name != "": - if network.Name != "" { - return nil, errors.Errorf("network %s: network.external.name and network.name conflict; only use network.name", name) + case nw.External.Name != "": + if nw.Name != "" { + return nil, fmt.Errorf("network %s: network.external.name and network.name conflict; only use network.name", name) } if versions.GreaterThanOrEqualTo(version, "3.5") { logrus.Warnf("network %s: network.external.name is deprecated in favor of network.name", name) } - network.Name = network.External.Name - network.External.Name = "" - case network.Name == "": - network.Name = name + nw.Name = nw.External.Name + nw.External.Name = "" + case nw.Name == "": + nw.Name = name } - network.Extras = loadExtras(name, source) - networks[name] = network + nw.Extras = loadExtras(name, source) + networks[name] = nw } return networks, nil } func externalVolumeError(volume, key string) error { - return errors.Errorf( - "conflicting parameters \"external\" and %q specified for volume %q", - key, volume) + return fmt.Errorf(`conflicting parameters "external" and %q specified for volume %q`, key, volume) } // LoadVolumes produces a VolumeConfig map from a compose file Dict @@ -594,7 +593,7 @@ func LoadVolumes(source map[string]any, version string) (map[string]types.Volume return nil, externalVolumeError(name, "labels") case volume.External.Name != "": if volume.Name != "" { - return nil, errors.Errorf("volume %s: volume.external.name and volume.name conflict; only use volume.name", name) + return nil, fmt.Errorf("volume %s: volume.external.name and volume.name conflict; only use volume.name", name) } if versions.GreaterThanOrEqualTo(version, "3.4") { logrus.Warnf("volume %s: volume.external.name is deprecated in favor of volume.name", name) @@ -655,7 +654,7 @@ func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfi // handle deprecated external.name if obj.External.Name != "" { if obj.Name != "" { - return obj, errors.Errorf("%[1]s %[2]s: %[1]s.external.name and %[1]s.name conflict; only use %[1]s.name", objType, name) + return obj, fmt.Errorf("%[1]s %[2]s: %[1]s.external.name and %[1]s.name conflict; only use %[1]s.name", objType, name) } if versions.GreaterThanOrEqualTo(details.Version, "3.5") { logrus.Warnf("%[1]s %[2]s: %[1]s.external.name is deprecated in favor of %[1]s.name", objType, name) @@ -668,7 +667,7 @@ func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfi // if not "external: true" case obj.Driver != "": if obj.File != "" { - return obj, errors.Errorf("%[1]s %[2]s: %[1]s.driver and %[1]s.file conflict; only use %[1]s.driver", objType, name) + return obj, fmt.Errorf("%[1]s %[2]s: %[1]s.driver and %[1]s.file conflict; only use %[1]s.driver", objType, name) } default: obj.File = absPath(details.WorkingDir, obj.File) @@ -691,7 +690,7 @@ var transformMapStringString TransformerFunc = func(data any) (any, error) { case map[string]string: return value, nil default: - return data, errors.Errorf("invalid type %T for map[string]string", value) + return data, fmt.Errorf("invalid type %T for map[string]string", value) } } @@ -702,7 
+701,7 @@ var transformExternal TransformerFunc = func(data any) (any, error) { case map[string]any: return map[string]any{"external": true, "name": value["name"]}, nil default: - return data, errors.Errorf("invalid type %T for external", value) + return data, fmt.Errorf("invalid type %T for external", value) } } @@ -730,12 +729,12 @@ var transformServicePort TransformerFunc = func(data any) (any, error) { case map[string]any: ports = append(ports, value) default: - return data, errors.Errorf("invalid type %T for port", value) + return data, fmt.Errorf("invalid type %T for port", value) } } return ports, nil default: - return data, errors.Errorf("invalid type %T for port", entries) + return data, fmt.Errorf("invalid type %T for port", entries) } } @@ -746,7 +745,7 @@ var transformStringSourceMap TransformerFunc = func(data any) (any, error) { case map[string]any: return data, nil default: - return data, errors.Errorf("invalid type %T for secret", value) + return data, fmt.Errorf("invalid type %T for secret", value) } } @@ -757,7 +756,7 @@ var transformBuildConfig TransformerFunc = func(data any) (any, error) { case map[string]any: return data, nil default: - return data, errors.Errorf("invalid type %T for service build", value) + return data, fmt.Errorf("invalid type %T for service build", value) } } @@ -768,7 +767,7 @@ var transformServiceVolumeConfig TransformerFunc = func(data any) (any, error) { case map[string]any: return data, nil default: - return data, errors.Errorf("invalid type %T for service volume", value) + return data, fmt.Errorf("invalid type %T for service volume", value) } } @@ -799,7 +798,7 @@ var transformStringList TransformerFunc = func(data any) (any, error) { case []any: return value, nil default: - return data, errors.Errorf("invalid type %T for string list", value) + return data, fmt.Errorf("invalid type %T for string list", value) } } @@ -846,7 +845,7 @@ func transformListOrMapping(listOrMapping any, sep string, allowNil bool, allowS } return result } - panic(errors.Errorf("expected a map or a list, got %T: %#v", listOrMapping, listOrMapping)) + panic(fmt.Errorf("expected a map or a list, got %T: %#v", listOrMapping, listOrMapping)) } func transformMappingOrListFunc(sep string, allowNil bool) TransformerFunc { @@ -874,7 +873,7 @@ func transformMappingOrList(mappingOrList any, sep string, allowNil bool) any { } return result } - panic(errors.Errorf("expected a map or a list, got %T: %#v", mappingOrList, mappingOrList)) + panic(fmt.Errorf("expected a map or a list, got %T: %#v", mappingOrList, mappingOrList)) } var transformShellCommand TransformerFunc = func(value any) (any, error) { @@ -891,7 +890,7 @@ var transformHealthCheckTest TransformerFunc = func(data any) (any, error) { case []any: return value, nil default: - return value, errors.Errorf("invalid type %T for healthcheck.test", value) + return value, fmt.Errorf("invalid type %T for healthcheck.test", value) } } @@ -902,7 +901,7 @@ var transformSize TransformerFunc = func(value any) (any, error) { case string: return units.RAMInBytes(value) } - panic(errors.Errorf("invalid type for size %T", value)) + panic(fmt.Errorf("invalid type for size %T", value)) } var transformStringToDuration TransformerFunc = func(value any) (any, error) { @@ -914,27 +913,33 @@ var transformStringToDuration TransformerFunc = func(value any) (any, error) { } return types.Duration(d), nil default: - return value, errors.Errorf("invalid type %T for duration", value) + return value, fmt.Errorf("invalid type %T for duration", value) } } 
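
transformStringToDuration above is representative of how the compose loader's TransformerFunc values work: each receives a decoded any and switches on the dynamic type. A self-contained sketch of the same shape (the compose types.Duration alias is replaced by plain time.Duration here to keep the example dependency-free):

    package main

    import (
        "fmt"
        "time"
    )

    // toDuration mirrors transformStringToDuration: accept a string,
    // reject anything else.
    func toDuration(value any) (any, error) {
        switch v := value.(type) {
        case string:
            d, err := time.ParseDuration(v)
            if err != nil {
                return value, err
            }
            return d, nil
        default:
            return value, fmt.Errorf("invalid type %T for duration", value)
        }
    }

    func main() {
        fmt.Println(toDuration("1m30s")) // 1m30s <nil>
        fmt.Println(toDuration(90))      // 90 invalid type int for duration
    }
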
func toServicePortConfigs(value string) ([]any, error) { - var portConfigs []any - + // short syntax ([ip:]public:private[/proto]) + // + // TODO(thaJeztah): we need an equivalent that handles the "ip-address" part without depending on the nat package. ports, portBindings, err := nat.ParsePortSpecs([]string{value}) if err != nil { return nil, err } // We need to sort the key of the ports to make sure it is consistent - keys := []string{} + keys := make([]string, 0, len(ports)) for port := range ports { keys = append(keys, string(port)) } sort.Strings(keys) + var portConfigs []any for _, key := range keys { // Reuse ConvertPortToPortConfig so that it is consistent - portConfig, err := swarmopts.ConvertPortToPortConfig(nat.Port(key), portBindings) + port, err := network.ParsePort(key) + if err != nil { + return nil, err + } + portConfig, err := swarmopts.ConvertPortToPortConfig(port, portBindings) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/cli/cli/compose/loader/merge.go b/vendor/github.com/docker/cli/cli/compose/loader/merge.go index 8c0f35db4..a321fd18c 100644 --- a/vendor/github.com/docker/cli/cli/compose/loader/merge.go +++ b/vendor/github.com/docker/cli/cli/compose/loader/merge.go @@ -1,15 +1,15 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package loader import ( + "fmt" "reflect" "sort" "dario.cat/mergo" "github.com/docker/cli/cli/compose/types" - "github.com/pkg/errors" ) type specials struct { @@ -29,23 +29,23 @@ func merge(configs []*types.Config) (*types.Config, error) { var err error base.Services, err = mergeServices(base.Services, override.Services) if err != nil { - return base, errors.Wrapf(err, "cannot merge services from %s", override.Filename) + return base, fmt.Errorf("cannot merge services from %s: %w", override.Filename, err) } base.Volumes, err = mergeVolumes(base.Volumes, override.Volumes) if err != nil { - return base, errors.Wrapf(err, "cannot merge volumes from %s", override.Filename) + return base, fmt.Errorf("cannot merge volumes from %s: %w", override.Filename, err) } base.Networks, err = mergeNetworks(base.Networks, override.Networks) if err != nil { - return base, errors.Wrapf(err, "cannot merge networks from %s", override.Filename) + return base, fmt.Errorf("cannot merge networks from %s: %w", override.Filename, err) } base.Secrets, err = mergeSecrets(base.Secrets, override.Secrets) if err != nil { - return base, errors.Wrapf(err, "cannot merge secrets from %s", override.Filename) + return base, fmt.Errorf("cannot merge secrets from %s: %w", override.Filename, err) } base.Configs, err = mergeConfigs(base.Configs, override.Configs) if err != nil { - return base, errors.Wrapf(err, "cannot merge configs from %s", override.Filename) + return base, fmt.Errorf("cannot merge configs from %s: %w", override.Filename, err) } } return base, nil @@ -70,7 +70,7 @@ func mergeServices(base, override []types.ServiceConfig) ([]types.ServiceConfig, for name, overrideService := range overrideServices { if baseService, ok := baseServices[name]; ok { if err := mergo.Merge(&baseService, &overrideService, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(specials)); err != nil { - return base, errors.Wrapf(err, "cannot merge service %s", name) + return base, fmt.Errorf("cannot merge service %s: %w", name, err) } baseServices[name] = baseService continue @@ -88,7 +88,7 @@ func mergeServices(base, override 
[]types.ServiceConfig) ([]types.ServiceConfig, func toServiceSecretConfigsMap(s any) (map[any]any, error) { secrets, ok := s.([]types.ServiceSecretConfig) if !ok { - return nil, errors.Errorf("not a serviceSecretConfig: %v", s) + return nil, fmt.Errorf("not a serviceSecretConfig: %v", s) } m := map[any]any{} for _, secret := range secrets { @@ -100,7 +100,7 @@ func toServiceSecretConfigsMap(s any) (map[any]any, error) { func toServiceConfigObjConfigsMap(s any) (map[any]any, error) { secrets, ok := s.([]types.ServiceConfigObjConfig) if !ok { - return nil, errors.Errorf("not a serviceSecretConfig: %v", s) + return nil, fmt.Errorf("not a serviceSecretConfig: %v", s) } m := map[any]any{} for _, secret := range secrets { @@ -112,7 +112,7 @@ func toServiceConfigObjConfigsMap(s any) (map[any]any, error) { func toServicePortConfigsMap(s any) (map[any]any, error) { ports, ok := s.([]types.ServicePortConfig) if !ok { - return nil, errors.Errorf("not a servicePortConfig slice: %v", s) + return nil, fmt.Errorf("not a servicePortConfig slice: %v", s) } m := map[any]any{} for _, p := range ports { @@ -124,7 +124,7 @@ func toServicePortConfigsMap(s any) (map[any]any, error) { func toServiceVolumeConfigsMap(s any) (map[any]any, error) { volumes, ok := s.([]types.ServiceVolumeConfig) if !ok { - return nil, errors.Errorf("not a serviceVolumeConfig slice: %v", s) + return nil, fmt.Errorf("not a serviceVolumeConfig slice: %v", s) } m := map[any]any{} for _, v := range volumes { @@ -211,7 +211,7 @@ func mergeSlice(tomap tomapFn, writeValue writeValueFromMapFn) func(dst, src ref func sliceToMap(tomap tomapFn, v reflect.Value) (map[any]any, error) { // check if valid if !v.IsValid() { - return nil, errors.Errorf("invalid value : %+v", v) + return nil, fmt.Errorf("invalid value : %+v", v) } return tomap(v.Interface()) } diff --git a/vendor/github.com/docker/cli/cli/compose/schema/schema.go b/vendor/github.com/docker/cli/cli/compose/schema/schema.go index 1484410da..ea8543a85 100644 --- a/vendor/github.com/docker/cli/cli/compose/schema/schema.go +++ b/vendor/github.com/docker/cli/cli/compose/schema/schema.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package schema @@ -11,7 +11,6 @@ import ( "time" "github.com/docker/go-connections/nat" - "github.com/pkg/errors" "github.com/xeipuuv/gojsonschema" ) @@ -80,7 +79,7 @@ func Validate(config map[string]any, version string) error { version = normalizeVersion(version) schemaData, err := schemas.ReadFile("data/config_schema_v" + version + ".json") if err != nil { - return errors.Errorf("unsupported Compose file version: %s", version) + return fmt.Errorf("unsupported Compose file version: %s", version) } schemaLoader := gojsonschema.NewStringLoader(string(schemaData)) diff --git a/vendor/github.com/docker/cli/cli/compose/template/template.go b/vendor/github.com/docker/cli/cli/compose/template/template.go index b823b4998..b8a77b9bc 100644 --- a/vendor/github.com/docker/cli/cli/compose/template/template.go +++ b/vendor/github.com/docker/cli/cli/compose/template/template.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package template diff --git a/vendor/github.com/docker/cli/cli/compose/types/types.go b/vendor/github.com/docker/cli/cli/compose/types/types.go index 9d0c11b40..fdc0eb44f 100644 --- 
a/vendor/github.com/docker/cli/cli/compose/types/types.go +++ b/vendor/github.com/docker/cli/cli/compose/types/types.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package types diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go index cbb34486a..5a6378050 100644 --- a/vendor/github.com/docker/cli/cli/config/config.go +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -13,7 +13,6 @@ import ( "github.com/docker/cli/cli/config/configfile" "github.com/docker/cli/cli/config/credentials" "github.com/docker/cli/cli/config/types" - "github.com/pkg/errors" ) const ( @@ -101,7 +100,7 @@ func SetDir(dir string) { func Path(p ...string) (string, error) { path := filepath.Join(append([]string{Dir()}, p...)...) if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) { - return "", errors.Errorf("path %q is outside of root config directory %q", path, Dir()) + return "", fmt.Errorf("path %q is outside of root config directory %q", path, Dir()) } return path, nil } @@ -143,12 +142,12 @@ func load(configDir string) (*configfile.ConfigFile, error) { return configFile, nil } // Any other error happening when failing to read the file must be returned. - return configFile, errors.Wrap(err, "loading config file") + return configFile, fmt.Errorf("loading config file: %w", err) } - defer file.Close() + defer func() { _ = file.Close() }() err = configFile.LoadFromReader(file) if err != nil { - err = errors.Wrapf(err, "parsing config file (%s)", filename) + err = fmt.Errorf("parsing config file (%s): %w", filename, err) } return configFile, err } diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go index 530c52285..fab3ed4cb 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -3,6 +3,7 @@ package configfile import ( "encoding/base64" "encoding/json" + "errors" "fmt" "io" "os" @@ -12,7 +13,6 @@ import ( "github.com/docker/cli/cli/config/credentials" "github.com/docker/cli/cli/config/memorystore" "github.com/docker/cli/cli/config/types" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -43,9 +43,6 @@ type ConfigFile struct { Plugins map[string]map[string]string `json:"plugins,omitempty"` Aliases map[string]string `json:"aliases,omitempty"` Features map[string]string `json:"features,omitempty"` - - // Deprecated: experimental CLI features are always enabled and this field is no longer used. Use [Features] instead for optional features. This field will be removed in a future release. 
- Experimental string `json:"experimental,omitempty"` } type configEnvAuth struct { @@ -167,7 +164,7 @@ func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { // Save encodes and writes out all the authorization information func (configFile *ConfigFile) Save() (retErr error) { if configFile.Filename == "" { - return errors.Errorf("Can't save config with empty filename") + return errors.New("can't save config with empty filename") } dir := filepath.Dir(configFile.Filename) @@ -194,7 +191,7 @@ func (configFile *ConfigFile) Save() (retErr error) { } if err := temp.Close(); err != nil { - return errors.Wrap(err, "error closing temp file") + return fmt.Errorf("error closing temp file: %w", err) } // Handle situation where the configfile is a symlink, and allow for dangling symlinks @@ -278,11 +275,11 @@ func decodeAuth(authStr string) (string, string, error) { return "", "", err } if n > decLen { - return "", "", errors.Errorf("Something went wrong decoding auth config") + return "", "", errors.New("something went wrong decoding auth config") } userName, password, ok := strings.Cut(string(decoded), ":") if !ok || userName == "" { - return "", "", errors.Errorf("Invalid auth configuration file") + return "", "", errors.New("invalid auth configuration file") } return userName, strings.Trim(password, "\x00"), nil } diff --git a/vendor/github.com/docker/cli/cli/config/memorystore/store.go b/vendor/github.com/docker/cli/cli/config/memorystore/store.go index 199083464..f8ec62b95 100644 --- a/vendor/github.com/docker/cli/cli/config/memorystore/store.go +++ b/vendor/github.com/docker/cli/cli/config/memorystore/store.go @@ -1,9 +1,9 @@ -//go:build go1.23 +// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: +//go:build go1.24 package memorystore import ( - "errors" "fmt" "maps" "os" @@ -13,12 +13,17 @@ import ( "github.com/docker/cli/cli/config/types" ) -var errValueNotFound = errors.New("value not found") +// notFoundErr is the error returned when a plugin could not be found. +type notFoundErr string -func IsErrValueNotFound(err error) bool { - return errors.Is(err, errValueNotFound) +func (notFoundErr) NotFound() {} + +func (e notFoundErr) Error() string { + return string(e) } +var errValueNotFound notFoundErr = "value not found" + type Config struct { lock sync.RWMutex memoryCredentials map[string]types.AuthConfig diff --git a/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/vendor/github.com/docker/cli/cli/config/types/authconfig.go index 95eb27c86..9fe90003b 100644 --- a/vendor/github.com/docker/cli/cli/config/types/authconfig.go +++ b/vendor/github.com/docker/cli/cli/config/types/authconfig.go @@ -6,11 +6,6 @@ type AuthConfig struct { Password string `json:"password,omitempty"` Auth string `json:"auth,omitempty"` - // Email is an optional value associated with the username. - // - // Deprecated: This field is deprecated since docker 1.11 (API v1.23) and will be removed in the next release. 
- Email string `json:"email,omitempty"` - ServerAddress string `json:"serveraddress,omitempty"` // IdentityToken is used to authenticate the user and get diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go index 4b04f8b39..8084a6532 100644 --- a/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go +++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go @@ -233,11 +233,9 @@ func (c *commandConn) Close() error { defer c.closing.Store(false) if err := c.CloseRead(); err != nil { - logrus.Warnf("commandConn.Close: CloseRead: %v", err) return err } if err := c.CloseWrite(); err != nil { - logrus.Warnf("commandConn.Close: CloseWrite: %v", err) return err } diff --git a/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go b/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go index 2fcb54a98..34ae267e2 100644 --- a/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go +++ b/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go @@ -175,7 +175,7 @@ func quoteCommand(commandAndArgs ...string) (string, error) { quotedCmd = a continue } - quotedCmd += " " + a + quotedCmd += " " + a //nolint:perfsprint // ignore "concat-loop"; no need to use a string-builder for this. } // each part is quoted appropriately, so now we'll have a full // shell command to pass off to "ssh" diff --git a/vendor/github.com/docker/cli/cli/context/docker/load.go b/vendor/github.com/docker/cli/cli/context/docker/load.go index 89d43e2e3..a5c44f934 100644 --- a/vendor/github.com/docker/cli/cli/context/docker/load.go +++ b/vendor/github.com/docker/cli/cli/context/docker/load.go @@ -4,6 +4,8 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" + "errors" + "fmt" "net" "net/http" "strings" @@ -12,9 +14,8 @@ import ( "github.com/docker/cli/cli/connhelper" "github.com/docker/cli/cli/context" "github.com/docker/cli/cli/context/store" - "github.com/docker/docker/client" "github.com/docker/go-connections/tlsconfig" - "github.com/pkg/errors" + "github.com/moby/moby/client" ) // EndpointMeta is a typed wrapper around a context-store generic endpoint describing @@ -68,7 +69,7 @@ func (ep *Endpoint) tlsConfig() (*tls.Config, error) { x509cert, err := tls.X509KeyPair(ep.TLSData.Cert, keyBytes) if err != nil { - return nil, errors.Wrap(err, "failed to retrieve context tls info") + return nil, fmt.Errorf("failed to retrieve context tls info: %w", err) } tlsOpts = append(tlsOpts, func(cfg *tls.Config) { cfg.Certificates = []tls.Certificate{x509cert} @@ -101,7 +102,22 @@ func (ep *Endpoint) ClientOpts() ([]client.Opt, error) { if err != nil { return nil, err } - result = append(result, withHTTPClient(tlsConfig)) + + // If there's no tlsConfig available, we use the default HTTPClient. 
+ if tlsConfig != nil { + result = append(result, + client.WithHTTPClient(&http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + DialContext: (&net.Dialer{ + KeepAlive: 30 * time.Second, + Timeout: 30 * time.Second, + }).DialContext, + }, + CheckRedirect: client.CheckRedirect, + }), + ) + } } result = append(result, client.WithHost(ep.Host)) } else { @@ -118,7 +134,7 @@ func (ep *Endpoint) ClientOpts() ([]client.Opt, error) { } } - result = append(result, client.WithVersionFromEnv(), client.WithAPIVersionNegotiation()) + result = append(result, client.WithAPIVersionFromEnv()) return result, nil } @@ -133,25 +149,6 @@ func isSocket(addr string) bool { } } -func withHTTPClient(tlsConfig *tls.Config) func(*client.Client) error { - return func(c *client.Client) error { - if tlsConfig == nil { - // Use the default HTTPClient - return nil - } - return client.WithHTTPClient(&http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsConfig, - DialContext: (&net.Dialer{ - KeepAlive: 30 * time.Second, - Timeout: 30 * time.Second, - }).DialContext, - }, - CheckRedirect: client.CheckRedirect, - })(c) - } -} - // EndpointFromContext parses a context docker endpoint metadata into a typed EndpointMeta structure func EndpointFromContext(metadata store.Metadata) (EndpointMeta, error) { ep, ok := metadata.Endpoints[DockerEndpoint] @@ -160,7 +157,7 @@ func EndpointFromContext(metadata store.Metadata) (EndpointMeta, error) { } typed, ok := ep.(EndpointMeta) if !ok { - return EndpointMeta{}, errors.Errorf("endpoint %q is not of type EndpointMeta", DockerEndpoint) + return EndpointMeta{}, fmt.Errorf("endpoint %q is not of type EndpointMeta", DockerEndpoint) } return typed, nil } diff --git a/vendor/github.com/docker/cli/cli/context/store/metadatastore.go b/vendor/github.com/docker/cli/cli/context/store/metadatastore.go index deec5cc9d..ecef656d8 100644 --- a/vendor/github.com/docker/cli/cli/context/store/metadatastore.go +++ b/vendor/github.com/docker/cli/cli/context/store/metadatastore.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package store diff --git a/vendor/github.com/docker/cli/cli/context/store/store.go b/vendor/github.com/docker/cli/cli/context/store/store.go index 91d9c19c6..f73b710c5 100644 --- a/vendor/github.com/docker/cli/cli/context/store/store.go +++ b/vendor/github.com/docker/cli/cli/context/store/store.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package store @@ -18,14 +18,9 @@ import ( "path/filepath" "strings" - "github.com/docker/cli/internal/lazyregexp" "github.com/opencontainers/go-digest" ) -const restrictedNamePattern = "^[a-zA-Z0-9][a-zA-Z0-9_.+-]+$" - -var restrictedNameRegEx = lazyregexp.New(restrictedNamePattern) - // Store provides a context store for easily remembering endpoints configuration type Store interface { Reader @@ -225,12 +220,43 @@ func ValidateContextName(name string) error { if name == "default" { return errors.New(`"default" is a reserved context name`) } - if !restrictedNameRegEx.MatchString(name) { - return fmt.Errorf("context name %q is invalid, names are validated against regexp %q", name, restrictedNamePattern) + if !isValidName(name) { + return fmt.Errorf("context name %q is invalid, names are validated against regexp %q", name, validNameFormat) } 
return nil } +// validNameFormat is used as part of errors for invalid context-names. +// We should consider making this less technical ("must start with "a-z", +// and only consist of alphanumeric characters and separators"). +const validNameFormat = `^[a-zA-Z0-9][a-zA-Z0-9_.+-]+$` + +// isValidName checks if the context-name is valid ("^[a-zA-Z0-9][a-zA-Z0-9_.+-]+$"). +// +// Names must start with an alphanumeric character (a-zA-Z0-9), followed by +// alphanumeric or separators ("_", ".", "+", "-"). +func isValidName(s string) bool { + if len(s) < 2 || !isAlphaNum(s[0]) { + return false + } + + for i := 1; i < len(s); i++ { + c := s[i] + if isAlphaNum(c) || c == '_' || c == '.' || c == '+' || c == '-' { + continue + } + return false + } + + return true +} + +func isAlphaNum(c byte) bool { + return (c >= 'a' && c <= 'z') || + (c >= 'A' && c <= 'Z') || + (c >= '0' && c <= '9') +} + // Export exports an existing namespace into an opaque data stream // This stream is actually a tarball containing context metadata and TLS materials, but it does // not map 1:1 the layout of the context store (don't try to restore it manually without calling store.Import) diff --git a/vendor/github.com/docker/cli/cli/context/store/storeconfig.go b/vendor/github.com/docker/cli/cli/context/store/storeconfig.go index fccbf1d1f..1b7f33106 100644 --- a/vendor/github.com/docker/cli/cli/context/store/storeconfig.go +++ b/vendor/github.com/docker/cli/cli/context/store/storeconfig.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package store diff --git a/vendor/github.com/docker/cli/cli/context/tlsdata.go b/vendor/github.com/docker/cli/cli/context/tlsdata.go index c758612a1..9a53d2fd0 100644 --- a/vendor/github.com/docker/cli/cli/context/tlsdata.go +++ b/vendor/github.com/docker/cli/cli/context/tlsdata.go @@ -1,10 +1,10 @@ package context import ( + "fmt" "os" "github.com/docker/cli/cli/context/store" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -45,14 +45,14 @@ func (data *TLSData) ToStoreTLSData() *store.EndpointTLSData { func LoadTLSData(s store.Reader, contextName, endpointName string) (*TLSData, error) { tlsFiles, err := s.ListTLSFiles(contextName) if err != nil { - return nil, errors.Wrapf(err, "failed to retrieve TLS files for context %q", contextName) + return nil, fmt.Errorf("failed to retrieve TLS files for context %q: %w", contextName, err) } if epTLSFiles, ok := tlsFiles[endpointName]; ok { var tlsData TLSData for _, f := range epTLSFiles { data, err := s.GetTLSData(contextName, endpointName, f) if err != nil { - return nil, errors.Wrapf(err, "failed to retrieve TLS data (%s) for context %q", f, contextName) + return nil, fmt.Errorf("failed to retrieve TLS data (%s) for context %q: %w", f, contextName, err) } switch f { case caKey: diff --git a/vendor/github.com/docker/cli/cli/flags/options.go b/vendor/github.com/docker/cli/cli/flags/options.go index 309cb4616..8c31a0f17 100644 --- a/vendor/github.com/docker/cli/cli/flags/options.go +++ b/vendor/github.com/docker/cli/cli/flags/options.go @@ -7,8 +7,8 @@ import ( "path/filepath" "github.com/docker/cli/cli/config" - "github.com/docker/docker/client" "github.com/docker/go-connections/tlsconfig" + "github.com/moby/moby/client" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) @@ -110,22 +110,20 @@ func (o *ClientOptions) InstallFlags(flags *pflag.FlagSet) { if dockerCertPath == "" { dockerCertPath = 
configDir } + o.TLSOptions = &tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, DefaultCaFile), + CertFile: filepath.Join(dockerCertPath, DefaultCertFile), + KeyFile: filepath.Join(dockerCertPath, DefaultKeyFile), + } flags.StringVar(&o.ConfigDir, "config", configDir, "Location of client config files") flags.BoolVarP(&o.Debug, "debug", "D", false, "Enable debug mode") flags.StringVarP(&o.LogLevel, "log-level", "l", "info", `Set the logging level ("debug", "info", "warn", "error", "fatal")`) flags.BoolVar(&o.TLS, "tls", dockerTLS, "Use TLS; implied by --tlsverify") flags.BoolVar(&o.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote") - - o.TLSOptions = &tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, DefaultCaFile), - CertFile: filepath.Join(dockerCertPath, DefaultCertFile), - KeyFile: filepath.Join(dockerCertPath, DefaultKeyFile), - } - tlsOptions := o.TLSOptions - flags.Var(&quotedString{&tlsOptions.CAFile}, "tlscacert", "Trust certs signed only by this CA") - flags.Var(&quotedString{&tlsOptions.CertFile}, "tlscert", "Path to TLS certificate file") - flags.Var(&quotedString{&tlsOptions.KeyFile}, "tlskey", "Path to TLS key file") + flags.StringVar(&o.TLSOptions.CAFile, "tlscacert", o.TLSOptions.CAFile, "Trust certs signed only by this CA") + flags.StringVar(&o.TLSOptions.CertFile, "tlscert", o.TLSOptions.CertFile, "Path to TLS certificate file") + flags.StringVar(&o.TLSOptions.KeyFile, "tlskey", o.TLSOptions.KeyFile, "Path to TLS key file") // TODO(thaJeztah): show the default host. // TODO(thaJeztah): this should be a string, not an "array" as we only allow a single host. @@ -179,33 +177,3 @@ func SetLogLevel(logLevel string) { logrus.SetLevel(logrus.InfoLevel) } } - -type quotedString struct { - value *string -} - -func (s *quotedString) Set(val string) error { - *s.value = trimQuotes(val) - return nil -} - -func (*quotedString) Type() string { - return "string" -} - -func (s *quotedString) String() string { - return *s.value -} - -func trimQuotes(value string) string { - if len(value) < 2 { - return value - } - lastIndex := len(value) - 1 - for _, char := range []byte{'\'', '"'} { - if value[0] == char && value[lastIndex] == char { - return value[1:lastIndex] - } - } - return value -} diff --git a/vendor/github.com/docker/cli/cli/required.go b/vendor/github.com/docker/cli/cli/required.go index 6455e8867..b6c3b1f9c 100644 --- a/vendor/github.com/docker/cli/cli/required.go +++ b/vendor/github.com/docker/cli/cli/required.go @@ -1,7 +1,8 @@ package cli import ( - "github.com/pkg/errors" + "fmt" + "github.com/spf13/cobra" ) @@ -12,7 +13,7 @@ func NoArgs(cmd *cobra.Command, args []string) error { } if cmd.HasSubCommands() { - return errors.Errorf( + return fmt.Errorf( "%[1]s: unknown command: %[2]s %[3]s\n\nUsage: %[4]s\n\nRun '%[2]s --help' for more information", binName(cmd), cmd.CommandPath(), ) } - return errors.Errorf( + return fmt.Errorf( "%[1]s: '%[2]s' accepts no arguments\n\nUsage: %[3]s\n\nRun '%[2]s --help' for more information", binName(cmd), cmd.CommandPath(), ) } @@ -35,7 +36,7 @@ func RequiresMinArgs(minArgs int) cobra.PositionalArgs { if len(args) >= minArgs { return nil } - return errors.Errorf( + return fmt.Errorf( "%[1]s: '%[2]s' requires at least %[3]d %[4]s\n\nUsage: %[5]s\n\nSee '%[2]s --help' for more information", binName(cmd), cmd.CommandPath(), @@ -52,8 +53,8 @@ func RequiresMaxArgs(maxArgs int) cobra.PositionalArgs { if len(args) <= maxArgs { return nil } - return
errors.Errorf( - "%[1]s: '%[2]s' requires at most %[3]d %[4]s\n\nUsage: %[5]s\n\nSRun '%[2]s --help' for more information", + return fmt.Errorf( + "%[1]s: '%[2]s' requires at most %[3]d %[4]s\n\nUsage: %[5]s\n\nRun '%[2]s --help' for more information", binName(cmd), cmd.CommandPath(), maxArgs, @@ -69,7 +70,7 @@ func RequiresRangeArgs(minArgs int, maxArgs int) cobra.PositionalArgs { if len(args) >= minArgs && len(args) <= maxArgs { return nil } - return errors.Errorf( + return fmt.Errorf( "%[1]s: '%[2]s' requires at least %[3]d and at most %[4]d %[5]s\n\nUsage: %[6]s\n\nRun '%[2]s --help' for more information", binName(cmd), cmd.CommandPath(), @@ -87,7 +88,7 @@ func ExactArgs(number int) cobra.PositionalArgs { if len(args) == number { return nil } - return errors.Errorf( + return fmt.Errorf( "%[1]s: '%[2]s' requires %[3]d %[4]s\n\nUsage: %[5]s\n\nRun '%[2]s --help' for more information", binName(cmd), cmd.CommandPath(), diff --git a/vendor/github.com/docker/cli/internal/tui/chip.go b/vendor/github.com/docker/cli/internal/tui/chip.go index 02a9b8b8b..c48158e75 100644 --- a/vendor/github.com/docker/cli/internal/tui/chip.go +++ b/vendor/github.com/docker/cli/internal/tui/chip.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package tui diff --git a/vendor/github.com/docker/cli/internal/tui/colors.go b/vendor/github.com/docker/cli/internal/tui/colors.go index d82d61dd7..6f1d8e6b7 100644 --- a/vendor/github.com/docker/cli/internal/tui/colors.go +++ b/vendor/github.com/docker/cli/internal/tui/colors.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package tui diff --git a/vendor/github.com/docker/cli/internal/tui/count.go b/vendor/github.com/docker/cli/internal/tui/count.go index 5d7ebd944..a626b24f8 100644 --- a/vendor/github.com/docker/cli/internal/tui/count.go +++ b/vendor/github.com/docker/cli/internal/tui/count.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package tui diff --git a/vendor/github.com/docker/cli/internal/tui/note.go b/vendor/github.com/docker/cli/internal/tui/note.go index d2bc0b9af..759d8d0e2 100644 --- a/vendor/github.com/docker/cli/internal/tui/note.go +++ b/vendor/github.com/docker/cli/internal/tui/note.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package tui @@ -28,7 +28,7 @@ func withHeader(header Str) noteOptions { } func (o Output) printNoteWithOptions(format string, args []any, opts ...noteOptions) { - if o.isTerminal { + if !o.noColor { // TODO: Handle all flags format = strings.ReplaceAll(format, "--platform", ColorFlag.Apply("--platform")) } @@ -51,7 +51,7 @@ func (o Output) printNoteWithOptions(format string, args []any, opts ...noteOpti } l := line - if o.isTerminal { + if !o.noColor { l = aec.Italic.Apply(l) } _, _ = fmt.Fprintln(o, l) diff --git a/vendor/github.com/docker/cli/internal/tui/output.go b/vendor/github.com/docker/cli/internal/tui/output.go index 1f526d3fb..34bc13a3a 100644 --- a/vendor/github.com/docker/cli/internal/tui/output.go +++ b/vendor/github.com/docker/cli/internal/tui/output.go @@ -1,10 +1,11 
@@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package tui import ( "fmt" + "os" "github.com/docker/cli/cli/streams" "github.com/morikuni/aec" @@ -12,7 +13,7 @@ import ( type Output struct { *streams.Out - isTerminal bool + noColor bool } type terminalPrintable interface { @@ -20,24 +21,28 @@ type terminalPrintable interface { } func NewOutput(out *streams.Out) Output { + noColor := !out.IsTerminal() + if os.Getenv("NO_COLOR") != "" { + noColor = true + } return Output{ - Out: out, - isTerminal: out.IsTerminal(), + Out: out, + noColor: noColor, } } func (o Output) Color(clr aec.ANSI) aec.ANSI { - if o.isTerminal { - return clr + if o.noColor { + return ColorNone } - return ColorNone + return clr } func (o Output) Sprint(all ...any) string { var out []any for _, p := range all { if s, ok := p.(terminalPrintable); ok { - out = append(out, s.String(o.isTerminal)) + out = append(out, s.String(!o.noColor)) } else { out = append(out, p) } @@ -47,7 +52,7 @@ func (o Output) Sprint(all ...any) string { func (o Output) PrintlnWithColor(clr aec.ANSI, args ...any) { msg := o.Sprint(args...) - if o.isTerminal { + if !o.noColor { msg = clr.Apply(msg) } _, _ = fmt.Fprintln(o.Out, msg) diff --git a/vendor/github.com/docker/cli/internal/tui/str.go b/vendor/github.com/docker/cli/internal/tui/str.go index c1ea9c95d..82fc59f1c 100644 --- a/vendor/github.com/docker/cli/internal/tui/str.go +++ b/vendor/github.com/docker/cli/internal/tui/str.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package tui diff --git a/vendor/github.com/docker/cli/internal/volumespec/volumespec.go b/vendor/github.com/docker/cli/internal/volumespec/volumespec.go index 0ae4f31c8..791805edf 100644 --- a/vendor/github.com/docker/cli/internal/volumespec/volumespec.go +++ b/vendor/github.com/docker/cli/internal/volumespec/volumespec.go @@ -1,12 +1,13 @@ package volumespec import ( + "errors" + "fmt" "strings" "unicode" "unicode/utf8" - "github.com/docker/docker/api/types/mount" - "github.com/pkg/errors" + "github.com/moby/moby/api/types/mount" ) const endOfSpec = rune(0) @@ -24,7 +25,7 @@ func Parse(spec string) (VolumeConfig, error) { return volume, nil } - buffer := []rune{} + var buffer []rune for _, char := range spec + string(endOfSpec) { switch { case isWindowsDrive(buffer, char): @@ -32,7 +33,7 @@ func Parse(spec string) (VolumeConfig, error) { case char == ':' || char == endOfSpec: if err := populateFieldFromBuffer(char, buffer, &volume); err != nil { populateType(&volume) - return volume, errors.Wrapf(err, "invalid spec: %s", spec) + return volume, fmt.Errorf("invalid spec: %s: %w", spec, err) } buffer = []rune{} default: diff --git a/vendor/github.com/docker/cli/opts/envfile_deprecated.go b/vendor/github.com/docker/cli/opts/envfile_deprecated.go deleted file mode 100644 index f2f10b181..000000000 --- a/vendor/github.com/docker/cli/opts/envfile_deprecated.go +++ /dev/null @@ -1,14 +0,0 @@ -package opts - -import ( - "os" - - "github.com/docker/cli/pkg/kvfile" -) - -// ParseEnvFile reads a file with environment variables enumerated by lines -// -// Deprecated: use [kvfile.Parse] and pass [os.LookupEnv] to lookup env-vars from the current environment. 
-func ParseEnvFile(filename string) ([]string, error) { - return kvfile.Parse(filename, os.LookupEnv) -} diff --git a/vendor/github.com/docker/cli/opts/gpus.go b/vendor/github.com/docker/cli/opts/gpus.go index 6a56c49c4..b39a3f14e 100644 --- a/vendor/github.com/docker/cli/opts/gpus.go +++ b/vendor/github.com/docker/cli/opts/gpus.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - "github.com/docker/docker/api/types/container" + "github.com/moby/moby/api/types/container" ) // GpuOpts is a Value type for parsing mounts diff --git a/vendor/github.com/docker/cli/opts/hosts.go b/vendor/github.com/docker/cli/opts/hosts.go index 87e1a1da7..dcbbb7e78 100644 --- a/vendor/github.com/docker/cli/opts/hosts.go +++ b/vendor/github.com/docker/cli/opts/hosts.go @@ -32,23 +32,6 @@ const ( hostGatewayName = "host-gateway" ) -// ValidateHost validates that the specified string is a valid host and returns it. -// -// Deprecated: this function is no longer used, and will be removed in the next release. -func ValidateHost(val string) (string, error) { - host := strings.TrimSpace(val) - // The empty string means default and is not handled by parseDockerDaemonHost - if host != "" { - _, err := parseDockerDaemonHost(host) - if err != nil { - return val, err - } - } - // Note: unlike most flag validators, we don't return the mutated value here - // we need to know what the user entered later (using ParseHost) to adjust for TLS - return val, nil -} - // ParseHost and set defaults for a Daemon host string func ParseHost(defaultToTLS bool, val string) (string, error) { host := strings.TrimSpace(val) diff --git a/vendor/github.com/docker/cli/opts/mount.go b/vendor/github.com/docker/cli/opts/mount.go index 05c1cd0b0..0ac252f31 100644 --- a/vendor/github.com/docker/cli/opts/mount.go +++ b/vendor/github.com/docker/cli/opts/mount.go @@ -9,9 +9,8 @@ import ( "strconv" "strings" - mounttypes "github.com/docker/docker/api/types/mount" "github.com/docker/go-units" - "github.com/sirupsen/logrus" + mounttypes "github.com/moby/moby/api/types/mount" ) // MountOpt is a Value type for parsing mounts @@ -88,8 +87,7 @@ func (m *MountOpt) Set(value string) error { volumeOptions().NoCopy = true continue case "bind-nonrecursive": - bindOptions().NonRecursive = true - continue + return errors.New("bind-nonrecursive is deprecated, use bind-recursive=disabled instead") default: return fmt.Errorf("invalid field '%s' must be a key=value pair", field) } @@ -117,16 +115,12 @@ func (m *MountOpt) Set(value string) error { case "bind-propagation": bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(val)) case "bind-nonrecursive": - bindOptions().NonRecursive, err = strconv.ParseBool(val) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, val) - } - logrus.Warn("bind-nonrecursive is deprecated, use bind-recursive=disabled instead") + return errors.New("bind-nonrecursive is deprecated, use bind-recursive=disabled instead") case "bind-recursive": switch val { case "enabled": // read-only mounts are recursively read-only if Engine >= v25 && kernel >= v5.12, otherwise writable // NOP - case "disabled": // alias of bind-nonrecursive=true + case "disabled": // previously "bind-nonrecursive=true" bindOptions().NonRecursive = true case "writable": // conforms to the default read-only bind-mount of Docker v24; read-only mounts are recursively mounted but not recursively read-only bindOptions().ReadOnlyNonRecursive = true diff --git a/vendor/github.com/docker/cli/opts/network.go b/vendor/github.com/docker/cli/opts/network.go 
index 43b3a09d4..489ef8be3 100644 --- a/vendor/github.com/docker/cli/opts/network.go +++ b/vendor/github.com/docker/cli/opts/network.go @@ -4,6 +4,7 @@ import ( "encoding/csv" "errors" "fmt" + "net/netip" "regexp" "strconv" "strings" @@ -26,9 +27,9 @@ type NetworkAttachmentOpts struct { Aliases []string DriverOpts map[string]string Links []string // TODO add support for links in the csv notation of `--network` - IPv4Address string - IPv6Address string - LinkLocalIPs []string + IPv4Address netip.Addr + IPv6Address netip.Addr + LinkLocalIPs []netip.Addr MacAddress string GwPriority int } @@ -70,13 +71,23 @@ func (n *NetworkOpt) Set(value string) error { //nolint:gocyclo case networkOptAlias: netOpt.Aliases = append(netOpt.Aliases, val) case networkOptIPv4Address: - netOpt.IPv4Address = val + netOpt.IPv4Address, err = netip.ParseAddr(val) + if err != nil { + return err + } case networkOptIPv6Address: - netOpt.IPv6Address = val + netOpt.IPv6Address, err = netip.ParseAddr(val) + if err != nil { + return err + } case networkOptMacAddress: netOpt.MacAddress = val case networkOptLinkLocalIP: - netOpt.LinkLocalIPs = append(netOpt.LinkLocalIPs, val) + a, err := netip.ParseAddr(val) + if err != nil { + return err + } + netOpt.LinkLocalIPs = append(netOpt.LinkLocalIPs, a) case driverOpt: key, val, err = parseDriverOpt(val) if err != nil { diff --git a/vendor/github.com/docker/cli/opts/opts.go b/vendor/github.com/docker/cli/opts/opts.go index 94eda0560..0b8979b6b 100644 --- a/vendor/github.com/docker/cli/opts/opts.go +++ b/vendor/github.com/docker/cli/opts/opts.go @@ -1,6 +1,7 @@ package opts import ( + "encoding/json" "errors" "fmt" "math/big" @@ -9,8 +10,8 @@ import ( "strings" "github.com/docker/cli/internal/lazyregexp" - "github.com/docker/docker/api/types/filters" "github.com/docker/go-units" + "github.com/moby/moby/client" ) var ( @@ -60,6 +61,8 @@ func (opts *ListOpts) Set(value string) error { } // Delete removes the specified element from the slice. +// +// Deprecated: this method is no longer used and will be removed in the next release. func (opts *ListOpts) Delete(key string) { for i, k := range *opts.values { if k == key { @@ -79,13 +82,6 @@ func (opts *ListOpts) GetMap() map[string]struct{} { return ret } -// GetAll returns the values of slice. -// -// Deprecated: use [ListOpts.GetSlice] instead. This method will be removed in a future release. -func (opts *ListOpts) GetAll() []string { - return *opts.values -} - // GetSlice returns the values of slice. // // It implements [cobra.SliceValue] to allow shell completion to be provided @@ -132,43 +128,6 @@ func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts { return opts } -// NamedOption is an interface that list and map options -// with names implement. -// -// Deprecated: NamedOption is no longer used and will be removed in the next release. -type NamedOption interface { - Name() string -} - -// NamedListOpts is a ListOpts with a configuration name. -// This struct is useful to keep reference to the assigned -// field name in the internal configuration struct. -// -// Deprecated: NamedListOpts is no longer used and will be removed in the next release. -type NamedListOpts struct { - name string - ListOpts -} - -var _ NamedOption = &NamedListOpts{} - -// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. -// -// Deprecated: NewNamedListOptsRef is no longer used and will be removed in the next release. 
-func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { - return &NamedListOpts{ - name: name, - ListOpts: *NewListOptsRef(values, validator), - } -} - -// Name returns the name of the NamedListOpts in the configuration. -// -// Deprecated: NamedListOpts is no longer used and will be removed in the next release. -func (o *NamedListOpts) Name() string { - return o.name -} - // MapOpts holds a map of values and a validation function. type MapOpts struct { values map[string]string @@ -215,35 +174,6 @@ func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { } } -// NamedMapOpts is a MapOpts struct with a configuration name. -// This struct is useful to keep reference to the assigned -// field name in the internal configuration struct. -// -// Deprecated: NamedMapOpts is no longer used and will be removed in the next release. -type NamedMapOpts struct { - name string - MapOpts -} - -var _ NamedOption = &NamedMapOpts{} - -// NewNamedMapOpts creates a reference to a new NamedMapOpts struct. -// -// Deprecated: NamedMapOpts is no longer used and will be removed in the next release. -func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { - return &NamedMapOpts{ - name: name, - MapOpts: *NewMapOpts(values, validator), - } -} - -// Name returns the name of the NamedMapOpts in the configuration. -// -// Deprecated: NamedMapOpts is no longer used and will be removed in the next release. -func (o *NamedMapOpts) Name() string { - return o.name -} - // ValidatorFctType defines a validator function that returns a validated string and/or an error. type ValidatorFctType func(val string) (string, error) @@ -264,6 +194,8 @@ func ValidateIPAddress(val string) (string, error) { } // ValidateMACAddress validates a MAC address. +// +// Deprecated: use [net.ParseMAC]. This function will be removed in the next release. 
func ValidateMACAddress(val string) (string, error) { _, err := net.ParseMAC(strings.TrimSpace(val)) if err != nil { @@ -350,20 +282,23 @@ func ValidateSysctl(val string) (string, error) { // FilterOpt is a flag type for validating filters type FilterOpt struct { - filter filters.Args + filter client.Filters } // NewFilterOpt returns a new FilterOpt func NewFilterOpt() FilterOpt { - return FilterOpt{filter: filters.NewArgs()} + return FilterOpt{filter: make(client.Filters)} } func (o *FilterOpt) String() string { - repr, err := filters.ToJSON(o.filter) + if o == nil || len(o.filter) == 0 { + return "" + } + repr, err := json.Marshal(o.filter) if err != nil { return "invalid filters" } - return repr + return string(repr) } // Set sets the value of the opt by parsing the command line value @@ -389,7 +324,7 @@ func (*FilterOpt) Type() string { } // Value returns the value of this option -func (o *FilterOpt) Value() filters.Args { +func (o *FilterOpt) Value() client.Filters { return o.filter } diff --git a/vendor/github.com/docker/cli/opts/parse.go b/vendor/github.com/docker/cli/opts/parse.go index 996d4d7e7..c04fc7d4b 100644 --- a/vendor/github.com/docker/cli/opts/parse.go +++ b/vendor/github.com/docker/cli/opts/parse.go @@ -7,7 +7,7 @@ import ( "strings" "github.com/docker/cli/pkg/kvfile" - "github.com/docker/docker/api/types/container" + "github.com/moby/moby/api/types/container" ) // ReadKVStrings reads a file of line terminated key=value pairs, and overrides any keys diff --git a/vendor/github.com/docker/cli/opts/quotedstring.go b/vendor/github.com/docker/cli/opts/quotedstring.go deleted file mode 100644 index d1d8b09a1..000000000 --- a/vendor/github.com/docker/cli/opts/quotedstring.go +++ /dev/null @@ -1,44 +0,0 @@ -package opts - -// QuotedString is a string that may have extra quotes around the value. The -// quotes are stripped from the value. -// -// Deprecated: This option type is no longer used and will be removed in the next release. -type QuotedString struct { - value *string -} - -// Set sets a new value -func (s *QuotedString) Set(val string) error { - *s.value = trimQuotes(val) - return nil -} - -// Type returns the type of the value -func (*QuotedString) Type() string { - return "string" -} - -func (s *QuotedString) String() string { - return *s.value -} - -func trimQuotes(value string) string { - if len(value) < 2 { - return value - } - lastIndex := len(value) - 1 - for _, char := range []byte{'\'', '"'} { - if value[0] == char && value[lastIndex] == char { - return value[1:lastIndex] - } - } - return value -} - -// NewQuotedString returns a new quoted string option -// -// Deprecated: This option type is no longer used and will be removed in the next release. 
-func NewQuotedString(value *string) *QuotedString { - return &QuotedString{value: value} -} diff --git a/vendor/github.com/docker/cli/opts/swarmopts/config.go b/vendor/github.com/docker/cli/opts/swarmopts/config.go index ff137304a..78e8f886b 100644 --- a/vendor/github.com/docker/cli/opts/swarmopts/config.go +++ b/vendor/github.com/docker/cli/opts/swarmopts/config.go @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "github.com/docker/docker/api/types/swarm" + "github.com/moby/moby/api/types/swarm" ) // ConfigOpt is a Value type for parsing configs diff --git a/vendor/github.com/docker/cli/opts/swarmopts/port.go b/vendor/github.com/docker/cli/opts/swarmopts/port.go index e15c6b830..2a8c2b71c 100644 --- a/vendor/github.com/docker/cli/opts/swarmopts/port.go +++ b/vendor/github.com/docker/cli/opts/swarmopts/port.go @@ -9,8 +9,9 @@ import ( "strconv" "strings" - "github.com/docker/docker/api/types/swarm" "github.com/docker/go-connections/nat" + "github.com/moby/moby/api/types/network" + "github.com/moby/moby/api/types/swarm" "github.com/sirupsen/logrus" ) @@ -41,7 +42,10 @@ func (p *PortOpt) Set(value string) error { return err } - pConfig := swarm.PortConfig{} + pConfig := swarm.PortConfig{ + Protocol: network.TCP, + PublishMode: swarm.PortConfigPublishModeIngress, + } for _, field := range fields { // TODO(thaJeztah): these options should not be case-insensitive. key, val, ok := strings.Cut(strings.ToLower(field), "=") @@ -50,17 +54,19 @@ func (p *PortOpt) Set(value string) error { } switch key { case portOptProtocol: - if val != string(swarm.PortConfigProtocolTCP) && val != string(swarm.PortConfigProtocolUDP) && val != string(swarm.PortConfigProtocolSCTP) { + switch proto := network.IPProtocol(val); proto { + case network.TCP, network.UDP, network.SCTP: + pConfig.Protocol = proto + default: return fmt.Errorf("invalid protocol value '%s'", val) } - - pConfig.Protocol = swarm.PortConfigProtocol(val) case portOptMode: - if val != string(swarm.PortConfigPublishModeIngress) && val != string(swarm.PortConfigPublishModeHost) { + switch swarm.PortConfigPublishMode(val) { + case swarm.PortConfigPublishModeIngress, swarm.PortConfigPublishModeHost: + pConfig.PublishMode = swarm.PortConfigPublishMode(val) + default: return fmt.Errorf("invalid publish mode value (%s): must be either '%s' or '%s'", val, swarm.PortConfigPublishModeIngress, swarm.PortConfigPublishModeHost) } - - pConfig.PublishMode = swarm.PortConfigPublishMode(val) case portOptTargetPort: tPort, err := strconv.ParseUint(val, 10, 16) if err != nil { @@ -92,18 +98,11 @@ func (p *PortOpt) Set(value string) error { return fmt.Errorf("missing mandatory field '%s'", portOptTargetPort) } - if pConfig.PublishMode == "" { - pConfig.PublishMode = swarm.PortConfigPublishModeIngress - } - - if pConfig.Protocol == "" { - pConfig.Protocol = swarm.PortConfigProtocolTCP - } - p.ports = append(p.ports, pConfig) } else { - // short syntax - portConfigs := []swarm.PortConfig{} + // short syntax ([ip:]public:private[/proto]) + // + // TODO(thaJeztah): we need an equivalent that handles the "ip-address" part without depending on the nat package. 
ports, portBindingMap, err := nat.ParsePortSpecs([]string{value}) if err != nil { return err @@ -116,8 +115,13 @@ func (p *PortOpt) Set(value string) error { } } + var portConfigs []swarm.PortConfig for port := range ports { - portConfig, err := ConvertPortToPortConfig(port, portBindingMap) + portProto, err := network.ParsePort(string(port)) + if err != nil { + return err + } + portConfig, err := ConvertPortToPortConfig(portProto, portBindingMap) if err != nil { return err } @@ -135,7 +139,7 @@ func (*PortOpt) Type() string { // String returns a string repr of this option func (p *PortOpt) String() string { - ports := []string{} + ports := make([]string, 0, len(p.ports)) for _, port := range p.ports { repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode) ports = append(ports, repr) @@ -150,29 +154,27 @@ func (p *PortOpt) Value() []swarm.PortConfig { // ConvertPortToPortConfig converts ports to the swarm type func ConvertPortToPortConfig( - port nat.Port, + portProto network.Port, portBindings map[nat.Port][]nat.PortBinding, ) ([]swarm.PortConfig, error) { - ports := []swarm.PortConfig{} - - for _, binding := range portBindings[port] { + ports := make([]swarm.PortConfig, 0, len(portBindings)) + for _, binding := range portBindings[nat.Port(portProto.String())] { if p := net.ParseIP(binding.HostIP); p != nil && !p.IsUnspecified() { // TODO(thaJeztah): use context-logger, so that this output can be suppressed (in tests). - logrus.Warnf("ignoring IP-address (%s:%s) service will listen on '0.0.0.0'", net.JoinHostPort(binding.HostIP, binding.HostPort), port) + logrus.Warnf("ignoring IP-address (%s:%s) service will listen on '0.0.0.0'", net.JoinHostPort(binding.HostIP, binding.HostPort), portProto.String()) } - startHostPort, endHostPort, err := nat.ParsePortRange(binding.HostPort) - + pr, err := network.ParsePortRange(binding.HostPort) if err != nil && binding.HostPort != "" { - return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port()) + return nil, fmt.Errorf("invalid hostport binding (%s) for port (%d)", binding.HostPort, portProto.Num()) } - for i := startHostPort; i <= endHostPort; i++ { + for p := range pr.All() { ports = append(ports, swarm.PortConfig{ // TODO Name: ? 
- Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())), - TargetPort: uint32(port.Int()), - PublishedPort: uint32(i), + Protocol: portProto.Proto(), + TargetPort: uint32(portProto.Num()), + PublishedPort: uint32(p.Num()), PublishMode: swarm.PortConfigPublishModeIngress, }) } diff --git a/vendor/github.com/docker/cli/opts/swarmopts/secret.go b/vendor/github.com/docker/cli/opts/swarmopts/secret.go index 9f97627a5..b28970af9 100644 --- a/vendor/github.com/docker/cli/opts/swarmopts/secret.go +++ b/vendor/github.com/docker/cli/opts/swarmopts/secret.go @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "github.com/docker/docker/api/types/swarm" + "github.com/moby/moby/api/types/swarm" ) // SecretOpt is a Value type for parsing secrets diff --git a/vendor/github.com/docker/cli/opts/throttledevice.go b/vendor/github.com/docker/cli/opts/throttledevice.go index 46b09185c..bc759820e 100644 --- a/vendor/github.com/docker/cli/opts/throttledevice.go +++ b/vendor/github.com/docker/cli/opts/throttledevice.go @@ -5,8 +5,8 @@ import ( "strconv" "strings" - "github.com/docker/docker/api/types/blkiodev" "github.com/docker/go-units" + "github.com/moby/moby/api/types/blkiodev" ) // ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error. diff --git a/vendor/github.com/docker/cli/opts/ulimit.go b/vendor/github.com/docker/cli/opts/ulimit.go index 48052c887..aa88bce71 100644 --- a/vendor/github.com/docker/cli/opts/ulimit.go +++ b/vendor/github.com/docker/cli/opts/ulimit.go @@ -4,8 +4,8 @@ import ( "fmt" "sort" - "github.com/docker/docker/api/types/container" "github.com/docker/go-units" + "github.com/moby/moby/api/types/container" ) // UlimitOpt defines a map of Ulimits diff --git a/vendor/github.com/docker/cli/opts/weightdevice.go b/vendor/github.com/docker/cli/opts/weightdevice.go index 036c7c8c5..4476548fd 100644 --- a/vendor/github.com/docker/cli/opts/weightdevice.go +++ b/vendor/github.com/docker/cli/opts/weightdevice.go @@ -5,7 +5,7 @@ import ( "strconv" "strings" - "github.com/docker/docker/api/types/blkiodev" + "github.com/moby/moby/api/types/blkiodev" ) // ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error. diff --git a/vendor/github.com/docker/cli/templates/templates.go b/vendor/github.com/docker/cli/templates/templates.go index f0726eec9..e339050ab 100644 --- a/vendor/github.com/docker/cli/templates/templates.go +++ b/vendor/github.com/docker/cli/templates/templates.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 +//go:build go1.24 package templates @@ -13,18 +13,7 @@ import ( // basicFunctions are the set of initial // functions provided to every template. var basicFunctions = template.FuncMap{ - "json": func(v any) string { - buf := &bytes.Buffer{} - enc := json.NewEncoder(buf) - enc.SetEscapeHTML(false) - err := enc.Encode(v) - if err != nil { - panic(err) - } - - // Remove the trailing new line added by the encoder - return strings.TrimSpace(buf.String()) - }, + "json": formatJSON, "split": strings.Split, "join": strings.Join, "title": strings.Title, //nolint:nolintlint,staticcheck // strings.Title is deprecated, but we only use it for ASCII, so replacing with golang.org/x/text is out of scope @@ -71,7 +60,7 @@ var HeaderFunctions = template.FuncMap{ // Parse creates a new anonymous template with the basic functions // and parses the given format. 
func Parse(format string) (*template.Template, error) { - return NewParse("", format) + return template.New("").Funcs(basicFunctions).Parse(format) } // New creates a new empty template with the provided tag and built-in @@ -80,12 +69,6 @@ func New(tag string) *template.Template { return template.New(tag).Funcs(basicFunctions) } -// NewParse creates a new tagged template with the basic functions -// and parses the given format. -func NewParse(tag, format string) (*template.Template, error) { - return New(tag).Parse(format) -} - // padWithSpace adds whitespace to the input if the input is non-empty func padWithSpace(source string, prefix, suffix int) string { if source == "" { @@ -101,3 +84,16 @@ func truncateWithLength(source string, length int) string { } return source[:length] } + +func formatJSON(v any) string { + buf := &bytes.Buffer{} + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(v) + if err != nil { + panic(err) + } + + // Remove the trailing new line added by the encoder + return strings.TrimSpace(buf.String()) +} diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 1401fa715..feb42e808 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -81,7 +81,6 @@ info: { "username": "string", "password": "string", - "email": "string", "serveraddress": "string" } ``` @@ -637,6 +636,9 @@ definitions: by the default (runc) runtime. This field is omitted when empty. + + **Deprecated**: This field is deprecated as kernel 6.12 has deprecated `memory.kmem.tcp.limit_in_bytes` field + for cgroups v1. This field will be removed in a future release. type: "integer" format: "int64" MemoryReservation: @@ -1531,37 +1533,6 @@ definitions: items: type: "string" example: ["/bin/sh", "-c"] - # FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed. - example: - "User": "web:web" - "ExposedPorts": { - "80/tcp": {}, - "443/tcp": {} - } - "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"] - "Cmd": ["/bin/sh"] - "Healthcheck": { - "Test": ["string"], - "Interval": 0, - "Timeout": 0, - "Retries": 0, - "StartPeriod": 0, - "StartInterval": 0 - } - "ArgsEscaped": true - "Volumes": { - "/app/data": {}, - "/app/config": {} - } - "WorkingDir": "/public/" - "Entrypoint": [] - "OnBuild": [] - "Labels": { - "com.example.some-label": "some-value", - "com.example.some-other-label": "some-other-value" - } - "StopSignal": "SIGTERM" - "Shell": ["/bin/sh", "-c"] NetworkingConfig: description: | @@ -1967,6 +1938,11 @@ definitions: Depending on how the image was created, this field may be empty and is only set for images that were built/created locally. This field is empty if the image was pulled from an image registry. + + > **Deprecated**: This field is only set when using the deprecated + > legacy builder. It is included in API responses for informational + > purposes, but should not be depended on as it will be omitted + > once the legacy builder is removed. type: "string" x-nullable: false example: "" @@ -1992,6 +1968,11 @@ definitions: The version of Docker that was used to build the image. Depending on how the image was created, this field may be empty. + + > **Deprecated**: This field is only set when using the deprecated + > legacy builder. 
It is included in API responses for informational + > purposes, but should not be depended on as it will be omitted + > once the legacy builder is removed. type: "string" x-nullable: false example: "27.0.1" @@ -2036,14 +2017,6 @@ definitions: format: "int64" x-nullable: false example: 1239828 - VirtualSize: - description: | - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 1239828 GraphDriver: $ref: "#/definitions/DriverData" RootFS: @@ -2176,14 +2149,6 @@ definitions: format: "int64" x-nullable: false example: 1239828 - VirtualSize: - description: |- - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 172064416 Labels: description: "User-defined key/value metadata." type: "object" @@ -2688,14 +2653,6 @@ definitions: description: | Unique ID of the build cache record. example: "ndlpt0hhvkqcdfkputsk4cq9c" - Parent: - description: | - ID of the parent build cache record. - - > **Deprecated**: This field is deprecated, and omitted if empty. - type: "string" - x-nullable: true - example: "" Parents: description: | List of parent build cache record IDs. @@ -3177,10 +3134,15 @@ definitions: - Args properties: DockerVersion: - description: "Docker Version used to create the plugin" + description: |- + Docker Version used to create the plugin. + + Depending on how the plugin was created, this field may be empty or omitted. + + Deprecated: this field is no longer set, and will be removed in the next API version. type: "string" x-nullable: false - example: "17.06.0-ce" + x-omitempty: true Description: type: "string" x-nullable: false @@ -6382,6 +6344,8 @@ definitions: Kernel memory TCP limits are not supported when using cgroups v2, which does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. + + **Deprecated**: This field is deprecated as kernel 6.12 has deprecated kernel memory TCP accounting. type: "boolean" example: true CpuCfsPeriod: @@ -6419,29 +6383,6 @@ definitions: description: "Indicates IPv4 forwarding is enabled." type: "boolean" example: true - BridgeNfIptables: - description: | - Indicates if `bridge-nf-call-iptables` is available on the host when - the daemon was started. - -
- - > **Deprecated**: netfilter module is now loaded on-demand and no longer - > during daemon startup, making this field obsolete. This field is always - > `false` and will be removed in a API v1.49. - type: "boolean" - example: false - BridgeNfIp6tables: - description: | - Indicates if `bridge-nf-call-ip6tables` is available on the host. - -
- - > **Deprecated**: netfilter module is now loaded on-demand, and no longer - > during daemon startup, making this field obsolete. This field is always - > `false` and will be removed in a API v1.49. - type: "boolean" - example: false Debug: description: | Indicates if the daemon is running in debug-mode / with debug-level diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go index f63f049c7..7a41436cc 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go @@ -394,7 +394,12 @@ type Resources struct { // KernelMemory specifies the kernel memory limit (in bytes) for the container. // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes. - KernelMemory int64 `json:",omitempty"` + KernelMemory int64 `json:",omitempty"` + // Hard limit for kernel TCP buffer memory (in bytes). + // + // Deprecated: This field is deprecated and will be removed in the next release. + // Starting with 6.12, the kernel has deprecated kernel memory tcp accounting + // for cgroups v1. KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) MemoryReservation int64 // Memory soft limit (in bytes) MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap diff --git a/vendor/github.com/docker/docker/api/types/image/image_inspect.go b/vendor/github.com/docker/docker/api/types/image/image_inspect.go index 3bdb47428..1bec0b72b 100644 --- a/vendor/github.com/docker/docker/api/types/image/image_inspect.go +++ b/vendor/github.com/docker/docker/api/types/image/image_inspect.go @@ -48,6 +48,8 @@ type InspectResponse struct { // Depending on how the image was created, this field may be empty and // is only set for images that were built/created locally. This field // is empty if the image was pulled from an image registry. + // + // Deprecated: this field is deprecated, and will be removed in the next release. Parent string // Comment is an optional message that can be set when committing or @@ -80,6 +82,8 @@ type InspectResponse struct { // DockerVersion is the version of Docker that was used to build the image. // // Depending on how the image was created, this field may be empty. + // + // Deprecated: this field is deprecated, and will be removed in the next release. DockerVersion string // Author is the name of the author that was specified when committing the diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go index abae48b9a..a9eff28a0 100644 --- a/vendor/github.com/docker/docker/api/types/plugin.go +++ b/vendor/github.com/docker/docker/api/types/plugin.go @@ -42,7 +42,11 @@ type PluginConfig struct { // Required: true Description string `json:"Description"` - // Docker Version used to create the plugin + // Docker Version used to create the plugin. + // + // Depending on how the plugin was created, this field may be empty or omitted. + // + // Deprecated: this field is no longer set, and will be removed in the next API version. 
DockerVersion string `json:"DockerVersion,omitempty"` // documentation diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go index 047639ed9..0f39099d8 100644 --- a/vendor/github.com/docker/docker/api/types/system/info.go +++ b/vendor/github.com/docker/docker/api/types/system/info.go @@ -9,19 +9,23 @@ import ( // Info contains response of Engine API: // GET "/info" type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes + // KernelMemoryLimit is not supported on cgroups v2. + // + // Deprecated: This field is deprecated and will be removed in the next release. + // Starting with kernel 6.12, the kernel has deprecated kernel memory tcp accounting KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. CPUCfsPeriod bool `json:"CpuCfsPeriod"` CPUCfsQuota bool `json:"CpuCfsQuota"` diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go deleted file mode 100644 index 24e1b45f2..000000000 --- a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go +++ /dev/null @@ -1,164 +0,0 @@ -// Package streamformatter provides helper functions to format a stream. -package streamformatter - -import ( - "encoding/json" - "fmt" - "io" - "sync" - - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/progress" -) - -const streamNewline = "\r\n" - -type jsonProgressFormatter struct{} - -func appendNewline(source []byte) []byte { - return append(source, []byte(streamNewline)...) -} - -// FormatStatus formats the specified objects according to the specified format (and id). -func FormatStatus(id, format string, a ...interface{}) []byte { - str := fmt.Sprintf(format, a...) - b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) - if err != nil { - return FormatError(err) - } - return appendNewline(b) -} - -// FormatError formats the error as a JSON object -func FormatError(err error) []byte { - jsonError, ok := err.(*jsonmessage.JSONError) - if !ok { - jsonError = &jsonmessage.JSONError{Message: err.Error()} - } - if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { - return appendNewline(b) - } - return []byte(`{"error":"format error"}` + streamNewline) -} - -func (sf *jsonProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte { - return FormatStatus(id, format, a...) -} - -// formatProgress formats the progress information for a specified action. 
-func (sf *jsonProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { - if progress == nil { - progress = &jsonmessage.JSONProgress{} - } - var auxJSON *json.RawMessage - if aux != nil { - auxJSONBytes, err := json.Marshal(aux) - if err != nil { - return nil - } - auxJSON = new(json.RawMessage) - *auxJSON = auxJSONBytes - } - b, err := json.Marshal(&jsonmessage.JSONMessage{ - Status: action, - ProgressMessage: progress.String(), - Progress: progress, - ID: id, - Aux: auxJSON, - }) - if err != nil { - return nil - } - return appendNewline(b) -} - -type rawProgressFormatter struct{} - -func (sf *rawProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte { - return []byte(fmt.Sprintf(format, a...) + streamNewline) -} - -func (sf *rawProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { - if progress == nil { - progress = &jsonmessage.JSONProgress{} - } - endl := "\r" - if progress.String() == "" { - endl += "\n" - } - return []byte(action + " " + progress.String() + endl) -} - -// NewProgressOutput returns a progress.Output object that can be passed to -// progress.NewProgressReader. -func NewProgressOutput(out io.Writer) progress.Output { - return &progressOutput{sf: &rawProgressFormatter{}, out: out, newLines: true} -} - -// NewJSONProgressOutput returns a progress.Output that formats output -// using JSON objects -func NewJSONProgressOutput(out io.Writer, newLines bool) progress.Output { - return &progressOutput{sf: &jsonProgressFormatter{}, out: out, newLines: newLines} -} - -type formatProgress interface { - formatStatus(id, format string, a ...interface{}) []byte - formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte -} - -type progressOutput struct { - sf formatProgress - out io.Writer - newLines bool - mu sync.Mutex -} - -// WriteProgress formats progress information from a ProgressReader. 
-func (out *progressOutput) WriteProgress(prog progress.Progress) error { - var formatted []byte - if prog.Message != "" { - formatted = out.sf.formatStatus(prog.ID, prog.Message) - } else { - jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts, Units: prog.Units} - formatted = out.sf.formatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) - } - - out.mu.Lock() - defer out.mu.Unlock() - _, err := out.out.Write(formatted) - if err != nil { - return err - } - - if out.newLines && prog.LastUpdate { - _, err = out.out.Write(out.sf.formatStatus("", "")) - return err - } - - return nil -} - -// AuxFormatter is a streamFormatter that writes aux progress messages -type AuxFormatter struct { - io.Writer -} - -// Emit emits the given interface as an aux progress message -func (sf *AuxFormatter) Emit(id string, aux interface{}) error { - auxJSONBytes, err := json.Marshal(aux) - if err != nil { - return err - } - auxJSON := new(json.RawMessage) - *auxJSON = auxJSONBytes - msgJSON, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Aux: auxJSON}) - if err != nil { - return err - } - msgJSON = appendNewline(msgJSON) - n, err := sf.Writer.Write(msgJSON) - if n != len(msgJSON) { - return io.ErrShortWrite - } - return err -} diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go deleted file mode 100644 index 141d12e20..000000000 --- a/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go +++ /dev/null @@ -1,47 +0,0 @@ -package streamformatter - -import ( - "encoding/json" - "io" - - "github.com/docker/docker/pkg/jsonmessage" -) - -type streamWriter struct { - io.Writer - lineFormat func([]byte) string -} - -func (sw *streamWriter) Write(buf []byte) (int, error) { - formattedBuf := sw.format(buf) - n, err := sw.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} - -func (sw *streamWriter) format(buf []byte) []byte { - msg := &jsonmessage.JSONMessage{Stream: sw.lineFormat(buf)} - b, err := json.Marshal(msg) - if err != nil { - return FormatError(err) - } - return appendNewline(b) -} - -// NewStdoutWriter returns a writer which formats the output as json message -// representing stdout lines -func NewStdoutWriter(out io.Writer) io.Writer { - return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { - return string(buf) - }} -} - -// NewStderrWriter returns a writer which formats the output as json message -// representing stderr lines -func NewStderrWriter(out io.Writer) io.Writer { - return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { - return "\033[91m" + string(buf) + "\033[0m" - }} -} diff --git a/vendor/github.com/go-git/go-billy/v5/fs.go b/vendor/github.com/go-git/go-billy/v5/fs.go index d86f9d823..14a020a35 100644 --- a/vendor/github.com/go-git/go-billy/v5/fs.go +++ b/vendor/github.com/go-git/go-billy/v5/fs.go @@ -128,12 +128,18 @@ type Symlink interface { Readlink(link string) (string, error) } -// Change abstract the FileInfo change related operations in a storage-agnostic -// interface as an extension to the Basic interface -type Change interface { +// Chmod abstracts the logic around changing file modes. +type Chmod interface { // Chmod changes the mode of the named file to mode. If the file is a // symbolic link, it changes the mode of the link's target. 
Chmod(name string, mode os.FileMode) error +} + +// Change abstract the FileInfo change related operations in a storage-agnostic +// interface as an extension to the Basic interface +type Change interface { + Chmod + // Lchown changes the numeric uid and gid of the named file. If the file is // a symbolic link, it changes the uid and gid of the link itself. Lchown(name string, uid, gid int) error diff --git a/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go b/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go index 8b44e784b..dbdf11186 100644 --- a/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go +++ b/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go @@ -1,6 +1,7 @@ package chroot import ( + "errors" "os" "path/filepath" "strings" @@ -200,6 +201,19 @@ func (fs *ChrootHelper) Readlink(link string) (string, error) { return string(os.PathSeparator) + target, nil } +func (fs *ChrootHelper) Chmod(path string, mode os.FileMode) error { + fullpath, err := fs.underlyingPath(path) + if err != nil { + return err + } + + c, ok := fs.underlying.(billy.Chmod) + if !ok { + return errors.New("underlying fs does not implement billy.Chmod") + } + return c.Chmod(fullpath, mode) +} + func (fs *ChrootHelper) Chroot(path string) (billy.Filesystem, error) { fullpath, err := fs.underlyingPath(path) if err != nil { diff --git a/vendor/github.com/go-git/go-billy/v5/memfs/memory.go b/vendor/github.com/go-git/go-billy/v5/memfs/memory.go index 6cbd7d08c..152cb9e4f 100644 --- a/vendor/github.com/go-git/go-billy/v5/memfs/memory.go +++ b/vendor/github.com/go-git/go-billy/v5/memfs/memory.go @@ -177,6 +177,10 @@ func (fs *Memory) Remove(filename string) error { return fs.s.Remove(filename) } +func (fs *Memory) Chmod(path string, mode os.FileMode) error { + return fs.s.Chmod(path, mode) +} + // Falls back to Go's filepath.Join, which works differently depending on the // OS where the code is being executed. func (fs *Memory) Join(elem ...string) string { diff --git a/vendor/github.com/go-git/go-billy/v5/memfs/storage.go b/vendor/github.com/go-git/go-billy/v5/memfs/storage.go index 16b48ce00..9960996b8 100644 --- a/vendor/github.com/go-git/go-billy/v5/memfs/storage.go +++ b/vendor/github.com/go-git/go-billy/v5/memfs/storage.go @@ -169,6 +169,18 @@ func (s *storage) Remove(path string) error { return nil } +func (s *storage) Chmod(path string, mode os.FileMode) error { + path = clean(path) + + f, has := s.Get(path) + if !has { + return os.ErrNotExist + } + + f.mode = mode + return nil +} + func clean(path string) string { return filepath.Clean(filepath.FromSlash(path)) } diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go index c0a610990..6f5448056 100644 --- a/vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go @@ -176,6 +176,14 @@ func (fs *BoundOS) Readlink(link string) (string, error) { return os.Readlink(link) } +func (fs *BoundOS) Chmod(path string, mode os.FileMode) error { + abspath, err := fs.abs(path) + if err != nil { + return err + } + return os.Chmod(abspath, mode) +} + // Chroot returns a new OS filesystem, with the base dir set to the // result of joining the provided path with the underlying base dir. 
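
The Chmod additions across memfs, osfs, and the chroot helper all hang off the new billy.Chmod capability interface. A small sketch of feature-detecting it, mirroring the type assertion ChrootHelper.Chmod uses above; file name and modes are illustrative:

    package main

    import (
        "log"

        "github.com/go-git/go-billy/v5"
        "github.com/go-git/go-billy/v5/memfs"
        "github.com/go-git/go-billy/v5/util"
    )

    func main() {
        fs := memfs.New()
        if err := util.WriteFile(fs, "script.sh", []byte("#!/bin/sh\n"), 0o644); err != nil {
            log.Fatal(err)
        }
        // billy.Filesystem does not expose Chmod directly; assert the new
        // capability interface, as the chroot helper does.
        ch, ok := fs.(billy.Chmod)
        if !ok {
            log.Fatal("filesystem does not support Chmod")
        }
        if err := ch.Chmod("script.sh", 0o755); err != nil {
            log.Fatal(err)
        }
    }
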
func (fs *BoundOS) Chroot(path string) (billy.Filesystem, error) { diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_chroot.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_chroot.go index fd65e773c..413b3b898 100644 --- a/vendor/github.com/go-git/go-billy/v5/osfs/os_chroot.go +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_chroot.go @@ -74,6 +74,10 @@ func (fs *ChrootOS) Remove(filename string) error { return os.Remove(filename) } +func (fs *ChrootOS) Chmod(path string, mode os.FileMode) error { + return os.Chmod(path, mode) +} + func (fs *ChrootOS) TempFile(dir, prefix string) (billy.File, error) { if err := fs.createDir(dir + string(os.PathSeparator)); err != nil { return nil, err diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go index 9237a7434..136c3e2ac 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go @@ -4,6 +4,7 @@ import ( "bytes" "io" "sort" + "sync" encbin "encoding/binary" @@ -57,8 +58,9 @@ type MemoryIndex struct { PackfileChecksum [hash.Size]byte IdxChecksum [hash.Size]byte - offsetHash map[int64]plumbing.Hash - offsetHashIsFull bool + offsetHash map[int64]plumbing.Hash + offsetBuildOnce sync.Once + mu sync.RWMutex } var _ Index = (*MemoryIndex)(nil) @@ -126,13 +128,13 @@ func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) { offset := idx.getOffset(k, i) - if !idx.offsetHashIsFull { - // Save the offset for reverse lookup - if idx.offsetHash == nil { - idx.offsetHash = make(map[int64]plumbing.Hash) - } - idx.offsetHash[int64(offset)] = h + // Save the offset for reverse lookup + idx.mu.Lock() + if idx.offsetHash == nil { + idx.offsetHash = make(map[int64]plumbing.Hash) } + idx.offsetHash[int64(offset)] = h + idx.mu.Unlock() return int64(offset), nil } @@ -173,20 +175,17 @@ func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) { var hash plumbing.Hash var ok bool - if idx.offsetHash != nil { - if hash, ok = idx.offsetHash[o]; ok { - return hash, nil - } + var genErr error + idx.offsetBuildOnce.Do(func() { + genErr = idx.genOffsetHash() + }) + if genErr != nil { + return plumbing.ZeroHash, genErr } - // Lazily generate the reverse offset/hash map if required. 
- if !idx.offsetHashIsFull || idx.offsetHash == nil { - if err := idx.genOffsetHash(); err != nil { - return plumbing.ZeroHash, err - } - - hash, ok = idx.offsetHash[o] - } + idx.mu.RLock() + hash, ok = idx.offsetHash[o] + idx.mu.RUnlock() if !ok { return plumbing.ZeroHash, plumbing.ErrObjectNotFound @@ -202,8 +201,7 @@ func (idx *MemoryIndex) genOffsetHash() error { return err } - idx.offsetHash = make(map[int64]plumbing.Hash, count) - idx.offsetHashIsFull = true + offsetHash := make(map[int64]plumbing.Hash, count) var hash plumbing.Hash i := uint32(0) @@ -212,11 +210,15 @@ func (idx *MemoryIndex) genOffsetHash() error { for secondLevel := uint32(0); i < fanoutValue; i++ { copy(hash[:], idx.Names[mappedFirstLevel][secondLevel*objectIDLength:]) offset := int64(idx.getOffset(mappedFirstLevel, int(secondLevel))) - idx.offsetHash[offset] = hash + offsetHash[offset] = hash secondLevel++ } } + idx.mu.Lock() + idx.offsetHash = offsetHash + idx.mu.Unlock() + return nil } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go index 3d096e18b..78627b065 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go @@ -62,10 +62,55 @@ type Commit struct { ParentHashes []plumbing.Hash // Encoding is the encoding of the commit. Encoding MessageEncoding + // List of extra headers of the commit + ExtraHeaders []ExtraHeader s storer.EncodedObjectStorer } +// ExtraHeader holds any non-standard header +type ExtraHeader struct { + // Header name + Key string + // Value of the header + Value string +} + +// Implement fmt.Formatter for ExtraHeader +func (h ExtraHeader) Format(f fmt.State, verb rune) { + switch verb { + case 'v': + fmt.Fprintf(f, "ExtraHeader{Key: %v, Value: %v}", h.Key, h.Value) + default: + fmt.Fprintf(f, "%s", h.Key) + if len(h.Value) > 0 { + fmt.Fprint(f, " ") + // Content may be spread on multiple lines, if so we need to + // prepend each of them with a space for "continuation". + value := strings.TrimSuffix(h.Value, "\n") + lines := strings.Split(value, "\n") + fmt.Fprint(f, strings.Join(lines, "\n ")) + } + } +} + +// Parse an extra header and indicate whether it may be continue on the next line +func parseExtraHeader(line []byte) (ExtraHeader, bool) { + split := bytes.SplitN(line, []byte{' '}, 2) + + out := ExtraHeader { + Key: string(bytes.TrimRight(split[0], "\n")), + Value: "", + } + + if len(split) == 2 { + out.Value += string(split[1]) + return out, true + } else { + return out, false + } +} + // GetCommit gets a commit from an object storer and decodes it. 
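
Once Decode has populated the new ExtraHeaders field (see the hunk below), callers can surface non-standard commit headers. A sketch grounded only in this hunk; obtaining the *object.Commit is elided:

    package commitinspect

    import (
        "fmt"

        "github.com/go-git/go-git/v5/plumbing/object"
    )

    // listExtraHeaders prints any non-standard headers parsed into the new
    // ExtraHeaders field; c is assumed to come from object.GetCommit.
    func listExtraHeaders(c *object.Commit) {
        for _, h := range c.ExtraHeaders {
            fmt.Printf("%v\n", h) // %v prints ExtraHeader{Key: ..., Value: ...}
            fmt.Printf("%s\n", h) // %s re-emits the raw "key value" header form
        }
    }
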
func GetCommit(s storer.EncodedObjectStorer, h plumbing.Hash) (*Commit, error) { o, err := s.EncodedObject(plumbing.CommitObject, h) @@ -204,6 +249,7 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { var mergetag bool var pgpsig bool var msgbuf bytes.Buffer + var extraheader *ExtraHeader = nil for { line, err := r.ReadBytes('\n') if err != nil && err != io.EOF { @@ -230,7 +276,19 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { } } + if extraheader != nil { + if len(line) > 0 && line[0] == ' ' { + extraheader.Value += string(line[1:]) + continue + } else { + extraheader.Value = strings.TrimRight(extraheader.Value, "\n") + c.ExtraHeaders = append(c.ExtraHeaders, *extraheader) + extraheader = nil + } + } + if !message { + original_line := line line = bytes.TrimSpace(line) if len(line) == 0 { message = true @@ -261,6 +319,13 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { case headerpgp: c.PGPSignature += string(data) + "\n" pgpsig = true + default: + h, maybecontinued := parseExtraHeader(original_line) + if maybecontinued { + extraheader = &h + } else { + c.ExtraHeaders = append(c.ExtraHeaders, h) + } } } else { msgbuf.Write(line) @@ -341,6 +406,13 @@ func (c *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) { } } + for _, header := range c.ExtraHeaders { + + if _, err = fmt.Fprintf(w, "\n%s", header); err != nil { + return err + } + } + if c.PGPSignature != "" && includeSig { if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil { return err diff --git a/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md b/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md index 8f349c4b8..9a7c0ca73 100644 --- a/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md +++ b/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md @@ -5,6 +5,15 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.6.1] - 2025-10-05 + +### Fixed + +- Encode DEL (0x7f) control character by [@spaceone] +- Modernize code through Go 1.21 by [@ChrisHines] + +[0.6.1]: https://github.com/go-logfmt/logfmt/compare/v0.6.0...v0.6.1 + ## [0.6.0] - 2023-01-30 [0.6.0]: https://github.com/go-logfmt/logfmt/compare/v0.5.1...v0.6.0 @@ -80,3 +89,4 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 [@judwhite]: https://github.com/judwhite [@nussjustin]: https://github.com/nussjustin [@alexanderjophus]: https://github.com/alexanderjophus +[@spaceone]: https://github.com/spaceone diff --git a/vendor/github.com/go-logfmt/logfmt/encode.go b/vendor/github.com/go-logfmt/logfmt/encode.go index 4ea9d2399..d8f69351a 100644 --- a/vendor/github.com/go-logfmt/logfmt/encode.go +++ b/vendor/github.com/go-logfmt/logfmt/encode.go @@ -13,7 +13,7 @@ import ( // MarshalKeyvals returns the logfmt encoding of keyvals, a variadic sequence // of alternating keys and values. -func MarshalKeyvals(keyvals ...interface{}) ([]byte, error) { +func MarshalKeyvals(keyvals ...any) ([]byte, error) { buf := &bytes.Buffer{} if err := NewEncoder(buf).EncodeKeyvals(keyvals...); err != nil { return nil, err @@ -45,7 +45,7 @@ var ( // EncodeKeyval writes the logfmt encoding of key and value to the stream. A // single space is written before the second and subsequent keys in a record. // Nothing is written if a non-nil error is returned. 
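
Per the 0.6.1 changelog entry above, DEL (0x7f) is now escaped; the hunks below add it to the quoting predicates and filter it out of keys. A quick sketch of the observable effect (the printed output is my expectation, not taken from the changelog):

    package main

    import (
        "fmt"
        "log"

        "github.com/go-logfmt/logfmt"
    )

    func main() {
        // DEL (0x7f) in a value now forces quoting and is escaped as \u007f;
        // in a key it is stripped entirely by keyRuneFilter.
        b, err := logfmt.MarshalKeyvals("msg", "a\x7fb")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s\n", b) // msg="a\u007fb"
    }
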
-func (enc *Encoder) EncodeKeyval(key, value interface{}) error { +func (enc *Encoder) EncodeKeyval(key, value any) error { enc.scratch.Reset() if enc.needSep { if _, err := enc.scratch.Write(space); err != nil { @@ -72,7 +72,7 @@ func (enc *Encoder) EncodeKeyval(key, value interface{}) error { // unsupported type or that cause a MarshalerError are replaced by their error // but do not cause EncodeKeyvals to return an error. If a non-nil error is // returned some key/value pairs may not have be written. -func (enc *Encoder) EncodeKeyvals(keyvals ...interface{}) error { +func (enc *Encoder) EncodeKeyvals(keyvals ...any) error { if len(keyvals) == 0 { return nil } @@ -122,7 +122,7 @@ var ErrUnsupportedKeyType = errors.New("unsupported key type") // unsupported type. var ErrUnsupportedValueType = errors.New("unsupported value type") -func writeKey(w io.Writer, key interface{}) error { +func writeKey(w io.Writer, key any) error { if key == nil { return ErrNilKey } @@ -155,7 +155,7 @@ func writeKey(w io.Writer, key interface{}) error { switch rkey.Kind() { case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Struct: return ErrUnsupportedKeyType - case reflect.Ptr: + case reflect.Pointer: if rkey.IsNil() { return ErrNilKey } @@ -170,7 +170,7 @@ func writeKey(w io.Writer, key interface{}) error { // functions it causes them to remove invalid key runes from strings or byte // slices respectively. func keyRuneFilter(r rune) rune { - if r <= ' ' || r == '=' || r == '"' || r == utf8.RuneError { + if r <= ' ' || r == '=' || r == '"' || r == 0x7f || r == utf8.RuneError { return -1 } return r @@ -194,7 +194,7 @@ func writeBytesKey(w io.Writer, key []byte) error { return err } -func writeValue(w io.Writer, value interface{}) error { +func writeValue(w io.Writer, value any) error { switch v := value.(type) { case nil: return writeBytesValue(w, null) @@ -222,7 +222,7 @@ func writeValue(w io.Writer, value interface{}) error { switch rvalue.Kind() { case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Struct: return ErrUnsupportedValueType - case reflect.Ptr: + case reflect.Pointer: if rvalue.IsNil() { return writeBytesValue(w, null) } @@ -233,7 +233,7 @@ func writeValue(w io.Writer, value interface{}) error { } func needsQuotedValueRune(r rune) bool { - return r <= ' ' || r == '=' || r == '"' || r == utf8.RuneError + return r <= ' ' || r == '=' || r == '"' || r == 0x7f || r == utf8.RuneError } func writeStringValue(w io.Writer, value string, ok bool) error { @@ -276,7 +276,7 @@ func (enc *Encoder) Reset() { func safeError(err error) (s string, ok bool) { defer func() { if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { + if v := reflect.ValueOf(err); v.Kind() == reflect.Pointer && v.IsNil() { s, ok = "null", false } else { s, ok = fmt.Sprintf("PANIC:%v", panicVal), false @@ -290,7 +290,7 @@ func safeError(err error) (s string, ok bool) { func safeString(str fmt.Stringer) (s string, ok bool) { defer func() { if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { + if v := reflect.ValueOf(str); v.Kind() == reflect.Pointer && v.IsNil() { s, ok = "null", false } else { s, ok = fmt.Sprintf("PANIC:%v", panicVal), true @@ -304,7 +304,7 @@ func safeString(str fmt.Stringer) (s string, ok bool) { func safeMarshal(tm encoding.TextMarshaler) (b []byte, err error) { defer func() { if panicVal := recover(); panicVal != nil { - if v := 
reflect.ValueOf(tm); v.Kind() == reflect.Ptr && v.IsNil() { + if v := reflect.ValueOf(tm); v.Kind() == reflect.Pointer && v.IsNil() { b, err = nil, nil } else { b, err = nil, fmt.Errorf("panic when marshalling: %s", panicVal) diff --git a/vendor/github.com/go-logfmt/logfmt/jsonstring.go b/vendor/github.com/go-logfmt/logfmt/jsonstring.go index 030ac85fc..26e1b1c13 100644 --- a/vendor/github.com/go-logfmt/logfmt/jsonstring.go +++ b/vendor/github.com/go-logfmt/logfmt/jsonstring.go @@ -19,7 +19,7 @@ import ( var hex = "0123456789abcdef" var bufferPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &bytes.Buffer{} }, } @@ -40,7 +40,7 @@ func writeQuotedString(w io.Writer, s string) (int, error) { start := 0 for i := 0; i < len(s); { if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' { + if 0x20 <= b && b != '\\' && b != '"' && b != 0x7f { i++ continue } @@ -91,14 +91,14 @@ func writeQuotedString(w io.Writer, s string) (int, error) { return n, err } -// NOTE: keep in sync with writeQuoteString above. +// NOTE: keep in sync with writeQuotedString above. func writeQuotedBytes(w io.Writer, s []byte) (int, error) { buf := getBuffer() buf.WriteByte('"') start := 0 for i := 0; i < len(s); { if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' { + if 0x20 <= b && b != '\\' && b != '"' && b != 0x7f { i++ continue } diff --git a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig index faef0c91e..c37602a02 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig +++ b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig @@ -19,3 +19,6 @@ indent_size = 2 [.golangci.yaml] indent_size = 2 + +[devenv.yaml] +indent_size = 2 diff --git a/vendor/github.com/go-viper/mapstructure/v2/.gitignore b/vendor/github.com/go-viper/mapstructure/v2/.gitignore index 470e7ca2b..71caea19f 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/.gitignore +++ b/vendor/github.com/go-viper/mapstructure/v2/.gitignore @@ -1,6 +1,10 @@ -/.devenv/ -/.direnv/ -/.pre-commit-config.yaml /bin/ /build/ /var/ + +# Devenv +.devenv* +devenv.local.nix +devenv.local.yaml +.direnv +.pre-commit-config.yaml diff --git a/vendor/github.com/go-viper/mapstructure/v2/devenv.lock b/vendor/github.com/go-viper/mapstructure/v2/devenv.lock new file mode 100644 index 000000000..72c2c9b4d --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/devenv.lock @@ -0,0 +1,103 @@ +{ + "nodes": { + "devenv": { + "locked": { + "dir": "src/modules", + "lastModified": 1765288076, + "owner": "cachix", + "repo": "devenv", + "rev": "93c055af1e8fcac49251f1b2e1c57f78620ad351", + "type": "github" + }, + "original": { + "dir": "src/modules", + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1765121682, + "owner": "edolstra", + "repo": "flake-compat", + "rev": "65f23138d8d09a92e30f1e5c87611b23ef451bf3", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "git-hooks": { + "inputs": { + "flake-compat": "flake-compat", + "gitignore": "gitignore", + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1765016596, + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "548fc44fca28a5e81c5d6b846e555e6b9c2a5a3c", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "git-hooks.nix", + "type": "github" + } + }, + "gitignore": { + "inputs": { + 
"nixpkgs": [ + "git-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1762808025, + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "cb5e3fdca1de58ccbc3ef53de65bd372b48f567c", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1764580874, + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "dcf61356c3ab25f1362b4a4428a6d871e84f1d1d", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "git-hooks": "git-hooks", + "nixpkgs": "nixpkgs", + "pre-commit-hooks": [ + "git-hooks" + ] + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/devenv.nix b/vendor/github.com/go-viper/mapstructure/v2/devenv.nix new file mode 100644 index 000000000..b31ab7a1f --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/devenv.nix @@ -0,0 +1,14 @@ +{ + pkgs, + ... +}: + +{ + languages = { + go.enable = true; + }; + + packages = with pkgs; [ + golangci-lint + ]; +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/devenv.yaml b/vendor/github.com/go-viper/mapstructure/v2/devenv.yaml new file mode 100644 index 000000000..68616a49c --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/devenv.yaml @@ -0,0 +1,4 @@ +# yaml-language-server: $schema=https://devenv.sh/devenv.schema.json +inputs: + nixpkgs: + url: github:cachix/devenv-nixpkgs/rolling diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/vendor/github.com/go-viper/mapstructure/v2/flake.lock deleted file mode 100644 index 5e67bdd6b..000000000 --- a/vendor/github.com/go-viper/mapstructure/v2/flake.lock +++ /dev/null @@ -1,294 +0,0 @@ -{ - "nodes": { - "cachix": { - "inputs": { - "devenv": [ - "devenv" - ], - "flake-compat": [ - "devenv" - ], - "git-hooks": [ - "devenv" - ], - "nixpkgs": "nixpkgs" - }, - "locked": { - "lastModified": 1742042642, - "narHash": "sha256-D0gP8srrX0qj+wNYNPdtVJsQuFzIng3q43thnHXQ/es=", - "owner": "cachix", - "repo": "cachix", - "rev": "a624d3eaf4b1d225f918de8543ed739f2f574203", - "type": "github" - }, - "original": { - "owner": "cachix", - "ref": "latest", - "repo": "cachix", - "type": "github" - } - }, - "devenv": { - "inputs": { - "cachix": "cachix", - "flake-compat": "flake-compat", - "git-hooks": "git-hooks", - "nix": "nix", - "nixpkgs": "nixpkgs_3" - }, - "locked": { - "lastModified": 1744876578, - "narHash": "sha256-8MTBj2REB8t29sIBLpxbR0+AEGJ7f+RkzZPAGsFd40c=", - "owner": "cachix", - "repo": "devenv", - "rev": "7ff7c351bba20d0615be25ecdcbcf79b57b85fe1", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1733328505, - "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-parts": { - "inputs": { - "nixpkgs-lib": [ - "devenv", - "nix", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1712014858, - "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d", - "type": "github" - }, - "original": { - 
"owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "flake-parts_2": { - "inputs": { - "nixpkgs-lib": "nixpkgs-lib" - }, - "locked": { - "lastModified": 1743550720, - "narHash": "sha256-hIshGgKZCgWh6AYJpJmRgFdR3WUbkY04o82X05xqQiY=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "c621e8422220273271f52058f618c94e405bb0f5", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "git-hooks": { - "inputs": { - "flake-compat": [ - "devenv" - ], - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1742649964, - "narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=", - "owner": "cachix", - "repo": "git-hooks.nix", - "rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "git-hooks.nix", - "type": "github" - } - }, - "gitignore": { - "inputs": { - "nixpkgs": [ - "devenv", - "git-hooks", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1709087332, - "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", - "owner": "hercules-ci", - "repo": "gitignore.nix", - "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "gitignore.nix", - "type": "github" - } - }, - "libgit2": { - "flake": false, - "locked": { - "lastModified": 1697646580, - "narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=", - "owner": "libgit2", - "repo": "libgit2", - "rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5", - "type": "github" - }, - "original": { - "owner": "libgit2", - "repo": "libgit2", - "type": "github" - } - }, - "nix": { - "inputs": { - "flake-compat": [ - "devenv" - ], - "flake-parts": "flake-parts", - "libgit2": "libgit2", - "nixpkgs": "nixpkgs_2", - "nixpkgs-23-11": [ - "devenv" - ], - "nixpkgs-regression": [ - "devenv" - ], - "pre-commit-hooks": [ - "devenv" - ] - }, - "locked": { - "lastModified": 1741798497, - "narHash": "sha256-E3j+3MoY8Y96mG1dUIiLFm2tZmNbRvSiyN7CrSKuAVg=", - "owner": "domenkozar", - "repo": "nix", - "rev": "f3f44b2baaf6c4c6e179de8cbb1cc6db031083cd", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.24", - "repo": "nix", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1733212471, - "narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "55d15ad12a74eb7d4646254e13638ad0c4128776", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "lastModified": 1743296961, - "narHash": "sha256-b1EdN3cULCqtorQ4QeWgLMrd5ZGOjLSLemfa00heasc=", - "owner": "nix-community", - "repo": "nixpkgs.lib", - "rev": "e4822aea2a6d1cdd36653c134cacfd64c97ff4fa", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "nixpkgs.lib", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1717432640, - "narHash": "sha256-+f9c4/ZX5MWDOuB1rKoWj+lBNm0z0rs4CK47HBLxy1o=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "88269ab3044128b7c2f4c7d68448b2fb50456870", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "release-24.05", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_3": { - "locked": { - "lastModified": 1733477122, - "narHash": "sha256-qamMCz5mNpQmgBwc8SB5tVMlD5sbwVIToVZtSxMph9s=", - 
"owner": "cachix", - "repo": "devenv-nixpkgs", - "rev": "7bd9e84d0452f6d2e63b6e6da29fe73fac951857", - "type": "github" - }, - "original": { - "owner": "cachix", - "ref": "rolling", - "repo": "devenv-nixpkgs", - "type": "github" - } - }, - "nixpkgs_4": { - "locked": { - "lastModified": 1744536153, - "narHash": "sha256-awS2zRgF4uTwrOKwwiJcByDzDOdo3Q1rPZbiHQg/N38=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "18dd725c29603f582cf1900e0d25f9f1063dbf11", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "root": { - "inputs": { - "devenv": "devenv", - "flake-parts": "flake-parts_2", - "nixpkgs": "nixpkgs_4" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.nix b/vendor/github.com/go-viper/mapstructure/v2/flake.nix deleted file mode 100644 index 3b116f426..000000000 --- a/vendor/github.com/go-viper/mapstructure/v2/flake.nix +++ /dev/null @@ -1,46 +0,0 @@ -{ - inputs = { - nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; - flake-parts.url = "github:hercules-ci/flake-parts"; - devenv.url = "github:cachix/devenv"; - }; - - outputs = - inputs@{ flake-parts, ... }: - flake-parts.lib.mkFlake { inherit inputs; } { - imports = [ - inputs.devenv.flakeModule - ]; - - systems = [ - "x86_64-linux" - "x86_64-darwin" - "aarch64-darwin" - ]; - - perSystem = - { pkgs, ... }: - rec { - devenv.shells = { - default = { - languages = { - go.enable = true; - }; - - pre-commit.hooks = { - nixpkgs-fmt.enable = true; - }; - - packages = with pkgs; [ - golangci-lint - ]; - - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; - }; - - ci = devenv.shells.default; - }; - }; - }; -} diff --git a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go index 7c35bce02..9087fd96c 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go +++ b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go @@ -173,6 +173,25 @@ // Public: "I made it through!" // } // +// # Custom Decoding with Unmarshaler +// +// Types can implement the Unmarshaler interface to control their own decoding. The interface +// behaves similarly to how UnmarshalJSON does in the standard library. It can be used as an +// alternative or companion to a DecodeHook. +// +// type TrimmedString string +// +// func (t *TrimmedString) UnmarshalMapstructure(input any) error { +// str, ok := input.(string) +// if !ok { +// return fmt.Errorf("expected string, got %T", input) +// } +// *t = TrimmedString(strings.TrimSpace(str)) +// return nil +// } +// +// See the Unmarshaler interface documentation for more details. +// // # Other Configuration // // mapstructure is highly configurable. See the DecoderConfig struct @@ -218,6 +237,17 @@ type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, any) (any, error) // values. type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (any, error) +// Unmarshaler is the interface implemented by types that can unmarshal +// themselves. UnmarshalMapstructure receives the input data (potentially +// transformed by DecodeHook) and should populate the receiver with the +// decoded values. +// +// The Unmarshaler interface takes precedence over the default decoding +// logic for any type (structs, slices, maps, primitives, etc.). 
+type Unmarshaler interface { + UnmarshalMapstructure(any) error +} + // DecoderConfig is the configuration that is used to create a new decoder // and allows customization of various aspects of decoding. type DecoderConfig struct { @@ -281,6 +311,13 @@ type DecoderConfig struct { // } Squash bool + // Deep will map structures in slices instead of copying them + // + // type Parent struct { + // Children []Child `mapstructure:",deep"` + // } + Deep bool + // Metadata is the struct that will contain extra metadata about // the decoding. If this is nil, then no metadata will be tracked. Metadata *Metadata @@ -290,9 +327,15 @@ type DecoderConfig struct { Result any // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" + // defaults to "mapstructure". Multiple tag names can be specified + // as a comma-separated list (e.g., "yaml,json"), and the first + // matching non-empty tag will be used. TagName string + // RootName specifies the name to use for the root element in error messages. For example: + // '' has unset fields: + RootName string + // The option of the value in the tag that indicates a field should // be squashed. This defaults to "squash". SquashTagOption string @@ -304,11 +347,34 @@ type DecoderConfig struct { // MatchName is the function used to match the map key to the struct // field name or tag. Defaults to `strings.EqualFold`. This can be used // to implement case-sensitive tag values, support snake casing, etc. + // + // MatchName is used as a fallback comparison when the direct key lookup fails. + // See also MapFieldName for transforming field names before lookup. MatchName func(mapKey, fieldName string) bool // DecodeNil, if set to true, will cause the DecodeHook (if present) to run // even if the input is nil. This can be used to provide default values. DecodeNil bool + + // MapFieldName is the function used to convert the struct field name to the map's key name. + // + // This is useful for automatically converting between naming conventions without + // explicitly tagging each field. For example, to convert Go's PascalCase field names + // to snake_case map keys: + // + // MapFieldName: func(s string) string { + // return strcase.ToSnake(s) + // } + // + // When decoding from a map to a struct, the transformed field name is used for + // the initial lookup. If not found, MatchName is used as a fallback comparison. + // Explicit struct tags always take precedence over MapFieldName. + MapFieldName func(string) string + + // DisableUnmarshaler, if set to true, disables the use of the Unmarshaler + // interface. Types implementing Unmarshaler will be decoded using the + // standard struct decoding logic instead. + DisableUnmarshaler bool } // A Decoder takes a raw interface value and turns it into structured @@ -445,6 +511,12 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { config.MatchName = strings.EqualFold } + if config.MapFieldName == nil { + config.MapFieldName = func(s string) string { + return s + } + } + result := &Decoder{ config: config, } @@ -458,7 +530,7 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { // Decode decodes the given raw interface to the target pointer specified // by the configuration. 
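
Tying the new pieces together, a small end-to-end sketch of the Unmarshaler hook, reusing the TrimmedString type from the package docs above; the input map is illustrative:

    package main

    import (
        "fmt"
        "log"
        "strings"

        "github.com/go-viper/mapstructure/v2"
    )

    // TrimmedString mirrors the example from the package documentation.
    type TrimmedString string

    func (t *TrimmedString) UnmarshalMapstructure(input any) error {
        s, ok := input.(string)
        if !ok {
            return fmt.Errorf("expected string, got %T", input)
        }
        *t = TrimmedString(strings.TrimSpace(s))
        return nil
    }

    func main() {
        var out struct {
            Name TrimmedString `mapstructure:"name"`
        }
        if err := mapstructure.Decode(map[string]any{"name": "  abra  "}, &out); err != nil {
            log.Fatal(err)
        }
        fmt.Println(out.Name) // abra
    }
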
func (d *Decoder) Decode(input any) error { - err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) + err := d.decode(d.config.RootName, input, reflect.ValueOf(d.config.Result).Elem()) // Retain some of the original behavior when multiple errors ocurr var joinedErr interface{ Unwrap() []error } @@ -540,36 +612,50 @@ func (d *Decoder) decode(name string, input any, outVal reflect.Value) error { var err error addMetaKey := true - switch outputKind { - case reflect.Bool: - err = d.decodeBool(name, input, outVal) - case reflect.Interface: - err = d.decodeBasic(name, input, outVal) - case reflect.String: - err = d.decodeString(name, input, outVal) - case reflect.Int: - err = d.decodeInt(name, input, outVal) - case reflect.Uint: - err = d.decodeUint(name, input, outVal) - case reflect.Float32: - err = d.decodeFloat(name, input, outVal) - case reflect.Complex64: - err = d.decodeComplex(name, input, outVal) - case reflect.Struct: - err = d.decodeStruct(name, input, outVal) - case reflect.Map: - err = d.decodeMap(name, input, outVal) - case reflect.Ptr: - addMetaKey, err = d.decodePtr(name, input, outVal) - case reflect.Slice: - err = d.decodeSlice(name, input, outVal) - case reflect.Array: - err = d.decodeArray(name, input, outVal) - case reflect.Func: - err = d.decodeFunc(name, input, outVal) - default: - // If we reached this point then we weren't able to decode it - return newDecodeError(name, fmt.Errorf("unsupported type: %s", outputKind)) + + // Check if the target implements Unmarshaler and use it if not disabled + unmarshaled := false + if !d.config.DisableUnmarshaler { + if unmarshaler, ok := getUnmarshaler(outVal); ok { + if err = unmarshaler.UnmarshalMapstructure(input); err != nil { + err = newDecodeError(name, err) + } + unmarshaled = true + } + } + + if !unmarshaled { + switch outputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Complex64: + err = d.decodeComplex(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + addMetaKey, err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return newDecodeError(name, fmt.Errorf("unsupported type: %s", outputKind)) + } } // If we reached here, then we successfully decoded SOMETHING, so @@ -668,7 +754,7 @@ func (d *Decoder) decodeString(name string, data any, val reflect.Value) error { case reflect.Uint8: var uints []uint8 if dataKind == reflect.Array { - uints = make([]uint8, dataVal.Len(), dataVal.Len()) + uints = make([]uint8, dataVal.Len()) for i := range uints { uints[i] = dataVal.Index(i).Interface().(uint8) } @@ -1060,8 +1146,8 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re ) } - tagValue := f.Tag.Get(d.config.TagName) - keyName := f.Name + tagValue, _ := getTagValue(f, d.config.TagName) + keyName := d.config.MapFieldName(f.Name) if tagValue == "" && 
d.config.IgnoreUntaggedFields { continue @@ -1070,6 +1156,9 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re // If Squash is set in the config, we squash the field down. squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + // If Deep is set in the config, set as default value. + deep := d.config.Deep + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) // Determine the name of the key in the map @@ -1078,12 +1167,12 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re continue } // If "omitempty" is specified in the tag, it ignores empty values. - if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + if strings.Contains(tagValue[index+1:], "omitempty") && isEmptyValue(v) { continue } // If "omitzero" is specified in the tag, it ignores zero values. - if strings.Index(tagValue[index+1:], "omitzero") != -1 && v.IsZero() { + if strings.Contains(tagValue[index+1:], "omitzero") && v.IsZero() { continue } @@ -1103,7 +1192,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re ) } } else { - if strings.Index(tagValue[index+1:], "remain") != -1 { + if strings.Contains(tagValue[index+1:], "remain") { if v.Kind() != reflect.Map { return newDecodeError( name+"."+f.Name, @@ -1118,6 +1207,9 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re continue } } + + deep = deep || strings.Contains(tagValue[index+1:], "deep") + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { keyName = keyNameTagValue } @@ -1164,6 +1256,41 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) } + case reflect.Slice: + if deep { + var childType reflect.Type + switch v.Type().Elem().Kind() { + case reflect.Struct: + childType = reflect.TypeOf(map[string]any{}) + default: + childType = v.Type().Elem() + } + + sType := reflect.SliceOf(childType) + + addrVal := reflect.New(sType) + + vSlice := reflect.MakeSlice(sType, v.Len(), v.Cap()) + + if v.Len() > 0 { + reflect.Indirect(addrVal).Set(vSlice) + + err := d.decode(keyName, v.Interface(), reflect.Indirect(addrVal)) + if err != nil { + return err + } + } + + vSlice = reflect.Indirect(addrVal) + + valMap.SetMapIndex(reflect.ValueOf(keyName), vSlice) + + break + } + + // When deep mapping is not needed, fallthrough to normal copy + fallthrough + default: valMap.SetMapIndex(reflect.ValueOf(keyName), v) } @@ -1471,7 +1598,10 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e remain := false // We always parse the tags cause we're looking for other tags too - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + tagParts := getTagParts(fieldType, d.config.TagName) + if len(tagParts) == 0 { + tagParts = []string{""} + } for _, tag := range tagParts[1:] { if tag == d.config.SquashTagOption { squash = true @@ -1492,6 +1622,18 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e if !fieldVal.IsNil() { structs = append(structs, fieldVal.Elem().Elem()) } + case reflect.Ptr: + if fieldVal.Type().Elem().Kind() == reflect.Struct { + if fieldVal.IsNil() { + fieldVal.Set(reflect.New(fieldVal.Type().Elem())) + } + structs = append(structs, fieldVal.Elem()) + } else { + errs = append(errs, newDecodeError( + name+"."+fieldType.Name, + fmt.Errorf("unsupported type for squashed pointer: %s", fieldVal.Type().Elem().Kind()), + )) + } default: errs = 
append(errs, newDecodeError( name+"."+fieldType.Name, @@ -1516,13 +1658,15 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e field, fieldValue := f.field, f.val fieldName := field.Name - tagValue := field.Tag.Get(d.config.TagName) + tagValue, _ := getTagValue(field, d.config.TagName) if tagValue == "" && d.config.IgnoreUntaggedFields { continue } tagValue = strings.SplitN(tagValue, ",", 2)[0] if tagValue != "" { fieldName = tagValue + } else { + fieldName = d.config.MapFieldName(fieldName) } rawMapKey := reflect.ValueOf(fieldName) @@ -1605,8 +1749,14 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } sort.Strings(keys) + // Improve error message when name is empty by showing the target struct type + // in the case where it is empty for embedded structs. + errorName := name + if errorName == "" { + errorName = val.Type().String() + } errs = append(errs, newDecodeError( - name, + errorName, fmt.Errorf("has invalid keys: %s", strings.Join(keys, ", ")), )) } @@ -1692,7 +1842,7 @@ func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields return true } - if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + if checkMapstructureTags && hasAnyTag(f, tagName) { // check for mapstructure tags inside return true } } @@ -1700,13 +1850,99 @@ func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, } func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { - if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + if v.Kind() != reflect.Ptr { return v } - deref := v.Elem() - derefT := deref.Type() - if isStructTypeConvertibleToMap(derefT, true, tagName) { - return deref + + switch v.Elem().Kind() { + case reflect.Slice: + return v.Elem() + + case reflect.Struct: + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v + + default: + return v } - return v +} + +func hasAnyTag(field reflect.StructField, tagName string) bool { + _, ok := getTagValue(field, tagName) + return ok +} + +func getTagParts(field reflect.StructField, tagName string) []string { + tagValue, ok := getTagValue(field, tagName) + if !ok { + return nil + } + return strings.Split(tagValue, ",") +} + +func getTagValue(field reflect.StructField, tagName string) (string, bool) { + for _, name := range splitTagNames(tagName) { + if tag := field.Tag.Get(name); tag != "" { + return tag, true + } + } + return "", false +} + +func splitTagNames(tagName string) []string { + if tagName == "" { + return []string{"mapstructure"} + } + parts := strings.Split(tagName, ",") + result := make([]string, 0, len(parts)) + + for _, name := range parts { + name = strings.TrimSpace(name) + if name != "" { + result = append(result, name) + } + } + + return result +} + +// unmarshalerType is cached for performance +var unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + +// getUnmarshaler checks if the value implements Unmarshaler and returns +// the Unmarshaler and a boolean indicating if it was found. It handles both +// pointer and value receivers. 
+func getUnmarshaler(val reflect.Value) (Unmarshaler, bool) { + // Skip invalid or nil values + if !val.IsValid() { + return nil, false + } + + switch val.Kind() { + case reflect.Pointer, reflect.Interface: + if val.IsNil() { + return nil, false + } + } + + // Check pointer receiver first (most common case) + if val.CanAddr() { + ptrVal := val.Addr() + // Quick check: if no methods, can't implement any interface + if ptrVal.Type().NumMethod() > 0 && ptrVal.Type().Implements(unmarshalerType) { + return ptrVal.Interface().(Unmarshaler), true + } + } + + // Check value receiver + // Quick check: if no methods, can't implement any interface + if val.Type().NumMethod() > 0 && val.CanInterface() && val.Type().Implements(unmarshalerType) { + return val.Interface().(Unmarshaler), true + } + + return nil, false } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel index a65d88eb8..04b4bebf3 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -27,6 +27,7 @@ go_library( "//internal/httprule", "//utilities", "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//grpclog", "@org_golang_google_grpc//health/grpc_health_v1", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index 2f2b34243..00b2228a1 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -201,13 +201,13 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM if timeout != 0 { ctx, _ = context.WithTimeout(ctx, timeout) } - if len(pairs) == 0 { - return ctx, nil, nil - } md := metadata.Pairs(pairs...) for _, mda := range mux.metadataAnnotators { md = metadata.Join(md, mda(ctx, req)) } + if len(md) == 0 { + return ctx, nil, nil + } return ctx, md, nil } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go index 8376d1e0e..3d0706300 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go @@ -66,7 +66,7 @@ func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { var ( // protoMessageType is stored to prevent constant lookup of the same type at runtime. - protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() + protoMessageType = reflect.TypeFor[proto.Message]() ) // marshalNonProto marshals a non-message field of a protobuf message. @@ -325,9 +325,9 @@ type protoEnum interface { EnumDescriptor() ([]byte, []int) } -var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem() +var typeProtoEnum = reflect.TypeFor[protoEnum]() -var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() +var typeProtoMessage = reflect.TypeFor[proto.Message]() // Delimiter for newline encoded JSON streams. 
func (j *JSONPb) Delimiter() []byte { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index 19255ec44..3eb161671 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule" + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/health/grpc_health_v1" @@ -281,15 +282,22 @@ func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpoin http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string, ) { _, outboundMarshaler := MarshalerForRequest(s, r) - - resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{ - Service: r.URL.Query().Get("service"), - }) + annotatedContext, err := AnnotateContext(r.Context(), s, r, grpc_health_v1.Health_Check_FullMethodName, WithHTTPPathPattern(endpointPath)) if err != nil { s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) return } + var md ServerMetadata + resp, err := healthCheckClient.Check(annotatedContext, &grpc_health_v1.HealthCheckRequest{ + Service: r.URL.Query().Get("service"), + }, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD)) + annotatedContext = NewServerMetadataContext(annotatedContext, md) + if err != nil { + s.errorHandler(annotatedContext, s, outboundMarshaler, w, r, err) + return + } + w.Header().Set("Content-Type", "application/json") if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING { @@ -300,7 +308,7 @@ func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpoin err = status.Error(codes.NotFound, resp.String()) } - s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + s.errorHandler(annotatedContext, s, outboundMarshaler, w, r, err) return } diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 244ee19c4..af2ef6395 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -27,6 +27,16 @@ Use the links above for more information on each. 
# changelog +* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1) + * zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079 + * zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059 + * s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080 + * zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086 + * gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090 + * flate: Faster load+store https://github.com/klauspost/compress/pull/1104 + * flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101 + * flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103 + * Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0) * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036 * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028 @@ -36,6 +46,9 @@ Use the links above for more information on each. * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049 * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050 +
+ See changes to v1.17.x + * Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11) * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017 * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014 @@ -102,7 +115,8 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 - + +
See changes to v1.16.x @@ -669,3 +683,4 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv # license This code is licensed under the same conditions as the original Go code. See LICENSE file. + diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go index e82fa3bb7..d58b3fe42 100644 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -143,7 +143,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 074018d8f..8c8baa4fc 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -396,7 +396,7 @@ func (s *Scratch) buildCTable() error { if v > largeLimit { s.zeroBits = true } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + for range v { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index 0ebc9aaac..41db94cde 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -85,7 +85,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 84aa3d12f..a97cf1b5d 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -276,7 +276,7 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { offsetIdx := len(s.Out) s.Out = append(s.Out, sixZeros[:]...) 
- for i := 0; i < 4; i++ { + for i := range 4 { toDo := src if len(toDo) > segmentSize { toDo = toDo[:segmentSize] @@ -312,7 +312,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { segmentSize := (len(src) + 3) / 4 var wg sync.WaitGroup wg.Add(4) - for i := 0; i < 4; i++ { + for i := range 4 { toDo := src if len(toDo) > segmentSize { toDo = toDo[:segmentSize] @@ -326,7 +326,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { }(i) } wg.Wait() - for i := 0; i < 4; i++ { + for i := range 4 { o := s.tmpOut[i] if len(o) > math.MaxUint16 { // We cannot store the size in the jump table diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 0f56b02d7..7d0efa881 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -626,7 +626,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { var br [4]bitReaderBytes start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -798,10 +798,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { @@ -864,7 +861,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { var br [4]bitReaderBytes start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -1035,10 +1032,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go index ba7e8e6b0..99ddd4af9 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -58,7 +58,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { var br [4]bitReaderShifted // Decode "jump table" start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -109,10 +109,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 77ecd68e0..67d9e05b6 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go 
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -201,7 +201,7 @@ func (c cTable) write(s *Scratch) error { for i := range hist[:16] { hist[i] = 0 } - for n := uint8(0); n < maxSymbolValue; n++ { + for n := range maxSymbolValue { v := bitsToWeight[c[n].nBits] & 15 huffWeight[n] = v hist[v]++ @@ -271,7 +271,7 @@ func (c cTable) estTableSize(s *Scratch) (sz int, err error) { for i := range hist[:16] { hist[i] = 0 } - for n := uint8(0); n < maxSymbolValue; n++ { + for n := range maxSymbolValue { v := bitsToWeight[c[n].nBits] & 15 huffWeight[n] = v hist[v]++ diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go index 0cfb5c0e2..4f2a0d8c5 100644 --- a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go @@ -37,6 +37,6 @@ func Store32(b []byte, v uint32) { } // Store64 will store v at b. -func Store64(b []byte, v uint64) { - binary.LittleEndian.PutUint64(b, v) +func Store64[I Indexer](b []byte, i I, v uint64) { + binary.LittleEndian.PutUint64(b[i:], v) } diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go index ada45cd90..218a38bc4 100644 --- a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go @@ -38,18 +38,15 @@ func Load64[I Indexer](b []byte, i I) uint64 { // Store16 will store v at b. func Store16(b []byte, v uint16) { - //binary.LittleEndian.PutUint16(b, v) *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v } // Store32 will store v at b. func Store32(b []byte, v uint32) { - //binary.LittleEndian.PutUint32(b, v) *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v } -// Store64 will store v at b. -func Store64(b []byte, v uint64) { - //binary.LittleEndian.PutUint64(b, v) - *(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v +// Store64 will store v at b[i:]. +func Store64[I Indexer](b []byte, i I, v uint64) { + *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) = v } diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go index 40796a49d..a2c82fcd2 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -209,7 +209,7 @@ func (r *Reader) fill() error { if !r.readFull(r.buf[:len(magicBody)], false) { return r.err } - for i := 0; i < len(magicBody); i++ { + for i := range len(magicBody) { if r.buf[i] != magicBody[i] { r.err = ErrCorrupt return r.err diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go index 13c6040a5..860a99416 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -20,8 +20,10 @@ import ( func Encode(dst, src []byte) []byte { if n := MaxEncodedLen(len(src)); n < 0 { panic(ErrTooLarge) - } else if len(dst) < n { + } else if cap(dst) < n { dst = make([]byte, n) + } else { + dst = dst[:n] } // The block starts with the varint-encoded length of the decompressed bytes. 
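The snappy-reference Encode change just above switches the destination check from len(dst) to cap(dst), so a zero-length scratch buffer with spare capacity is now resliced in place rather than replaced by a fresh allocation on every call. snapref is an internal package, but the v1.18.1 changelog earlier in this diff notes the same cap-based check landed in the public s2 encoder's EncodeBetter/EncodeBest, so a minimal sketch of the buffer-reuse pattern against that API (the input and buffer sizing are illustrative only, not from the diff):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := []byte("the quick brown fox jumps over the lazy dog, over and over and over")

	// A zero-length scratch buffer with pre-grown capacity. With the
	// cap-based check, the encoder can reslice this buffer in place
	// instead of allocating a new one on each call.
	buf := make([]byte, 0, s2.MaxEncodedLen(len(src)))

	dst := s2.EncodeBetter(buf, src)
	fmt.Printf("compressed %d -> %d bytes\n", len(src), len(dst))
}
```

In a loop, passing the same buf back in amortizes the allocation across calls, which is what the len-based check previously defeated.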
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index 1952f175b..b22b297e6 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -88,7 +88,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 0dd742fd2..2329e996f 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -54,11 +54,11 @@ const ( ) var ( - huffDecoderPool = sync.Pool{New: func() interface{} { + huffDecoderPool = sync.Pool{New: func() any { return &huff0.Scratch{} }} - fseDecoderPool = sync.Pool{New: func() interface{} { + fseDecoderPool = sync.Pool{New: func() any { return &fseDecoder{} }} ) @@ -553,7 +553,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { if compMode&3 != 0 { return errors.New("corrupt block: reserved bits not zero") } - for i := uint(0); i < 3; i++ { + for i := range uint(3) { mode := seqCompMode((compMode >> (6 - i*2)) & 3) if debugDecoder { println("Table", tableIndex(i), "is", mode) diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index ea2a19376..30df5513d 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -373,11 +373,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { if cap(dst) == 0 && !d.o.limitToCap { // Allocate len(input) * 2 by default if nothing is provided // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. - if size > 1<<20 { - size = 1 << 20 - } + size := min( + // Cap to 1 MB. + len(input)*2, 1<<20) if uint64(size) > d.o.maxDecodedSize { size = int(d.o.maxDecodedSize) } diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index b7b83164b..2ffbfdf37 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -194,17 +194,17 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { hist := o.History contents := o.Contents debug := o.DebugOut != nil - println := func(args ...interface{}) { + println := func(args ...any) { if o.DebugOut != nil { fmt.Fprintln(o.DebugOut, args...) } } - printf := func(s string, args ...interface{}) { + printf := func(s string, args ...any) { if o.DebugOut != nil { fmt.Fprintf(o.DebugOut, s, args...) } } - print := func(args ...interface{}) { + print := func(args ...any) { if o.DebugOut != nil { fmt.Fprint(o.DebugOut, args...) 
} @@ -424,16 +410,10 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { } // Literal table - avgSize := litTotal - if avgSize > huff0.BlockSizeMax/2 { - avgSize = huff0.BlockSizeMax / 2 - } + avgSize := min(litTotal, huff0.BlockSizeMax/2) huffBuff := make([]byte, 0, avgSize) // Target size - div := litTotal / avgSize - if div < 1 { - div = 1 - } + div := max(litTotal/avgSize, 1) if debug { println("Huffman weights:") } @@ -454,7 +448,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { huffBuff = append(huffBuff, 255) } scratch := &huff0.Scratch{TableLog: 11} - for tries := 0; tries < 255; tries++ { + for tries := range 255 { scratch = &huff0.Scratch{TableLog: 11} _, _, err = huff0.Compress1X(huffBuff, scratch) if err == nil { @@ -471,7 +465,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { // Bail out.... Just generate something huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) - for i := 0; i < 128; i++ { + for i := range 128 { huffBuff = append(huffBuff, byte(i)) } continue diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 7d250c67f..c1192ec38 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -8,7 +8,7 @@ import ( ) const ( - dictShardBits = 6 + dictShardBits = 7 ) type fastBase struct { @@ -41,11 +41,9 @@ func (e *fastBase) AppendCRC(dst []byte) []byte { // or a window size small enough to contain the input size, if > 0. func (e *fastBase) WindowSize(size int64) int32 { if size > 0 && size < int64(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } + b := max( + // Keep minimum window. + int32(1)<<uint(bits.Len(uint(size))), 1024) […] for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { s-- offset-- @@ -382,10 +377,7 @@ encodeLoop: nextEmit = s // Index skipped... - end := s - if s > sLimit+4 { - end = sLimit + 4 - } + end := min(s, sLimit+4) off := index0 + e.cur for index0 < end { cv0 := load6432(src, index0) @@ -444,10 +436,7 @@ encodeLoop: nextEmit = s // Index old s + 1 -> s - 1 or sLimit - end := s - if s > sLimit-4 { - end = sLimit - 4 - } + end := min(s, sLimit-4) off := index0 + e.cur for index0 < end { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 84a79fde7..85dcd28c3 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -190,10 +190,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -252,10 +249,7 @@ encodeLoop: // and have to do special offset treatment.
startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -480,10 +474,7 @@ encodeLoop: l := matched // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -719,10 +710,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -783,10 +771,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -1005,10 +990,7 @@ encodeLoop: l := matched // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index d36be7bd8..cf8cad00d 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -13,7 +13,7 @@ const ( dFastLongLen = 8 // Bytes used for table hash dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table - dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + dLongTableShardSize = dFastLongTableSize / dLongTableShardCnt // Size of an individual shard dFastShortTableBits = tableBits // Bits used in the short match table dFastShortTableSize = 1 << dFastShortTableBits // Size of the table @@ -149,10 +149,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -266,10 +263,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -462,10 +456,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { repIndex-- start-- @@ -576,10 +567,7 @@ encodeLoop: l := int32(matchLen(src[s+4:], src[t+4:])) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] { s-- t-- @@ -809,10 +797,7 @@ encodeLoop: // and have to do special offset treatment. 
startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -927,10 +912,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index f45a3da7d..9180a3a58 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -143,10 +143,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { repIndex-- start-- @@ -223,10 +220,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -387,10 +381,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { repIndex-- start-- @@ -469,10 +460,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] { s-- t-- @@ -655,10 +643,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { repIndex-- start-- @@ -735,10 +720,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index e47af66e7..d88f067e5 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -238,10 +238,7 @@ func (d *frameDec) reset(br byteBuffer) error { if d.WindowSize == 0 && d.SingleSegment { // We may not need window in this case. 
- d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } + d.WindowSize = max(d.FrameContentSize, MinWindowSize) if d.WindowSize > d.o.maxDecodedSize { if debugDecoder { printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index ab26326a8..3a0f4e7fb 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -149,7 +149,7 @@ func (s *fseEncoder) buildCTable() error { if v > largeLimit { s.zeroBits = true } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + for range v { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index 9a7de82f9..0bfb0e43c 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -231,10 +231,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state out := s.out - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) if debugDecoder { println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index c59f17e07..1f8c3cec2 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -79,10 +79,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { br := s.br - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) ctx := decodeSyncAsmContext{ llTable: s.litLengths.fse.dt[:maxTablesize], @@ -237,10 +234,7 @@ func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmC func (s *sequenceDecs) decode(seqs []seqVals) error { br := s.br - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) ctx := decodeAsmContext{ llTable: s.litLengths.fse.dt[:maxTablesize], diff --git a/vendor/github.com/klauspost/compress/zstd/simple_go124.go b/vendor/github.com/klauspost/compress/zstd/simple_go124.go new file mode 100644 index 000000000..2efc0497b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/simple_go124.go @@ -0,0 +1,56 @@ +// Copyright 2025+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +//go:build go1.24 + +package zstd + +import ( + "errors" + "runtime" + "sync" + "weak" +) + +var weakMu sync.Mutex +var simpleEnc weak.Pointer[Encoder] +var simpleDec weak.Pointer[Decoder] + +// EncodeTo appends the encoded data from src to dst. 
+func EncodeTo(dst []byte, src []byte) []byte { + weakMu.Lock() + enc := simpleEnc.Value() + if enc == nil { + var err error + enc, err = NewWriter(nil, WithEncoderConcurrency(runtime.NumCPU()), WithWindowSize(1<<20), WithLowerEncoderMem(true), WithZeroFrames(true)) + if err != nil { + panic("failed to create simple encoder: " + err.Error()) + } + simpleEnc = weak.Make(enc) + } + weakMu.Unlock() + + return enc.EncodeAll(src, dst) +} + +// DecodeTo appends the decoded data from src to dst. +// The maximum decoded size is 1GiB, +// not including what may already be in dst. +func DecodeTo(dst []byte, src []byte) ([]byte, error) { + weakMu.Lock() + dec := simpleDec.Value() + if dec == nil { + var err error + dec, err = NewReader(nil, WithDecoderConcurrency(runtime.NumCPU()), WithDecoderLowmem(true), WithDecoderMaxMemory(1<<30)) + if err != nil { + weakMu.Unlock() + return nil, errors.New("failed to create simple decoder: " + err.Error()) + } + runtime.SetFinalizer(dec, func(d *Decoder) { + d.Close() + }) + simpleDec = weak.Make(dec) + } + weakMu.Unlock() + return dec.DecodeAll(src, dst) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index a17381b8f..336c28893 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -257,7 +257,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { if !r.readFull(r.buf[:len(snappyMagicBody)], false) { return written, r.err } - for i := 0; i < len(snappyMagicBody); i++ { + for i := range len(snappyMagicBody) { if r.buf[i] != snappyMagicBody[i] { println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) r.err = ErrSnappyCorrupt diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go index 29c15c8c4..3198d7189 100644 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -19,7 +19,7 @@ const ZipMethodWinZip = 93 const ZipMethodPKWare = 20 // zipReaderPool is the default reader pool. -var zipReaderPool = sync.Pool{New: func() interface{} { +var zipReaderPool = sync.Pool{New: func() any { z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) if err != nil { panic(err) diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 6252b46ae..1a869710d 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -98,13 +98,13 @@ var ( ErrDecoderNilInput = errors.New("nil input provided as reader") ) -func println(a ...interface{}) { +func println(a ...any) { if debug || debugDecoder || debugEncoder { log.Println(a...) } } -func printf(format string, a ...interface{}) { +func printf(format string, a ...any) { if debug || debugDecoder || debugEncoder { log.Printf(format, a...) } diff --git a/vendor/github.com/moby/go-archive/archive.go b/vendor/github.com/moby/go-archive/archive.go index 7a105aef1..8dce2e6e2 100644 --- a/vendor/github.com/moby/go-archive/archive.go +++ b/vendor/github.com/moby/go-archive/archive.go @@ -36,10 +36,6 @@ import ( const ImpliedDirectoryMode = 0o755 type ( - // Compression is the state represents if compressed or not. - // - // Deprecated: use [compression.Compression]. 
- Compression = compression.Compression // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int @@ -95,14 +91,6 @@ func NewDefaultArchiver() *Archiver { // in order for the test to pass. type breakoutError error -const ( - Uncompressed = compression.None // Deprecated: use [compression.None]. - Bzip2 = compression.Bzip2 // Deprecated: use [compression.Bzip2]. - Gzip = compression.Gzip // Deprecated: use [compression.Gzip]. - Xz = compression.Xz // Deprecated: use [compression.Xz]. - Zstd = compression.Zstd // Deprecated: use [compression.Zstd]. -) - const ( AUFSWhiteoutFormat WhiteoutFormat = 0 // AUFSWhiteoutFormat is the default format for whiteouts OverlayWhiteoutFormat WhiteoutFormat = 1 // OverlayWhiteoutFormat formats whiteout according to the overlay standard. @@ -126,27 +114,6 @@ func IsArchivePath(path string) bool { return err == nil } -// DetectCompression detects the compression algorithm of the source. -// -// Deprecated: use [compression.Detect]. -func DetectCompression(source []byte) compression.Compression { - return compression.Detect(source) -} - -// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. -// -// Deprecated: use [compression.DecompressStream]. -func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - return compression.DecompressStream(archive) -} - -// CompressStream compresses the dest with specified compression algorithm. -// -// Deprecated: use [compression.CompressStream]. -func CompressStream(dest io.Writer, comp compression.Compression) (io.WriteCloser, error) { - return compression.CompressStream(dest, comp) -} - // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and @@ -235,13 +202,6 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi return pipeReader } -// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi. -// -// Deprecated: use [tarheader.FileInfoHeaderNoLookups]. -func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { - return tarheader.FileInfoHeaderNoLookups(fi, link) -} - // FileInfoHeader creates a populated Header from fi. // // Compared to the archive/tar package, this function fills in less information diff --git a/vendor/github.com/moby/go-archive/xattr_supported_unix.go b/vendor/github.com/moby/go-archive/xattr_supported_unix.go index 4d8824158..58a03e4a6 100644 --- a/vendor/github.com/moby/go-archive/xattr_supported_unix.go +++ b/vendor/github.com/moby/go-archive/xattr_supported_unix.go @@ -1,4 +1,4 @@ -//go:build !linux && !windows +//go:build darwin || freebsd || netbsd package archive diff --git a/vendor/github.com/moby/moby/api/LICENSE b/vendor/github.com/moby/moby/api/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/moby/moby/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
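The file added next, authconfig.go, gives the X-Registry-Auth header handling a package of its own, with base64url (RFC 4648, section 5) encoding on the client side and tolerant decoding on the daemon side. A minimal round-trip sketch of the intended usage, assuming only the Encode/Decode functions shown below and the Username/Password/ServerAddress fields of registry.AuthConfig; the host and credential values are illustrative:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/moby/moby/api/pkg/authconfig"
	"github.com/moby/moby/api/types/registry"
)

func main() {
	// Encode the credentials as a base64url JSON payload.
	encoded, err := authconfig.Encode(registry.AuthConfig{
		Username:      "someuser",
		Password:      "secret",
		ServerAddress: "registry.example.com",
	})
	if err != nil {
		panic(err)
	}

	// Clients send the payload in the X-Registry-Auth header.
	req, err := http.NewRequest(http.MethodPost, "http://daemon.example/images/create", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Registry-Auth", encoded)

	// The daemon side decodes it back. Decode always returns a usable
	// (possibly empty) AuthConfig, leaving it to the caller to decide
	// whether authentication was actually required.
	decoded, err := authconfig.Decode(req.Header.Get("X-Registry-Auth"))
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.Username) // someuser
}
```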
diff --git a/vendor/github.com/moby/moby/api/pkg/authconfig/authconfig.go b/vendor/github.com/moby/moby/api/pkg/authconfig/authconfig.go new file mode 100644 index 000000000..51f883e1c --- /dev/null +++ b/vendor/github.com/moby/moby/api/pkg/authconfig/authconfig.go @@ -0,0 +1,92 @@ +package authconfig + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/moby/moby/api/types/registry" +) + +// Encode serializes the auth configuration as a base64url encoded +// ([RFC4648, section 5]) JSON string for sending through the X-Registry-Auth header. +// +// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 +func Encode(authConfig registry.AuthConfig) (string, error) { + // Older daemons (or registries) may not handle an empty string, + // which resulted in an "io.EOF" when unmarshaling or decoding. + // + // FIXME(thaJeztah): find exactly what code-paths are impacted by this. + // if authConfig == (AuthConfig{}) { return "", nil } + buf, err := json.Marshal(authConfig) + if err != nil { + return "", errInvalidParameter{err} + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +// Decode decodes base64url encoded ([RFC4648, section 5]) JSON +// authentication information as sent through the X-Registry-Auth header. +// +// This function always returns an [AuthConfig], even if an error occurs. It is up +// to the caller to decide if authentication is required, and if the error can +// be ignored. +// +// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 +func Decode(authEncoded string) (*registry.AuthConfig, error) { + if authEncoded == "" { + return ®istry.AuthConfig{}, nil + } + + decoded, err := base64.URLEncoding.DecodeString(authEncoded) + if err != nil { + var e base64.CorruptInputError + if errors.As(err, &e) { + return ®istry.AuthConfig{}, invalid(errors.New("must be a valid base64url-encoded string")) + } + return ®istry.AuthConfig{}, invalid(err) + } + + if bytes.Equal(decoded, []byte("{}")) { + return ®istry.AuthConfig{}, nil + } + + return decode(bytes.NewReader(decoded)) +} + +// DecodeRequestBody decodes authentication information as sent as JSON in the +// body of a request. This function is to provide backward compatibility with old +// clients and API versions. Current clients and API versions expect authentication +// to be provided through the X-Registry-Auth header. +// +// Like [Decode], this function always returns an [AuthConfig], even if an +// error occurs. It is up to the caller to decide if authentication is required, +// and if the error can be ignored. +func DecodeRequestBody(r io.ReadCloser) (*registry.AuthConfig, error) { + return decode(r) +} + +func decode(r io.Reader) (*registry.AuthConfig, error) { + authConfig := ®istry.AuthConfig{} + if err := json.NewDecoder(r).Decode(authConfig); err != nil { + // always return an (empty) AuthConfig to increase compatibility with + // the existing API. 
+ return ®istry.AuthConfig{}, invalid(fmt.Errorf("invalid JSON: %w", err)) + } + return authConfig, nil +} + +func invalid(err error) error { + return errInvalidParameter{fmt.Errorf("invalid X-Registry-Auth header: %w", err)} +} + +type errInvalidParameter struct{ error } + +func (errInvalidParameter) InvalidParameter() {} + +func (e errInvalidParameter) Cause() error { return e.error } + +func (e errInvalidParameter) Unwrap() error { return e.error } diff --git a/vendor/github.com/moby/moby/api/types/blkiodev/blkio.go b/vendor/github.com/moby/moby/api/types/blkiodev/blkio.go new file mode 100644 index 000000000..931ae10ab --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/vendor/github.com/moby/moby/api/types/build/build.go b/vendor/github.com/moby/moby/api/types/build/build.go new file mode 100644 index 000000000..db9839773 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/build/build.go @@ -0,0 +1,16 @@ +package build + +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit BuilderVersion = "2" +) + +// Result contains the image id of a successful build. +type Result struct { + ID string +} diff --git a/vendor/github.com/moby/moby/api/types/build/cache.go b/vendor/github.com/moby/moby/api/types/build/cache.go new file mode 100644 index 000000000..39dd23a5f --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/build/cache.go @@ -0,0 +1,35 @@ +package build + +import ( + "time" +) + +// CacheRecord contains information about a build cache record. +type CacheRecord struct { + // ID is the unique ID of the build cache record. + ID string + // Parents is the list of parent build cache record IDs. + Parents []string `json:" Parents,omitempty"` + // Type is the cache record type. + Type string + // Description is a description of the build-step that produced the build cache. + Description string + // InUse indicates if the build cache is in use. + InUse bool + // Shared indicates if the build cache is shared. + Shared bool + // Size is the amount of disk space used by the build cache (in bytes). + Size int64 + // CreatedAt is the date and time at which the build cache was created. + CreatedAt time.Time + // LastUsedAt is the date and time at which the build cache was last used. + LastUsedAt *time.Time + UsageCount int +} + +// CachePruneReport contains the response for Engine API: +// POST "/build/prune" +type CachePruneReport struct { + CachesDeleted []string + SpaceReclaimed uint64 +} diff --git a/vendor/github.com/moby/moby/api/types/build/disk_usage.go b/vendor/github.com/moby/moby/api/types/build/disk_usage.go new file mode 100644 index 000000000..3613797db --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/build/disk_usage.go @@ -0,0 +1,36 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package build + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DiskUsage represents system data usage for build cache resources. +// +// swagger:model DiskUsage +type DiskUsage struct { + + // Count of active build cache records. + // + // Example: 1 + ActiveCount int64 `json:"ActiveCount,omitempty"` + + // List of build cache records. + // + Items []CacheRecord `json:"Items,omitempty"` + + // Disk space that can be reclaimed by removing inactive build cache records. + // + // Example: 12345678 + Reclaimable int64 `json:"Reclaimable,omitempty"` + + // Count of all build cache records. + // + // Example: 4 + TotalCount int64 `json:"TotalCount,omitempty"` + + // Disk space in use by build cache records. + // + // Example: 98765432 + TotalSize int64 `json:"TotalSize,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/checkpoint/create_request.go b/vendor/github.com/moby/moby/api/types/checkpoint/create_request.go new file mode 100644 index 000000000..c363783f2 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/checkpoint/create_request.go @@ -0,0 +1,8 @@ +package checkpoint + +// CreateRequest holds parameters to create a checkpoint from a container. +type CreateRequest struct { + CheckpointID string + CheckpointDir string + Exit bool +} diff --git a/vendor/github.com/moby/moby/api/types/checkpoint/list.go b/vendor/github.com/moby/moby/api/types/checkpoint/list.go new file mode 100644 index 000000000..94a9c0a47 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/checkpoint/list.go @@ -0,0 +1,7 @@ +package checkpoint + +// Summary represents the details of a checkpoint when listing endpoints. +type Summary struct { + // Name is the name of the checkpoint. + Name string +} diff --git a/vendor/github.com/moby/moby/api/types/common/error_response.go b/vendor/github.com/moby/moby/api/types/common/error_response.go new file mode 100644 index 000000000..b49d3eea0 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/common/error_response.go @@ -0,0 +1,17 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package common + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// Example: {"message":"Something went wrong."} +// +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. + // Required: true + Message string `json:"message"` +} diff --git a/vendor/github.com/moby/moby/api/types/common/error_response_ext.go b/vendor/github.com/moby/moby/api/types/common/error_response_ext.go new file mode 100644 index 000000000..c92dfe4b1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/common/error_response_ext.go @@ -0,0 +1,6 @@ +package common + +// Error returns the error message +func (e ErrorResponse) Error() string { + return e.Message +} diff --git a/vendor/github.com/moby/moby/api/types/common/id_response.go b/vendor/github.com/moby/moby/api/types/common/id_response.go new file mode 100644 index 000000000..7dfe4bf12 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/common/id_response.go @@ -0,0 +1,16 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package common + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// +// swagger:model IDResponse +type IDResponse struct { + + // The id of the newly created object. + // Required: true + ID string `json:"Id"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/change_type.go b/vendor/github.com/moby/moby/api/types/container/change_type.go new file mode 100644 index 000000000..52fc99235 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/change_type.go @@ -0,0 +1,17 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ChangeType Kind of change +// +// Can be one of: +// +// - `0`: Modified ("C") +// - `1`: Added ("A") +// - `2`: Deleted ("D") +// +// swagger:model ChangeType +type ChangeType uint8 diff --git a/vendor/github.com/moby/moby/api/types/container/change_types.go b/vendor/github.com/moby/moby/api/types/container/change_types.go new file mode 100644 index 000000000..3a3a83866 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/change_types.go @@ -0,0 +1,23 @@ +package container + +const ( + // ChangeModify represents the modify operation. + ChangeModify ChangeType = 0 + // ChangeAdd represents the add operation. + ChangeAdd ChangeType = 1 + // ChangeDelete represents the delete operation. + ChangeDelete ChangeType = 2 +) + +func (ct ChangeType) String() string { + switch ct { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + default: + return "" + } +} diff --git a/vendor/github.com/moby/moby/api/types/container/commit.go b/vendor/github.com/moby/moby/api/types/container/commit.go new file mode 100644 index 000000000..c5aab26ff --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/commit.go @@ -0,0 +1,7 @@ +package container + +import "github.com/moby/moby/api/types/common" + +// CommitResponse response for the commit API call, containing the ID of the +// image that was produced. +type CommitResponse = common.IDResponse diff --git a/vendor/github.com/moby/moby/api/types/container/config.go b/vendor/github.com/moby/moby/api/types/container/config.go new file mode 100644 index 000000000..78fa9f910 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/config.go @@ -0,0 +1,50 @@ +package container + +import ( + "time" + + dockerspec "github.com/moby/docker-image-spec/specs-go/v1" + "github.com/moby/moby/api/types/network" +) + +// MinimumDuration puts a minimum on user configured duration. +// This is to prevent API error on time unit. For example, API may +// set 3 as healthcheck interval with intention of 3 seconds, but +// Docker interprets it as 3 nanoseconds. +const MinimumDuration = 1 * time.Millisecond + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig = dockerspec.HealthcheckConfig + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. 
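+//
+// An illustrative sketch of populating a Config by hand (not from the
+// upstream file; the image name and values are hypothetical):
+//
+//	cfg := Config{
+//		Image:  "alpine:3.20",             // image reference the container runs
+//		Cmd:    []string{"echo", "hello"}, // command executed on start
+//		Env:    []string{"EXAMPLE=1"},     // environment for the process
+//		Labels: map[string]string{"com.example.purpose": "demo"},
+//	}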
+type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts network.PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string // List of environment variable to set in the container + Cmd []string // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). + Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in the command will be launched + Entrypoint []string // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + OnBuild []string `json:",omitempty"` // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell []string `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} diff --git a/vendor/github.com/moby/moby/api/types/container/container.go b/vendor/github.com/moby/moby/api/types/container/container.go new file mode 100644 index 000000000..bffb3de87 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/container.go @@ -0,0 +1,151 @@ +package container + +import ( + "os" + "time" + + "github.com/moby/moby/api/types/mount" + "github.com/moby/moby/api/types/storage" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// PruneReport contains the response for Engine API: +// POST "/containers/prune" +type PruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} + +// PathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. +type PathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. +type MountPoint struct { + // Type is the type of mount, see [mount.Type] definitions for details. + Type mount.Type `json:",omitempty"` + + // Name is the name reference to the underlying data defined by `Source` + // e.g., the volume name. + Name string `json:",omitempty"` + + // Source is the source location of the mount. + // + // For volumes, this contains the storage location of the volume (within + // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + // the source (host) part of the bind-mount. For `tmpfs` mount points, this + // field is empty. 
+ Source string + + // Destination is the path relative to the container root (`/`) where the + // Source is mounted inside the container. + Destination string + + // Driver is the volume driver used to create the volume (if it is a volume). + Driver string `json:",omitempty"` + + // Mode is a comma separated list of options supplied by the user when + // creating the bind/volume mount. + // + // The default is platform-specific (`"z"` on Linux, empty on Windows). + Mode string + + // RW indicates whether the mount is mounted writable (read-write). + RW bool + + // Propagation describes how mounts are propagated from the host into the + // mount point, and vice-versa. Refer to the Linux kernel documentation + // for details: + // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt + // + // This field is not used on Windows. + Propagation mount.Propagation +} + +// State stores container's running state +// it's part of ContainerJSONBase and returned by "inspect" command +type State struct { + Status ContainerState // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// Summary contains response of Engine API: +// GET "/containers/json" +type Summary struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + ImageManifestDescriptor *ocispec.Descriptor `json:"ImageManifestDescriptor,omitempty"` + Command string + Created int64 + Ports []PortSummary + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State ContainerState + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + Annotations map[string]string `json:",omitempty"` + } + Health *HealthSummary `json:",omitempty"` + NetworkSettings *NetworkSettingsSummary + Mounts []MountPoint +} + +// InspectResponse is the response for the GET "/containers/{name:.*}/json" +// endpoint. +type InspectResponse struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *State + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Name string + RestartCount int + Driver string + Platform string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *HostConfig + + // GraphDriver contains information about the container's graph driver. + GraphDriver *storage.DriverData `json:"GraphDriver,omitempty"` + + // Storage contains information about the storage used for the container's filesystem. + Storage *storage.Storage `json:"Storage,omitempty"` + + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` + Mounts []MountPoint + Config *Config + NetworkSettings *NetworkSettings + // ImageManifestDescriptor is the descriptor of a platform-specific manifest of the image used to create the container. 
+ ImageManifestDescriptor *ocispec.Descriptor `json:"ImageManifestDescriptor,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/create_request.go b/vendor/github.com/moby/moby/api/types/container/create_request.go new file mode 100644 index 000000000..decb208af --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/create_request.go @@ -0,0 +1,13 @@ +package container + +import "github.com/moby/moby/api/types/network" + +// CreateRequest is the request message sent to the server for container +// create calls. It is a config wrapper that holds the container [Config] +// (portable) and the corresponding [HostConfig] (non-portable) and +// [network.NetworkingConfig]. +type CreateRequest struct { + *Config + HostConfig *HostConfig `json:"HostConfig,omitempty"` + NetworkingConfig *network.NetworkingConfig `json:"NetworkingConfig,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/create_response.go b/vendor/github.com/moby/moby/api/types/container/create_response.go new file mode 100644 index 000000000..39d761aa9 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/create_response.go @@ -0,0 +1,24 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// CreateResponse ContainerCreateResponse +// +// # OK response to ContainerCreate operation +// +// swagger:model CreateResponse +type CreateResponse struct { + + // The ID of the created container + // Example: ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743 + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Example: [] + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/disk_usage.go b/vendor/github.com/moby/moby/api/types/container/disk_usage.go new file mode 100644 index 000000000..c36721d3b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/disk_usage.go @@ -0,0 +1,36 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DiskUsage represents system data usage information for container resources. +// +// swagger:model DiskUsage +type DiskUsage struct { + + // Count of active containers. + // + // Example: 1 + ActiveCount int64 `json:"ActiveCount,omitempty"` + + // List of container summaries. + // + Items []Summary `json:"Items,omitempty"` + + // Disk space that can be reclaimed by removing inactive containers. + // + // Example: 12345678 + Reclaimable int64 `json:"Reclaimable,omitempty"` + + // Count of all containers. + // + // Example: 4 + TotalCount int64 `json:"TotalCount,omitempty"` + + // Disk space in use by containers. 
+	//
+	// Example: 98765432
+	TotalSize int64 `json:"TotalSize,omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/container/errors.go b/vendor/github.com/moby/moby/api/types/container/errors.go
new file mode 100644
index 000000000..32c978037
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/container/errors.go
@@ -0,0 +1,9 @@
+package container
+
+type errInvalidParameter struct{ error }
+
+func (e *errInvalidParameter) InvalidParameter() {}
+
+func (e *errInvalidParameter) Unwrap() error {
+	return e.error
+}
diff --git a/vendor/github.com/moby/moby/api/types/container/exec.go b/vendor/github.com/moby/moby/api/types/container/exec.go
new file mode 100644
index 000000000..6895926ae
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/container/exec.go
@@ -0,0 +1,35 @@
+package container
+
+import "github.com/moby/moby/api/types/common"
+
+// ExecCreateResponse is the response for a successful exec-create request.
+// It holds the ID of the exec that was created.
+//
+// TODO(thaJeztah): make this a distinct type.
+type ExecCreateResponse = common.IDResponse
+
+// ExecInspectResponse is the API response for the "GET /exec/{id}/json"
+// endpoint and holds information about an exec.
+type ExecInspectResponse struct {
+	ID            string `json:"ID"`
+	Running       bool   `json:"Running"`
+	ExitCode      *int   `json:"ExitCode"`
+	ProcessConfig *ExecProcessConfig
+	OpenStdin     bool   `json:"OpenStdin"`
+	OpenStderr    bool   `json:"OpenStderr"`
+	OpenStdout    bool   `json:"OpenStdout"`
+	CanRemove     bool   `json:"CanRemove"`
+	ContainerID   string `json:"ContainerID"`
+	DetachKeys    []byte `json:"DetachKeys"`
+	Pid           int    `json:"Pid"`
+}
+
+// ExecProcessConfig holds information about the exec process
+// running on the host.
+type ExecProcessConfig struct {
+	Tty        bool     `json:"tty"`
+	Entrypoint string   `json:"entrypoint"`
+	Arguments  []string `json:"arguments"`
+	Privileged *bool    `json:"privileged,omitempty"`
+	User       string   `json:"user,omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/container/exec_create_request.go b/vendor/github.com/moby/moby/api/types/container/exec_create_request.go
new file mode 100644
index 000000000..dd7437cd2
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/container/exec_create_request.go
@@ -0,0 +1,17 @@
+package container
+
+// ExecCreateRequest is a small subset of the Config struct that holds the configuration
+// for the exec feature of docker.
+type ExecCreateRequest struct {
+	User       string // User that will run the command
+	Privileged bool   // Is the container in privileged mode
+	Tty        bool   // Attach standard streams to a tty.
+ ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + DetachKeys string // Escape keys for detach + Env []string // Environment variables + WorkingDir string // Working directory + Cmd []string // Execution commands and args +} diff --git a/vendor/github.com/moby/moby/api/types/container/exec_start_request.go b/vendor/github.com/moby/moby/api/types/container/exec_start_request.go new file mode 100644 index 000000000..4c2ba0a77 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/exec_start_request.go @@ -0,0 +1,12 @@ +package container + +// ExecStartRequest is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartRequest struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool + // Terminal size [height, width], unused if Tty == false + ConsoleSize *[2]uint `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/filesystem_change.go b/vendor/github.com/moby/moby/api/types/container/filesystem_change.go new file mode 100644 index 000000000..b9ec83e52 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/filesystem_change.go @@ -0,0 +1,21 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// FilesystemChange Change in the container's filesystem. +// +// swagger:model FilesystemChange +type FilesystemChange struct { + + // kind + // Required: true + Kind ChangeType `json:"Kind"` + + // Path to file or directory that has changed. + // + // Required: true + Path string `json:"Path"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/health.go b/vendor/github.com/moby/moby/api/types/container/health.go new file mode 100644 index 000000000..1a1ba84b4 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/health.go @@ -0,0 +1,57 @@ +package container + +import ( + "fmt" + "strings" + "time" +) + +// HealthStatus is a string representation of the container's health. +type HealthStatus string + +// Health states +const ( + NoHealthcheck HealthStatus = "none" // Indicates there is no healthcheck + Starting HealthStatus = "starting" // Starting indicates that the container is not yet ready + Healthy HealthStatus = "healthy" // Healthy indicates that the container is running correctly + Unhealthy HealthStatus = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status HealthStatus // Status is one of [Starting], [Healthy] or [Unhealthy]. + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// HealthSummary stores a summary of the container's healthcheck results. +type HealthSummary struct { + Status HealthStatus // Status is one of [NoHealthcheck], [Starting], [Healthy] or [Unhealthy]. 
+ FailingStreak int // FailingStreak is the number of consecutive failures +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +var validHealths = []string{ + string(NoHealthcheck), + string(Starting), + string(Healthy), + string(Unhealthy), +} + +// ValidateHealthStatus checks if the provided string is a valid +// container [HealthStatus]. +func ValidateHealthStatus(s HealthStatus) error { + switch s { + case NoHealthcheck, Starting, Healthy, Unhealthy: + return nil + default: + return errInvalidParameter{error: fmt.Errorf("invalid value for health (%s): must be one of %s", s, strings.Join(validHealths, ", "))} + } +} diff --git a/vendor/github.com/moby/moby/api/types/container/hostconfig.go b/vendor/github.com/moby/moby/api/types/container/hostconfig.go new file mode 100644 index 000000000..0f889c651 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/hostconfig.go @@ -0,0 +1,495 @@ +package container + +import ( + "errors" + "fmt" + "net/netip" + "strings" + + "github.com/docker/go-units" + "github.com/moby/moby/api/types/blkiodev" + "github.com/moby/moby/api/types/mount" + "github.com/moby/moby/api/types/network" +) + +// CgroupnsMode represents the cgroup namespace mode of the container +type CgroupnsMode string + +// cgroup namespace modes for containers +const ( + CgroupnsModeEmpty CgroupnsMode = "" + CgroupnsModePrivate CgroupnsMode = "private" + CgroupnsModeHost CgroupnsMode = "host" +) + +// IsPrivate indicates whether the container uses its own private cgroup namespace +func (c CgroupnsMode) IsPrivate() bool { + return c == CgroupnsModePrivate +} + +// IsHost indicates whether the container shares the host's cgroup namespace +func (c CgroupnsMode) IsHost() bool { + return c == CgroupnsModeHost +} + +// IsEmpty indicates whether the container cgroup namespace mode is unset +func (c CgroupnsMode) IsEmpty() bool { + return c == CgroupnsModeEmpty +} + +// Valid indicates whether the cgroup namespace mode is valid +func (c CgroupnsMode) Valid() bool { + return c.IsEmpty() || c.IsPrivate() || c.IsHost() +} + +// Isolation represents the isolation technology of a container. The supported +// values are platform specific +type Isolation string + +// Isolation modes for containers +const ( + IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default) + IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon + IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode + IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode +) + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. 
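+//
+// An illustrative sketch (not from the upstream file): both the empty value
+// and "default" resolve to the default mode, case-insensitively:
+//
+//	Isolation("").IsDefault()        // true
+//	Isolation("Default").IsDefault() // true
+//	Isolation("hyperv").IsDefault()  // false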
+func (i Isolation) IsDefault() bool { + // TODO consider making isolation-mode strict (case-sensitive) + v := Isolation(strings.ToLower(string(i))) + return v == IsolationDefault || v == IsolationEmpty +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationHyperV +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationProcess +} + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IpcMode constants +const ( + IPCModeNone IpcMode = "none" + IPCModeHost IpcMode = "host" + IPCModeContainer IpcMode = "container" + IPCModePrivate IpcMode = "private" + IPCModeShareable IpcMode = "shareable" +) + +// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. +func (n IpcMode) IsPrivate() bool { + return n == IPCModePrivate +} + +// IsHost indicates whether the container shares the host's ipc namespace. +func (n IpcMode) IsHost() bool { + return n == IPCModeHost +} + +// IsShareable indicates whether the container's ipc namespace can be shared with another container. +func (n IpcMode) IsShareable() bool { + return n == IPCModeShareable +} + +// IsContainer indicates whether the container uses another container's ipc namespace. +func (n IpcMode) IsContainer() bool { + _, ok := containerID(string(n)) + return ok +} + +// IsNone indicates whether container IpcMode is set to "none". +func (n IpcMode) IsNone() bool { + return n == IPCModeNone +} + +// IsEmpty indicates whether container IpcMode is empty +func (n IpcMode) IsEmpty() bool { + return n == "" +} + +// Valid indicates whether the ipc mode is valid. +func (n IpcMode) Valid() bool { + // TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid. + return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() +} + +// Container returns the name of the container ipc stack is going to be used. +func (n IpcMode) Container() (idOrName string) { + idOrName, _ = containerID(string(n)) + return idOrName +} + +// NetworkMode represents the container network stack. +type NetworkMode string + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == network.NetworkNone +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == network.NetworkDefault +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !n.IsHost() && !n.IsContainer() +} + +// IsContainer indicates whether container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + _, ok := containerID(string(n)) + return ok +} + +// ConnectedContainer is the id of the container which network this container is connected to. +func (n NetworkMode) ConnectedContainer() (idOrName string) { + idOrName, _ = containerID(string(n)) + return idOrName +} + +// UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} + +// UsernsMode represents userns mode in the container. 
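+//
+// An illustrative sketch (not from the upstream file): only "" and "host"
+// are valid values, and anything other than "host" counts as private:
+//
+//	UsernsMode("host").IsHost() // true
+//	UsernsMode("").IsPrivate()  // true
+//	UsernsMode("bogus").Valid() // false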
+type UsernsMode string
+
+// IsHost indicates whether the container uses the host's userns.
+func (n UsernsMode) IsHost() bool {
+	return n == "host"
+}
+
+// IsPrivate indicates whether the container uses a private userns.
+func (n UsernsMode) IsPrivate() bool {
+	return !n.IsHost()
+}
+
+// Valid indicates whether the userns is valid.
+func (n UsernsMode) Valid() bool {
+	return n == "" || n.IsHost()
+}
+
+// CgroupSpec represents the cgroup to use for the container.
+type CgroupSpec string
+
+// IsContainer indicates whether the container is using another container cgroup
+func (c CgroupSpec) IsContainer() bool {
+	_, ok := containerID(string(c))
+	return ok
+}
+
+// Valid indicates whether the cgroup spec is valid.
+func (c CgroupSpec) Valid() bool {
+	// TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid.
+	return c == "" || c.IsContainer()
+}
+
+// Container returns the ID or name of the container whose cgroup will be used.
+func (c CgroupSpec) Container() (idOrName string) {
+	idOrName, _ = containerID(string(c))
+	return idOrName
+}
+
+// UTSMode represents the UTS namespace of the container.
+type UTSMode string
+
+// IsPrivate indicates whether the container uses its private UTS namespace.
+func (n UTSMode) IsPrivate() bool {
+	return !n.IsHost()
+}
+
+// IsHost indicates whether the container uses the host's UTS namespace.
+func (n UTSMode) IsHost() bool {
+	return n == "host"
+}
+
+// Valid indicates whether the UTS namespace is valid.
+func (n UTSMode) Valid() bool {
+	return n == "" || n.IsHost()
+}
+
+// PidMode represents the pid namespace of the container.
+type PidMode string
+
+// IsPrivate indicates whether the container uses its own new pid namespace.
+func (n PidMode) IsPrivate() bool {
+	return !n.IsHost() && !n.IsContainer()
+}
+
+// IsHost indicates whether the container uses the host's pid namespace.
+func (n PidMode) IsHost() bool {
+	return n == "host"
+}
+
+// IsContainer indicates whether the container uses a container's pid namespace.
+func (n PidMode) IsContainer() bool {
+	_, ok := containerID(string(n))
+	return ok
+}
+
+// Valid indicates whether the pid namespace is valid.
+func (n PidMode) Valid() bool {
+	return n == "" || n.IsHost() || validContainer(string(n))
+}
+
+// Container returns the name of the container whose pid namespace is going to be used.
+func (n PidMode) Container() (idOrName string) {
+	idOrName, _ = containerID(string(n))
+	return idOrName
+}
+
+// DeviceRequest represents a request for devices from a device driver.
+// Used by GPU device drivers.
+type DeviceRequest struct {
+	Driver       string            // Name of device driver
+	Count        int               // Number of devices to request (-1 = All)
+	DeviceIDs    []string          // List of device IDs as recognizable by the device driver
+	Capabilities [][]string        // An OR list of AND lists of device capabilities (e.g. "gpu")
+	Options      map[string]string // Options to pass onto the device driver
+}
+
+// DeviceMapping represents the device mapping between the host and the container.
+type DeviceMapping struct {
+	PathOnHost        string
+	PathInContainer   string
+	CgroupPermissions string
+}
+
+// RestartPolicy represents the restart policies of the container.
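+//
+// An illustrative sketch (not from the upstream file): a policy that restarts
+// a failing container at most three times:
+//
+//	rp := RestartPolicy{
+//		Name:              RestartPolicyOnFailure,
+//		MaximumRetryCount: 3,
+//	}
+//	err := ValidateRestartPolicy(rp) // nil; a retry count is only valid with "on-failure"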
+type RestartPolicy struct {
+	Name              RestartPolicyMode
+	MaximumRetryCount int
+}
+
+type RestartPolicyMode string
+
+const (
+	RestartPolicyDisabled      RestartPolicyMode = "no"
+	RestartPolicyAlways        RestartPolicyMode = "always"
+	RestartPolicyOnFailure     RestartPolicyMode = "on-failure"
+	RestartPolicyUnlessStopped RestartPolicyMode = "unless-stopped"
+)
+
+// IsNone indicates whether the container has the "no" restart policy.
+// This means the container will not automatically restart when exiting.
+func (rp *RestartPolicy) IsNone() bool {
+	return rp.Name == RestartPolicyDisabled || rp.Name == ""
+}
+
+// IsAlways indicates whether the container has the "always" restart policy.
+// This means the container will automatically restart regardless of the exit status.
+func (rp *RestartPolicy) IsAlways() bool {
+	return rp.Name == RestartPolicyAlways
+}
+
+// IsOnFailure indicates whether the container has the "on-failure" restart policy.
+// This means the container will automatically restart when exiting with a non-zero exit status.
+func (rp *RestartPolicy) IsOnFailure() bool {
+	return rp.Name == RestartPolicyOnFailure
+}
+
+// IsUnlessStopped indicates whether the container has the
+// "unless-stopped" restart policy. This means the container will
+// automatically restart unless the user has put it into a stopped state.
+func (rp *RestartPolicy) IsUnlessStopped() bool {
+	return rp.Name == RestartPolicyUnlessStopped
+}
+
+// IsSame compares two RestartPolicy to see if they are the same
+func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
+	return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
+}
+
+// ValidateRestartPolicy validates the given RestartPolicy.
+func ValidateRestartPolicy(policy RestartPolicy) error {
+	switch policy.Name {
+	case RestartPolicyAlways, RestartPolicyUnlessStopped, RestartPolicyDisabled:
+		if policy.MaximumRetryCount != 0 {
+			msg := "invalid restart policy: maximum retry count can only be used with 'on-failure'"
+			if policy.MaximumRetryCount < 0 {
+				msg += " and cannot be negative"
+			}
+			return &errInvalidParameter{errors.New(msg)}
+		}
+		return nil
+	case RestartPolicyOnFailure:
+		if policy.MaximumRetryCount < 0 {
+			return &errInvalidParameter{errors.New("invalid restart policy: maximum retry count cannot be negative")}
+		}
+		return nil
+	case "":
+		// Versions before v25.0.0 created an empty restart-policy "name" as
+		// default. Allow an empty name with "any" MaximumRetryCount for
+		// backward-compatibility.
+		return nil
+	default:
+		return &errInvalidParameter{fmt.Errorf("invalid restart policy: unknown policy '%s'; use one of '%s', '%s', '%s', or '%s'", policy.Name, RestartPolicyDisabled, RestartPolicyAlways, RestartPolicyOnFailure, RestartPolicyUnlessStopped)}
+	}
+}
+
+// LogMode is a type to define the available modes for logging
+// These modes affect how logs are handled when log messages start piling up.
+type LogMode string
+
+// Available logging modes
+const (
+	LogModeUnset    LogMode = ""
+	LogModeBlocking LogMode = "blocking"
+	LogModeNonBlock LogMode = "non-blocking"
+)
+
+// LogConfig represents the logging configuration of the container.
+type LogConfig struct {
+	Type   string
+	Config map[string]string
+}
+
+// Ulimit is an alias for [units.Ulimit], which may be moving to a different
+// location or become a local type. This alias is to help transitioning.
+//
+// Users are recommended to use this alias instead of using [units.Ulimit] directly.
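+//
+// An illustrative sketch (not from the upstream file; the limit values are
+// hypothetical) of raising the open-file limit through Resources.Ulimits:
+//
+//	res := Resources{
+//		Ulimits: []*Ulimit{{Name: "nofile", Soft: 1024, Hard: 4096}},
+//	}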
+type Ulimit = units.Ulimit
+
+// Resources contains container's resources (cgroups config, ulimits...)
+type Resources struct {
+	// Applicable to all platforms
+	CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
+	Memory    int64 // Memory limit (in bytes)
+	NanoCPUs  int64 `json:"NanoCpus"` // CPU quota in units of 10^-9 CPUs.
+
+	// Applicable to UNIX platforms
+	CgroupParent         string // Parent cgroup.
+	BlkioWeight          uint16 // Block IO weight (relative weight vs. other containers)
+	BlkioWeightDevice    []*blkiodev.WeightDevice
+	BlkioDeviceReadBps   []*blkiodev.ThrottleDevice
+	BlkioDeviceWriteBps  []*blkiodev.ThrottleDevice
+	BlkioDeviceReadIOps  []*blkiodev.ThrottleDevice
+	BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
+	CPUPeriod            int64           `json:"CpuPeriod"`          // CPU CFS (Completely Fair Scheduler) period
+	CPUQuota             int64           `json:"CpuQuota"`           // CPU CFS (Completely Fair Scheduler) quota
+	CPURealtimePeriod    int64           `json:"CpuRealtimePeriod"`  // CPU real-time period
+	CPURealtimeRuntime   int64           `json:"CpuRealtimeRuntime"` // CPU real-time runtime
+	CpusetCpus           string          // CpusetCpus 0-2, 0,1
+	CpusetMems           string          // CpusetMems 0-2, 0,1
+	Devices              []DeviceMapping // List of devices to map inside the container
+	DeviceCgroupRules    []string        // List of rules to be added to the device cgroup
+	DeviceRequests       []DeviceRequest // List of device requests for device drivers
+	MemoryReservation    int64           // Memory soft limit (in bytes)
+	MemorySwap           int64           // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+	MemorySwappiness     *int64          // Tuning container memory swappiness behaviour
+	OomKillDisable       *bool           // Whether to disable OOM Killer or not
+	PidsLimit            *int64          // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
+	Ulimits              []*Ulimit       // List of ulimits to be set in the container
+
+	// Applicable to Windows
+	CPUCount           int64  `json:"CpuCount"`   // CPU count
+	CPUPercent         int64  `json:"CpuPercent"` // CPU percent
+	IOMaximumIOps      uint64 // Maximum IOps for the container system drive
+	IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
+}
+
+// UpdateConfig holds the mutable attributes of a Container.
+// Those attributes can be updated at runtime.
+type UpdateConfig struct {
+	// Contains container's resources (cgroups, ulimits)
+	Resources
+	RestartPolicy RestartPolicy
+}
+
+// HostConfig the non-portable Config structure of a container.
+// Here, "non-portable" means "dependent of the host we are running on".
+// Portable information *should* appear in Config.
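+//
+// An illustrative sketch (not from the upstream file; values are hypothetical):
+//
+//	hc := HostConfig{
+//		NetworkMode:   "bridge",                                        // default bridge network
+//		RestartPolicy: RestartPolicy{Name: RestartPolicyUnlessStopped}, // survive daemon restarts
+//		Resources:     Resources{Memory: 512 * 1024 * 1024},            // 512 MiB memory limit
+//	}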
+type HostConfig struct { + // Applicable to all platforms + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + LogConfig LogConfig // Configuration of the logs for this container + NetworkMode NetworkMode // Network mode to use for the container + PortBindings network.PortMap // Port mapping between the exposed port (container) and the host + RestartPolicy RestartPolicy // Restart policy to be used for the container + AutoRemove bool // Automatically remove container when it exits + VolumeDriver string // Name of the volume driver used to mount volumes + VolumesFrom []string // List of volumes to take from other container + ConsoleSize [2]uint // Initial console size (height,width) + Annotations map[string]string `json:",omitempty"` // Arbitrary non-identifying metadata attached to container and provided to the runtime + + // Applicable to UNIX platforms + CapAdd []string // List of kernel capabilities to add to the container + CapDrop []string // List of kernel capabilities to remove from the container + CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container + DNS []netip.Addr `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + ExtraHosts []string // List of extra hosts + GroupAdd []string // List of additional groups that the container process will run as + IpcMode IpcMode // IPC namespace to use for the container + Cgroup CgroupSpec // Cgroup to use for the container + Links []string // List of links (in the name:alias form) + OomScoreAdj int // Container preference for OOM-killing + PidMode PidMode // PID namespace to use for the container + Privileged bool // Is the container in privileged mode + PublishAllPorts bool // Should docker publish all exposed port for the container + ReadonlyRootfs bool // Is the container root filesystem in read-only + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. + StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. + Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container + UTSMode UTSMode // UTS namespace to use for the container + UsernsMode UsernsMode // The user namespace to use for the container + ShmSize int64 // Total shm memory usage + Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container + Runtime string `json:",omitempty"` // Runtime to use with this container + + // Applicable to Windows + Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) + + // Contains container's resources (cgroups, ulimits) + Resources + + // Mounts specs used by the container + Mounts []mount.Mount `json:",omitempty"` + + // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) + MaskedPaths []string + + // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) + ReadonlyPaths []string + + // Run a custom init inside the container, if null, use the daemon's configured settings + Init *bool `json:",omitempty"` +} + +// containerID splits "container:" values. It returns the container +// ID or name, and whether an ID/name was found. 
It returns an empty string and
+// "false" if the value does not have a "container:" prefix. Further validation
+// of the returned value, including checking if the value is empty, should be
+// handled by the caller.
+func containerID(val string) (idOrName string, ok bool) {
+	k, v, hasSep := strings.Cut(val, ":")
+	if !hasSep || k != "container" {
+		return "", false
+	}
+	return v, true
+}
+
+// validContainer checks if the given value is a "container:" mode with
+// a non-empty name/ID.
+func validContainer(val string) bool {
+	id, ok := containerID(val)
+	return ok && id != ""
+}
diff --git a/vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go b/vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go
new file mode 100644
index 000000000..326a5da7e
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go
@@ -0,0 +1,45 @@
+//go:build !windows
+
+package container
+
+import "github.com/moby/moby/api/types/network"
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+	return i.IsDefault()
+}
+
+// IsBridge indicates whether container uses the bridge network stack
+func (n NetworkMode) IsBridge() bool {
+	return n == network.NetworkBridge
+}
+
+// IsHost indicates whether container uses the host network stack.
+func (n NetworkMode) IsHost() bool {
+	return n == network.NetworkHost
+}
+
+// IsUserDefined indicates user-created network
+func (n NetworkMode) IsUserDefined() bool {
+	return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+	switch {
+	case n.IsDefault():
+		return network.NetworkDefault
+	case n.IsBridge():
+		return network.NetworkBridge
+	case n.IsHost():
+		return network.NetworkHost
+	case n.IsNone():
+		return network.NetworkNone
+	case n.IsContainer():
+		return "container"
+	case n.IsUserDefined():
+		return n.UserDefined()
+	default:
+		return ""
+	}
+}
diff --git a/vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go b/vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go
new file mode 100644
index 000000000..977a37602
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go
@@ -0,0 +1,47 @@
+package container
+
+import "github.com/moby/moby/api/types/network"
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+	return i.IsDefault() || i.IsHyperV() || i.IsProcess()
+}
+
+// IsBridge indicates whether container uses the bridge network stack;
+// on Windows it is given the name NAT
+func (n NetworkMode) IsBridge() bool {
+	return n == network.NetworkNat
+}
+
+// IsHost indicates whether container uses the host network stack.
+// Returns false as this is not supported on Windows.
+func (n NetworkMode) IsHost() bool {
+	return false
+}
+
+// IsUserDefined indicates user-created network
+func (n NetworkMode) IsUserDefined() bool {
+	return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+	switch {
+	case n.IsDefault():
+		return network.NetworkDefault
+	case n.IsBridge():
+		return network.NetworkNat
+	case n.IsHost():
+		// Windows currently doesn't support host network-mode, so
+		// this would currently never happen.
+ return network.NetworkHost + case n.IsNone(): + return network.NetworkNone + case n.IsContainer(): + return "container" + case n.IsUserDefined(): + return n.UserDefined() + default: + return "" + } +} diff --git a/vendor/github.com/moby/moby/api/types/container/network_settings.go b/vendor/github.com/moby/moby/api/types/container/network_settings.go new file mode 100644 index 000000000..c51c0839d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/network_settings.go @@ -0,0 +1,22 @@ +package container + +import ( + "github.com/moby/moby/api/types/network" +) + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + SandboxID string // SandboxID uniquely represents a container's network stack + SandboxKey string // SandboxKey identifies the sandbox + + // Ports is a collection of [network.PortBinding] indexed by [network.Port] + Ports network.PortMap + + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsSummary provides a summary of container's networks +// in /containers/json +type NetworkSettingsSummary struct { + Networks map[string]*network.EndpointSettings +} diff --git a/vendor/github.com/moby/moby/api/types/container/port_summary.go b/vendor/github.com/moby/moby/api/types/container/port_summary.go new file mode 100644 index 000000000..68148eece --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/port_summary.go @@ -0,0 +1,33 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/netip" +) + +// PortSummary Describes a port-mapping between the container and the host. +// +// Example: {"PrivatePort":8080,"PublicPort":80,"Type":"tcp"} +// +// swagger:model PortSummary +type PortSummary struct { + + // Host IP address that the container's port is mapped to + IP netip.Addr `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + // Enum: ["tcp","udp","sctp"] + Type string `json:"Type"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/state.go b/vendor/github.com/moby/moby/api/types/container/state.go new file mode 100644 index 000000000..47c6d1249 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/state.go @@ -0,0 +1,40 @@ +package container + +import ( + "fmt" + "strings" +) + +// ContainerState is a string representation of the container's current state. +type ContainerState string + +const ( + StateCreated ContainerState = "created" // StateCreated indicates the container is created, but not (yet) started. + StateRunning ContainerState = "running" // StateRunning indicates that the container is running. + StatePaused ContainerState = "paused" // StatePaused indicates that the container's current state is paused. + StateRestarting ContainerState = "restarting" // StateRestarting indicates that the container is currently restarting. + StateRemoving ContainerState = "removing" // StateRemoving indicates that the container is being removed. + StateExited ContainerState = "exited" // StateExited indicates that the container exited. + StateDead ContainerState = "dead" // StateDead indicates that the container failed to be deleted. Containers in this state are attempted to be cleaned up when the daemon restarts. 
+) + +var validStates = []string{ + string(StateCreated), + string(StateRunning), + string(StatePaused), + string(StateRestarting), + string(StateRemoving), + string(StateExited), + string(StateDead), +} + +// ValidateContainerState checks if the provided string is a valid +// container [ContainerState]. +func ValidateContainerState(s ContainerState) error { + switch s { + case StateCreated, StateRunning, StatePaused, StateRestarting, StateRemoving, StateExited, StateDead: + return nil + default: + return errInvalidParameter{error: fmt.Errorf("invalid value for state (%s): must be one of %s", s, strings.Join(validStates, ", "))} + } +} diff --git a/vendor/github.com/moby/moby/api/types/container/stats.go b/vendor/github.com/moby/moby/api/types/container/stats.go new file mode 100644 index 000000000..6a34f6ab7 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/stats.go @@ -0,0 +1,224 @@ +package container + +import "time" + +// ThrottlingData stores CPU throttling stats of one running container. +// Not used on Windows. +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hits its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// CPUUsage stores All CPU stats aggregated since container inception. +type CPUUsage struct { + // Total CPU time consumed. + // Units: nanoseconds (Linux) + // Units: 100's of nanoseconds (Windows) + TotalUsage uint64 `json:"total_usage"` + + // Total CPU time consumed per core (Linux). Not used on Windows. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + + // Time spent by tasks of the cgroup in kernel mode (Linux). + // Time spent by all container processes in kernel mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + + // Time spent by tasks of the cgroup in user mode (Linux). + // Time spent by all container processes in user mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of container +type CPUStats struct { + // CPU Usage. Linux and Windows. + CPUUsage CPUUsage `json:"cpu_usage"` + + // System Usage. Linux only. + SystemUsage uint64 `json:"system_cpu_usage,omitempty"` + + // Online CPUs. Linux only. + OnlineCPUs uint32 `json:"online_cpus,omitempty"` + + // Throttling Data. Linux only. + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +// MemoryStats aggregates all memory stats since container inception on Linux. +// Windows returns stats for commit and private working set only. +type MemoryStats struct { + // Linux Memory Stats + + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. 
+ Failcnt uint64 `json:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty"` + + // Windows Memory Stats + // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx + + // committed bytes + Commit uint64 `json:"commitbytes,omitempty"` + // peak committed bytes + CommitPeak uint64 `json:"commitpeakbytes,omitempty"` + // private working set + PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` +} + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// Not used on Windows. +type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// BlkioStats stores All IO service stats for data read and write. +// This is a Linux specific structure as the differences between expressing +// block I/O on Windows and Linux are sufficiently significant to make +// little sense attempting to morph into a combined structure. +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// StorageStats is the disk I/O stats for read/write on Windows. +type StorageStats struct { + ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` + ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` + WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` + WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` +} + +// NetworkStats aggregates the network stats of one container +type NetworkStats struct { + // Bytes received. Windows and Linux. + RxBytes uint64 `json:"rx_bytes"` + // Packets received. Windows and Linux. + RxPackets uint64 `json:"rx_packets"` + // Received errors. Not used on Windows. Note that we don't `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + RxErrors uint64 `json:"rx_errors"` + // Incoming packets dropped. Windows and Linux. + RxDropped uint64 `json:"rx_dropped"` + // Bytes sent. Windows and Linux. + TxBytes uint64 `json:"tx_bytes"` + // Packets sent. Windows and Linux. + TxPackets uint64 `json:"tx_packets"` + // Sent errors. Not used on Windows. Note that we don't `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + TxErrors uint64 `json:"tx_errors"` + // Outgoing packets dropped. Windows and Linux. + TxDropped uint64 `json:"tx_dropped"` + // Endpoint ID. Not used on Linux. + EndpointID string `json:"endpoint_id,omitempty"` + // Instance ID. Not used on Linux. + InstanceID string `json:"instance_id,omitempty"` +} + +// PidsStats contains the stats of a container's pids +type PidsStats struct { + // Current is the number of pids in the cgroup + Current uint64 `json:"current,omitempty"` + // Limit is the hard limit on the number of pids in the cgroup. + // A "Limit" of 0 means that there is no limit. + Limit uint64 `json:"limit,omitempty"` +} + +// StatsResponse aggregates all types of stats of one container. 
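+//
+// An illustrative sketch (not from the upstream file; s is a hypothetical
+// decoded StatsResponse): on Linux, a CPU percentage is conventionally
+// derived from the deltas between the current sample and the "pre" sample:
+//
+//	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage - s.PreCPUStats.CPUUsage.TotalUsage)
+//	sysDelta := float64(s.CPUStats.SystemUsage - s.PreCPUStats.SystemUsage)
+//	if cpuDelta > 0 && sysDelta > 0 {
+//		percent := (cpuDelta / sysDelta) * float64(s.CPUStats.OnlineCPUs) * 100.0
+//		_ = percent
+//	}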
+type StatsResponse struct {
+	// ID is the ID of the container for which the stats were collected.
+	ID string `json:"id,omitempty"`
+
+	// Name is the name of the container for which the stats were collected.
+	Name string `json:"name,omitempty"`
+
+	// OSType is the OS of the container ("linux" or "windows") to allow
+	// platform-specific handling of stats.
+	OSType string `json:"os_type,omitempty"`
+
+	// Read is the date and time at which this sample was collected.
+	Read time.Time `json:"read"`
+
+	// CPUStats contains CPU related info of the container.
+	CPUStats CPUStats `json:"cpu_stats,omitempty"`
+
+	// MemoryStats aggregates all memory stats since container inception on Linux.
+	// Windows returns stats for commit and private working set only.
+	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+
+	// Networks contains network statistics for the container per interface.
+	//
+	// This field is omitted if the container has no networking enabled.
+	Networks map[string]NetworkStats `json:"networks,omitempty"`
+
+	// -------------------------------------------------------------------------
+	// Linux-specific stats, not populated on Windows.
+	// -------------------------------------------------------------------------
+
+	// PidsStats contains Linux-specific stats of a container's process-IDs (PIDs).
+	//
+	// This field is Linux-specific and omitted for Windows containers.
+	PidsStats PidsStats `json:"pids_stats,omitempty"`
+
+	// BlkioStats stores all IO service stats for data read and write.
+	//
+	// This type is Linux-specific and holds many fields that are specific
+	// to cgroups v1.
+	//
+	// On a cgroup v2 host, all fields other than "io_service_bytes_recursive"
+	// are omitted or "null".
+	//
+	// This type is only populated on Linux and omitted for Windows containers.
+	BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+
+	// -------------------------------------------------------------------------
+	// Windows-specific stats, not populated on Linux.
+	// -------------------------------------------------------------------------
+
+	// NumProcs is the number of processors on the system.
+	//
+	// This field is Windows-specific and always zero for Linux containers.
+	NumProcs uint32 `json:"num_procs"`
+
+	// StorageStats is the disk I/O stats for read/write on Windows.
+	//
+	// This type is Windows-specific and omitted for Linux containers.
+	StorageStats StorageStats `json:"storage_stats,omitempty"`
+
+	// -------------------------------------------------------------------------
+	// PreRead and PreCPUStats contain the previous sample of stats for
+	// the container, and can be used to perform delta-calculation.
+	// -------------------------------------------------------------------------
+
+	// PreRead is the date and time at which this first sample was collected.
+	// This field is not propagated if the "one-shot" option is set. If the
+	// "one-shot" option is set, this field may be omitted, empty, or set
+	// to a default date (`0001-01-01T00:00:00Z`).
+	PreRead time.Time `json:"preread"`
+
+	// PreCPUStats contains the CPUStats of the previous sample.
+	PreCPUStats CPUStats `json:"precpu_stats,omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/container/top_response.go b/vendor/github.com/moby/moby/api/types/container/top_response.go
new file mode 100644
index 000000000..966603617
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/container/top_response.go
@@ -0,0 +1,23 @@
+// Code generated by go-swagger; DO NOT EDIT.
+ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// TopResponse ContainerTopResponse +// +// Container "top" response. +// +// swagger:model TopResponse +type TopResponse struct { + + // Each process running in the container, where each process + // is an array of values corresponding to the titles. + // Example: {"Processes":[["root","13642","882","0","17:03","pts/0","00:00:00","/bin/bash"],["root","13735","13642","0","17:06","pts/0","00:00:00","sleep 10"]]} + Processes [][]string `json:"Processes"` + + // The ps column titles + // Example: {"Titles":["UID","PID","PPID","C","STIME","TTY","TIME","CMD"]} + Titles []string `json:"Titles"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/update_response.go b/vendor/github.com/moby/moby/api/types/container/update_response.go new file mode 100644 index 000000000..2f7263b14 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/update_response.go @@ -0,0 +1,18 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// UpdateResponse ContainerUpdateResponse +// +// Response for a successful container-update. +// +// swagger:model UpdateResponse +type UpdateResponse struct { + + // Warnings encountered when updating the container. + // Example: ["Published ports are discarded when using host network mode"] + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/wait_exit_error.go b/vendor/github.com/moby/moby/api/types/container/wait_exit_error.go new file mode 100644 index 000000000..96a7770c3 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/wait_exit_error.go @@ -0,0 +1,15 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// WaitExitError container waiting error, if any +// +// swagger:model WaitExitError +type WaitExitError struct { + + // Details of an error + Message string `json:"Message,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/wait_response.go b/vendor/github.com/moby/moby/api/types/container/wait_response.go new file mode 100644 index 000000000..68d3c3872 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/wait_response.go @@ -0,0 +1,21 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// WaitResponse ContainerWaitResponse +// +// # OK response to ContainerWait operation +// +// swagger:model WaitResponse +type WaitResponse struct { + + // error + Error *WaitExitError `json:"Error,omitempty"` + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/waitcondition.go b/vendor/github.com/moby/moby/api/types/container/waitcondition.go new file mode 100644 index 000000000..64820fe35 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/waitcondition.go @@ -0,0 +1,22 @@ +package container + +// WaitCondition is a type used to specify a container state for which +// to wait. 
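+//
+// An illustrative sketch (not from the upstream file; the wait call is an
+// assumption about the caller's client API): the condition is handed to a
+// ContainerWait-style method, e.g.
+//
+//	// Wait until the container has been removed, rather than merely stopped:
+//	cond := WaitConditionRemoved
+//	_ = cond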
+type WaitCondition string
+
+// Possible WaitCondition values.
+//
+// WaitConditionNotRunning (default) is used to wait for any of the non-running
+// states: "created", "exited", "dead", "removing", or "removed".
+//
+// WaitConditionNextExit is used to wait for the next time the state changes
+// to a non-running state. If the state is currently "created" or "exited",
+// this would cause Wait() to block until either the container runs and exits
+// or is removed.
+//
+// WaitConditionRemoved is used to wait for the container to be removed.
+const (
+	WaitConditionNotRunning WaitCondition = "not-running"
+	WaitConditionNextExit   WaitCondition = "next-exit"
+	WaitConditionRemoved    WaitCondition = "removed"
+)
diff --git a/vendor/github.com/moby/moby/api/types/events/events.go b/vendor/github.com/moby/moby/api/types/events/events.go
new file mode 100644
index 000000000..b8393addd
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/events/events.go
@@ -0,0 +1,121 @@
+package events
+
+// Type is used for event-types.
+type Type string
+
+// List of known event types.
+const (
+	BuilderEventType   Type = "builder"   // BuilderEventType is the event type that the builder generates.
+	ConfigEventType    Type = "config"    // ConfigEventType is the event type that configs generate.
+	ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate.
+	DaemonEventType    Type = "daemon"    // DaemonEventType is the event type that the daemon generates.
+	ImageEventType     Type = "image"     // ImageEventType is the event type that images generate.
+	NetworkEventType   Type = "network"   // NetworkEventType is the event type that networks generate.
+	NodeEventType      Type = "node"      // NodeEventType is the event type that nodes generate.
+	PluginEventType    Type = "plugin"    // PluginEventType is the event type that plugins generate.
+	SecretEventType    Type = "secret"    // SecretEventType is the event type that secrets generate.
+	ServiceEventType   Type = "service"   // ServiceEventType is the event type that services generate.
+	VolumeEventType    Type = "volume"    // VolumeEventType is the event type that volumes generate.
+)
+
+// Action is used for event-actions.
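+//
+// A hypothetical consumer of the events stream might switch on the
+// (Type, Action) pair of each received Message:
+//
+//	switch {
+//	case msg.Type == ContainerEventType && msg.Action == ActionDie:
+//		// a container exited
+//	case msg.Type == NetworkEventType && msg.Action == ActionConnect:
+//		// a container was connected to a network
+//	}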
+type Action string
+
+const (
+	ActionCreate       Action = "create"
+	ActionStart        Action = "start"
+	ActionRestart      Action = "restart"
+	ActionStop         Action = "stop"
+	ActionCheckpoint   Action = "checkpoint"
+	ActionPause        Action = "pause"
+	ActionUnPause      Action = "unpause"
+	ActionAttach       Action = "attach"
+	ActionDetach       Action = "detach"
+	ActionResize       Action = "resize"
+	ActionUpdate       Action = "update"
+	ActionRename       Action = "rename"
+	ActionKill         Action = "kill"
+	ActionDie          Action = "die"
+	ActionOOM          Action = "oom"
+	ActionDestroy      Action = "destroy"
+	ActionRemove       Action = "remove"
+	ActionCommit       Action = "commit"
+	ActionTop          Action = "top"
+	ActionCopy         Action = "copy"
+	ActionArchivePath  Action = "archive-path"
+	ActionExtractToDir Action = "extract-to-dir"
+	ActionExport       Action = "export"
+	ActionImport       Action = "import"
+	ActionSave         Action = "save"
+	ActionLoad         Action = "load"
+	ActionTag          Action = "tag"
+	ActionUnTag        Action = "untag"
+	ActionPush         Action = "push"
+	ActionPull         Action = "pull"
+	ActionPrune        Action = "prune"
+	ActionDelete       Action = "delete"
+	ActionEnable       Action = "enable"
+	ActionDisable      Action = "disable"
+	ActionConnect      Action = "connect"
+	ActionDisconnect   Action = "disconnect"
+	ActionReload       Action = "reload"
+	ActionMount        Action = "mount"
+	ActionUnmount      Action = "unmount"
+
+	// ActionExecCreate is the prefix used for exec_create events. These
+	// event-actions are commonly followed by a colon and space (": "),
+	// and the command that's defined for the exec, for example:
+	//
+	//	exec_create: /bin/sh -c 'echo hello'
+	//
+	// This is far from ideal; it's a compromise to allow filtering and
+	// to preserve backward-compatibility.
+	ActionExecCreate Action = "exec_create"
+	// ActionExecStart is the prefix used for exec_start events. These
+	// event-actions are commonly followed by a colon and space (": "),
+	// and the command that's defined for the exec, for example:
+	//
+	//	exec_start: /bin/sh -c 'echo hello'
+	//
+	// This is far from ideal; it's a compromise to allow filtering and
+	// to preserve backward-compatibility.
+	ActionExecStart  Action = "exec_start"
+	ActionExecDie    Action = "exec_die"
+	ActionExecDetach Action = "exec_detach"
+
+	// ActionHealthStatus is the prefix to use for health_status events.
+	//
+	// Health-status events can either have a pre-defined status, in which
+	// case the "health_status" action is followed by a colon, or can be
+	// "free-form", in which case they're followed by the output of the
+	// health-check.
+	//
+	// This is far from ideal; it's a compromise to allow filtering and
+	// to preserve backward-compatibility.
+	ActionHealthStatus          Action = "health_status"
+	ActionHealthStatusRunning   Action = "health_status: running"
+	ActionHealthStatusHealthy   Action = "health_status: healthy"
+	ActionHealthStatusUnhealthy Action = "health_status: unhealthy"
+)
+
+// Actor describes something that generates events, like a container, a
+// network, or a volume. It has a defined name and a set of attributes.
+// The container attributes are its labels; other actors can generate
+// these attributes from other properties.
+type Actor struct {
+	ID         string
+	Attributes map[string]string
+}
+
+// Message represents the information an event contains.
+type Message struct {
+	Type   Type
+	Action Action
+	Actor  Actor
+	// Engine events are local scope. Cluster events are swarm scope.
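+	// Possible values are "local" and "swarm".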
+ Scope string `json:"scope,omitempty"` + + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/delete_response.go b/vendor/github.com/moby/moby/api/types/image/delete_response.go new file mode 100644 index 000000000..b19119a38 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/delete_response.go @@ -0,0 +1,18 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package image + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DeleteResponse delete response +// +// swagger:model DeleteResponse +type DeleteResponse struct { + + // The image ID of an image that was deleted + Deleted string `json:"Deleted,omitempty"` + + // The image ID of an image that was untagged + Untagged string `json:"Untagged,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/disk_usage.go b/vendor/github.com/moby/moby/api/types/image/disk_usage.go new file mode 100644 index 000000000..7297813c1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/disk_usage.go @@ -0,0 +1,36 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package image + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DiskUsage represents system data usage for image resources. +// +// swagger:model DiskUsage +type DiskUsage struct { + + // Count of active images. + // + // Example: 1 + ActiveCount int64 `json:"ActiveCount,omitempty"` + + // List of image summaries. + // + Items []Summary `json:"Items,omitempty"` + + // Disk space that can be reclaimed by removing unused images. + // + // Example: 12345678 + Reclaimable int64 `json:"Reclaimable,omitempty"` + + // Count of all images. + // + // Example: 4 + TotalCount int64 `json:"TotalCount,omitempty"` + + // Disk space in use by images. + // + // Example: 98765432 + TotalSize int64 `json:"TotalSize,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/history_response_item.go b/vendor/github.com/moby/moby/api/types/image/history_response_item.go new file mode 100644 index 000000000..3de3181ab --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/history_response_item.go @@ -0,0 +1,38 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package image + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// HistoryResponseItem HistoryResponseItem +// +// individual image layer information in response to ImageHistory operation +// +// swagger:model HistoryResponseItem +type HistoryResponseItem struct { + + // comment + // Required: true + Comment string `json:"Comment"` + + // created + // Required: true + Created int64 `json:"Created"` + + // created by + // Required: true + CreatedBy string `json:"CreatedBy"` + + // Id + // Required: true + ID string `json:"Id"` + + // size + // Required: true + Size int64 `json:"Size"` + + // tags + // Required: true + Tags []string `json:"Tags"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/image.go b/vendor/github.com/moby/moby/api/types/image/image.go new file mode 100644 index 000000000..1c8990ae9 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/image.go @@ -0,0 +1,18 @@ +package image + +import ( + "time" +) + +// Metadata contains engine-local data about the image. 
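+// It is returned as part of [InspectResponse] and is local to the daemon,
+// not part of the image's content-addressable identity.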
+type Metadata struct {
+	// LastTagTime is the date and time at which the image was last tagged.
+	LastTagTime time.Time `json:",omitempty"`
+}
+
+// PruneReport contains the response for Engine API:
+// POST "/images/prune"
+type PruneReport struct {
+	ImagesDeleted  []DeleteResponse
+	SpaceReclaimed uint64
+}
diff --git a/vendor/github.com/moby/moby/api/types/image/image_inspect.go b/vendor/github.com/moby/moby/api/types/image/image_inspect.go
new file mode 100644
index 000000000..66a277e55
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/image/image_inspect.go
@@ -0,0 +1,108 @@
+package image
+
+import (
+	dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
+	"github.com/moby/moby/api/types/storage"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// RootFS describes the image's root filesystem, including the layer IDs.
+type RootFS struct {
+	Type   string   `json:",omitempty"`
+	Layers []string `json:",omitempty"`
+}
+
+// InspectResponse contains response of Engine API:
+// GET "/images/{name:.*}/json"
+type InspectResponse struct {
+	// ID is the content-addressable ID of an image.
+	//
+	// This identifier is a content-addressable digest calculated from the
+	// image's configuration (which includes the digests of layers used by
+	// the image).
+	//
+	// Note that this digest differs from the `RepoDigests` below, which
+	// holds digests of image manifests that reference the image.
+	ID string `json:"Id"`
+
+	// RepoTags is a list of image names/tags in the local image cache that
+	// reference this image.
+	//
+	// Multiple image tags can refer to the same image, and this list may be
+	// empty if no tags reference the image, in which case the image is
+	// "untagged" but can still be referenced by its ID.
+	RepoTags []string
+
+	// RepoDigests is a list of content-addressable digests of locally available
+	// image manifests that the image is referenced from. Multiple manifests can
+	// refer to the same image.
+	//
+	// These digests are usually only available if the image was either pulled
+	// from a registry, or if the image was pushed to a registry, which is when
+	// the manifest is generated and its digest calculated.
+	RepoDigests []string
+
+	// Comment is an optional message that can be set when committing or
+	// importing the image. This field is omitted if not set.
+	Comment string `json:",omitempty"`
+
+	// Created is the date and time at which the image was created, formatted in
+	// RFC 3339 nano-seconds (time.RFC3339Nano).
+	//
+	// This information is only available if present in the image,
+	// and omitted otherwise.
+	Created string `json:",omitempty"`
+
+	// Author is the name of the author that was specified when committing the
+	// image, or as specified through MAINTAINER (deprecated) in the Dockerfile.
+	// This field is omitted if not set.
+	Author string `json:",omitempty"`
+	Config *dockerspec.DockerOCIImageConfig
+
+	// Architecture is the hardware CPU architecture that the image runs on.
+	Architecture string
+
+	// Variant is the CPU architecture variant (presently ARM-only).
+	Variant string `json:",omitempty"`
+
+	// OS is the Operating System the image is built to run on.
+	Os string
+
+	// OsVersion is the version of the Operating System the image is built to
+	// run on (especially for Windows).
+	OsVersion string `json:",omitempty"`
+
+	// Size is the total size of the image including all layers it is composed of.
+	Size int64
+
+	// GraphDriver holds information about the storage driver used to store the
+	// container's and image's filesystem.
+	GraphDriver *storage.DriverData `json:"GraphDriver,omitempty"`
+
+	// RootFS contains information about the image's RootFS, including the
+	// layer IDs.
+	RootFS RootFS
+
+	// Metadata of the image in the local cache.
+	//
+	// This information is local to the daemon, and not part of the image itself.
+	Metadata Metadata
+
+	// Descriptor is the OCI descriptor of the image target.
+	// It's only set if the daemon provides a multi-platform image store.
+	//
+	// WARNING: This is experimental and may change at any time without any backward
+	// compatibility.
+	Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"`
+
+	// Manifests is a list of image manifests available in this image. It
+	// provides a more detailed view of the platform-specific image manifests or
+	// other image-attached data like build attestations.
+	//
+	// Only available if the daemon provides a multi-platform image store, the
+	// client requests manifests, and the client does not request a specific
+	// platform.
+	//
+	// WARNING: This is experimental and may change at any time without any backward
+	// compatibility.
+	Manifests []ManifestSummary `json:"Manifests,omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/image/manifest.go b/vendor/github.com/moby/moby/api/types/image/manifest.go
new file mode 100644
index 000000000..db8a00830
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/image/manifest.go
@@ -0,0 +1,99 @@
+package image
+
+import (
+	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type ManifestKind string
+
+const (
+	ManifestKindImage       ManifestKind = "image"
+	ManifestKindAttestation ManifestKind = "attestation"
+	ManifestKindUnknown     ManifestKind = "unknown"
+)
+
+type ManifestSummary struct {
+	// ID is the content-addressable ID of an image and is the same as the
+	// digest of the image manifest.
+	//
+	// Required: true
+	ID string `json:"ID"`
+
+	// Descriptor is the OCI descriptor of the image.
+	//
+	// Required: true
+	Descriptor ocispec.Descriptor `json:"Descriptor"`
+
+	// Indicates whether all the child content (image config, layers) is
+	// fully available locally.
+	//
+	// Required: true
+	Available bool `json:"Available"`
+
+	// Size is the size information of the content related to this manifest.
+	// Note: These sizes only take the locally available content into account.
+	//
+	// Required: true
+	Size struct {
+		// Content is the size (in bytes) of all the locally present
+		// content in the content store (e.g. image config, layers)
+		// referenced by this manifest and its children.
+		// This only includes blobs in the content store.
+		Content int64 `json:"Content"`
+
+		// Total is the total size (in bytes) of all the locally present
+		// data (both distributable and non-distributable) that's related to
+		// this manifest and its children.
+		// This is equal to the sum of the [Content] size and all the sizes
+		// in the [Size] struct present in the Kind-specific data struct.
+		// For example, for an image kind (Kind == ManifestKindImage),
+		// this would include the size of the image content and unpacked
+		// image snapshots ([Size.Content] + [ImageData.Size.Unpacked]).
+		Total int64 `json:"Total"`
+	} `json:"Size"`
+
+	// Kind is the kind of the image manifest.
+	//
+	// Required: true
+	Kind ManifestKind `json:"Kind"`
+
+	// Fields below are specific to the kind of the image manifest.
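+	//
+	// A consumer should check Kind before reading the kind-specific fields,
+	// for example:
+	//
+	//	switch m.Kind {
+	//	case ManifestKindImage:
+	//		// use m.ImageData
+	//	case ManifestKindAttestation:
+	//		// use m.AttestationData
+	//	}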
+
+	// Present only if Kind == ManifestKindImage.
+	ImageData *ImageProperties `json:"ImageData,omitempty"`
+
+	// Present only if Kind == ManifestKindAttestation.
+	AttestationData *AttestationProperties `json:"AttestationData,omitempty"`
+}
+
+type ImageProperties struct {
+	// Platform is the OCI platform object describing the platform of the image.
+	//
+	// Required: true
+	Platform ocispec.Platform `json:"Platform"`
+
+	Size struct {
+		// Unpacked is the size (in bytes) of the locally unpacked
+		// (uncompressed) image content that's directly usable by the containers
+		// running this image.
+		// It's independent of the distributable content - e.g.
+		// the image might still have unpacked data that is still used by
+		// some container even when the distributable/compressed content is
+		// already gone.
+		//
+		// Required: true
+		Unpacked int64 `json:"Unpacked"`
+	}
+
+	// Containers is an array containing the IDs of the containers that are
+	// using this image.
+	//
+	// Required: true
+	Containers []string `json:"Containers"`
+}
+
+type AttestationProperties struct {
+	// For is the digest of the image manifest that this attestation is for.
+	For digest.Digest `json:"For"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/image/summary.go b/vendor/github.com/moby/moby/api/types/image/summary.go
new file mode 100644
index 000000000..3d4dd165a
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/image/summary.go
@@ -0,0 +1,95 @@
+package image
+
+import ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+
+type Summary struct {
+	// Number of containers using this image. Includes both stopped and running
+	// containers.
+	//
+	// This count is not calculated by default, and depends on which API endpoint
+	// is used. `-1` indicates that the value has not been set / calculated.
+	//
+	// Required: true
+	Containers int64 `json:"Containers"`
+
+	// Date and time at which the image was created as a Unix timestamp
+	// (number of seconds since EPOCH).
+	//
+	// Required: true
+	Created int64 `json:"Created"`
+
+	// ID is the content-addressable ID of an image.
+	//
+	// This identifier is a content-addressable digest calculated from the
+	// image's configuration (which includes the digests of layers used by
+	// the image).
+	//
+	// Note that this digest differs from the `RepoDigests` below, which
+	// holds digests of image manifests that reference the image.
+	//
+	// Required: true
+	ID string `json:"Id"`
+
+	// User-defined key/value metadata.
+	// Required: true
+	Labels map[string]string `json:"Labels"`
+
+	// ID of the parent image.
+	//
+	// Depending on how the image was created, this field may be empty and
+	// is only set for images that were built/created locally. This field
+	// is empty if the image was pulled from an image registry.
+	//
+	// Required: true
+	ParentID string `json:"ParentId"`
+
+	// Descriptor is the OCI descriptor of the image target.
+	// It's only set if the daemon provides a multi-platform image store.
+	//
+	// WARNING: This is experimental and may change at any time without any backward
+	// compatibility.
+	Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"`
+
+	// Manifests is a list of image manifests available in this image. It
+	// provides a more detailed view of the platform-specific image manifests or
+	// other image-attached data like build attestations.
+	//
+	// WARNING: This is experimental and may change at any time without any backward
+	// compatibility.
+	Manifests []ManifestSummary `json:"Manifests,omitempty"`
+
+	// List of content-addressable digests of locally available image manifests
+	// that the image is referenced from. Multiple manifests can refer to the
+	// same image.
+	//
+	// These digests are usually only available if the image was either pulled
+	// from a registry, or if the image was pushed to a registry, which is when
+	// the manifest is generated and its digest calculated.
+	//
+	// Required: true
+	RepoDigests []string `json:"RepoDigests"`
+
+	// List of image names/tags in the local image cache that reference this
+	// image.
+	//
+	// Multiple image tags can refer to the same image, and this list may be
+	// empty if no tags reference the image, in which case the image is
+	// "untagged" but can still be referenced by its ID.
+	//
+	// Required: true
+	RepoTags []string `json:"RepoTags"`
+
+	// Total size of image layers that are shared between this image and other
+	// images.
+	//
+	// This size is not calculated by default. `-1` indicates that the value
+	// has not been set / calculated.
+	//
+	// Required: true
+	SharedSize int64 `json:"SharedSize"`
+
+	// Total size of the image including all layers it is composed of.
+	//
+	// Required: true
+	Size int64 `json:"Size"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/jsonstream/json_error.go b/vendor/github.com/moby/moby/api/types/jsonstream/json_error.go
new file mode 100644
index 000000000..632b25fdf
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/jsonstream/json_error.go
@@ -0,0 +1,12 @@
+package jsonstream
+
+// Error wraps a concrete Code and Message. Code is
+// an integer error code, Message is the error message.
+type Error struct {
+	Code    int    `json:"code,omitempty"`
+	Message string `json:"message,omitempty"`
+}
+
+func (e *Error) Error() string {
+	return e.Message
+}
diff --git a/vendor/github.com/moby/moby/api/types/jsonstream/message.go b/vendor/github.com/moby/moby/api/types/jsonstream/message.go
new file mode 100644
index 000000000..2e1346d41
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/jsonstream/message.go
@@ -0,0 +1,15 @@
+package jsonstream
+
+import "encoding/json"
+
+// Message defines a JSON-stream message. It describes
+// the creation time, origin, status, and ID of the
+// message. It's used for docker events.
+type Message struct {
+	Stream   string           `json:"stream,omitempty"`
+	Status   string           `json:"status,omitempty"`
+	Progress *Progress        `json:"progressDetail,omitempty"`
+	ID       string           `json:"id,omitempty"`
+	Error    *Error           `json:"errorDetail,omitempty"`
+	Aux      *json.RawMessage `json:"aux,omitempty"` // Aux contains out-of-band data, such as digests for push signing and image id after building.
+}
diff --git a/vendor/github.com/moby/moby/api/types/jsonstream/progress.go b/vendor/github.com/moby/moby/api/types/jsonstream/progress.go
new file mode 100644
index 000000000..5c38b3b5e
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/jsonstream/progress.go
@@ -0,0 +1,10 @@
+package jsonstream
+
+// Progress describes a progress message in a JSON stream.
+type Progress struct {
+	Current    int64  `json:"current,omitempty"`    // Current is the current status and value of the progress made towards Total.
+	Total      int64  `json:"total,omitempty"`      // Total is the end value describing when we made 100% progress for an operation.
+	Start      int64  `json:"start,omitempty"`      // Start is the initial value for the operation.
+	HideCounts bool   `json:"hidecounts,omitempty"` // HideCounts, if true, hides the progress count indicator (xB/yB).
+	Units      string `json:"units,omitempty"`      // Units is the unit to print for progress. It defaults to "bytes" if empty.
+}
diff --git a/vendor/github.com/moby/moby/api/types/mount/mount.go b/vendor/github.com/moby/moby/api/types/mount/mount.go
new file mode 100644
index 000000000..090d436c6
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/mount/mount.go
@@ -0,0 +1,157 @@
+package mount
+
+import (
+	"os"
+)
+
+// Type represents the type of a mount.
+type Type string
+
+// Type constants
+const (
+	// TypeBind is the type for mounting a host directory
+	TypeBind Type = "bind"
+	// TypeVolume is the type for remote storage volumes
+	TypeVolume Type = "volume"
+	// TypeTmpfs is the type for mounting tmpfs
+	TypeTmpfs Type = "tmpfs"
+	// TypeNamedPipe is the type for mounting Windows named pipes
+	TypeNamedPipe Type = "npipe"
+	// TypeCluster is the type for Swarm Cluster Volumes.
+	TypeCluster Type = "cluster"
+	// TypeImage is the type for mounting another image's filesystem
+	TypeImage Type = "image"
+)
+
+// Mount represents a mount (volume).
+type Mount struct {
+	Type Type `json:",omitempty"`
+	// Source specifies the name of the mount. Depending on mount type, this
+	// may be a volume name or a host path, or even ignored.
+	// Source is not supported for tmpfs (must be an empty value).
+	Source      string      `json:",omitempty"`
+	Target      string      `json:",omitempty"`
+	ReadOnly    bool        `json:",omitempty"` // attempts recursive read-only if possible
+	Consistency Consistency `json:",omitempty"`
+
+	BindOptions    *BindOptions    `json:",omitempty"`
+	VolumeOptions  *VolumeOptions  `json:",omitempty"`
+	ImageOptions   *ImageOptions   `json:",omitempty"`
+	TmpfsOptions   *TmpfsOptions   `json:",omitempty"`
+	ClusterOptions *ClusterOptions `json:",omitempty"`
+}
+
+// Propagation represents the propagation of a mount.
+type Propagation string
+
+const (
+	// PropagationRPrivate RPRIVATE
+	PropagationRPrivate Propagation = "rprivate"
+	// PropagationPrivate PRIVATE
+	PropagationPrivate Propagation = "private"
+	// PropagationRShared RSHARED
+	PropagationRShared Propagation = "rshared"
+	// PropagationShared SHARED
+	PropagationShared Propagation = "shared"
+	// PropagationRSlave RSLAVE
+	PropagationRSlave Propagation = "rslave"
+	// PropagationSlave SLAVE
+	PropagationSlave Propagation = "slave"
+)
+
+// Propagations is the list of all valid mount propagations
+var Propagations = []Propagation{
+	PropagationRPrivate,
+	PropagationPrivate,
+	PropagationRShared,
+	PropagationShared,
+	PropagationRSlave,
+	PropagationSlave,
+}
+
+// Consistency represents the consistency requirements of a mount.
+type Consistency string
+
+const (
+	// ConsistencyFull guarantees bind mount-like consistency
+	ConsistencyFull Consistency = "consistent"
+	// ConsistencyCached mounts can cache read data and FS structure
+	ConsistencyCached Consistency = "cached"
+	// ConsistencyDelegated mounts can cache read and written data and structure
+	ConsistencyDelegated Consistency = "delegated"
+	// ConsistencyDefault provides "consistent" behavior unless overridden
+	ConsistencyDefault Consistency = "default"
+)
+
+// BindOptions defines options specific to mounts of type "bind".
+type BindOptions struct {
+	Propagation      Propagation `json:",omitempty"`
+	NonRecursive     bool        `json:",omitempty"`
+	CreateMountpoint bool        `json:",omitempty"`
+	// ReadOnlyNonRecursive makes the mount non-recursively read-only, but still leaves the mount recursive
+	// (unless NonRecursive is set to true in conjunction).
+ ReadOnlyNonRecursive bool `json:",omitempty"` + // ReadOnlyForceRecursive raises an error if the mount cannot be made recursively read-only. + ReadOnlyForceRecursive bool `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Subpath string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +type ImageOptions struct { + Subpath string `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + // Options to be passed to the tmpfs mount. An array of arrays. Flag + // options should be provided as 1-length arrays. Other types should be + // provided as 2-length arrays, where the first item is the key and the + // second the value. + Options [][]string `json:",omitempty"` + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. + // + // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56 + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. +} + +// ClusterOptions specifies options for a Cluster volume. +type ClusterOptions struct { + // intentionally empty +} diff --git a/vendor/github.com/moby/moby/api/types/network/config_reference.go b/vendor/github.com/moby/moby/api/types/network/config_reference.go new file mode 100644 index 000000000..1158afe65 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/config_reference.go @@ -0,0 +1,20 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ConfigReference The config-only network source to provide the configuration for +// this network. +// +// swagger:model ConfigReference +type ConfigReference struct { + + // The name of the config-only network that provides the network's + // configuration. The specified network must be an existing config-only + // network. Only network names are allowed, not network IDs. + // + // Example: config_only_network_01 + Network string `json:"Network"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/connect_request.go b/vendor/github.com/moby/moby/api/types/network/connect_request.go new file mode 100644 index 000000000..2ff14d360 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/connect_request.go @@ -0,0 +1,20 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+
+package network
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ConnectRequest NetworkConnectRequest represents the data to be used to connect a container to a network.
+//
+// swagger:model ConnectRequest
+type ConnectRequest struct {
+
+	// The ID or name of the container to connect to the network.
+	// Example: 3613f73ba0e4
+	// Required: true
+	Container string `json:"Container"`
+
+	// endpoint config
+	EndpointConfig *EndpointSettings `json:"EndpointConfig,omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/network/create_response.go b/vendor/github.com/moby/moby/api/types/network/create_response.go
new file mode 100644
index 000000000..199705991
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/network/create_response.go
@@ -0,0 +1,23 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package network
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// CreateResponse NetworkCreateResponse
+//
+// # OK response to NetworkCreate operation
+//
+// swagger:model CreateResponse
+type CreateResponse struct {
+
+	// The ID of the created network.
+	// Example: b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d
+	// Required: true
+	ID string `json:"Id"`
+
+	// Warnings encountered when creating the network.
+	// Required: true
+	Warning string `json:"Warning"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/network/disconnect_request.go b/vendor/github.com/moby/moby/api/types/network/disconnect_request.go
new file mode 100644
index 000000000..7b1f521e7
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/network/disconnect_request.go
@@ -0,0 +1,21 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package network
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// DisconnectRequest NetworkDisconnectRequest represents the data to be used to disconnect a container from a network.
+//
+// swagger:model DisconnectRequest
+type DisconnectRequest struct {
+
+	// The ID or name of the container to disconnect from the network.
+	// Example: 3613f73ba0e4
+	// Required: true
+	Container string `json:"Container"`
+
+	// Force the container to disconnect from the network.
+	// Example: false
+	Force bool `json:"Force"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/network/endpoint.go b/vendor/github.com/moby/moby/api/types/network/endpoint.go
new file mode 100644
index 000000000..cd39c579f
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/network/endpoint.go
@@ -0,0 +1,74 @@
+package network
+
+import (
+	"maps"
+	"net/netip"
+	"slices"
+)
+
+// EndpointSettings stores the network endpoint details
+type EndpointSettings struct {
+	// Configuration data
+	IPAMConfig *EndpointIPAMConfig
+	Links      []string
+	Aliases    []string // Aliases holds the list of extra, user-specified DNS names for this endpoint.
+	DriverOpts map[string]string
+
+	// GwPriority determines which endpoint will provide the default gateway
+	// for the container. The endpoint with the highest priority will be used.
+	// If multiple endpoints have the same priority, they are lexicographically
+	// sorted based on their network name, and the one that sorts first is picked.
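+	// A higher value means a higher priority; an unset value is 0 (the Go
+	// zero value for int).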
+ GwPriority int + + // Operational data + + NetworkID string + EndpointID string + Gateway netip.Addr + IPAddress netip.Addr + + // MacAddress may be used to specify a MAC address when the container is created. + // Once the container is running, it becomes operational data (it may contain a + // generated address). + MacAddress HardwareAddr + IPPrefixLen int + IPv6Gateway netip.Addr + GlobalIPv6Address netip.Addr + GlobalIPv6PrefixLen int + // DNSNames holds all the (non fully qualified) DNS names associated to this + // endpoint. The first entry is used to generate PTR records. + DNSNames []string +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + if es == nil { + return nil + } + + epCopy := *es + epCopy.IPAMConfig = es.IPAMConfig.Copy() + epCopy.Links = slices.Clone(es.Links) + epCopy.Aliases = slices.Clone(es.Aliases) + epCopy.DNSNames = slices.Clone(es.DNSNames) + epCopy.DriverOpts = maps.Clone(es.DriverOpts) + + return &epCopy +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address netip.Addr `json:",omitempty"` + IPv6Address netip.Addr `json:",omitempty"` + LinkLocalIPs []netip.Addr `json:",omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + if cfg == nil { + return nil + } + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = slices.Clone(cfg.LinkLocalIPs) + return &cfgCopy +} diff --git a/vendor/github.com/moby/moby/api/types/network/endpoint_resource.go b/vendor/github.com/moby/moby/api/types/network/endpoint_resource.go new file mode 100644 index 000000000..bf493ad5d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/endpoint_resource.go @@ -0,0 +1,35 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/netip" +) + +// EndpointResource contains network resources allocated and used for a container in a network. +// +// swagger:model EndpointResource +type EndpointResource struct { + + // name + // Example: container_1 + Name string `json:"Name"` + + // endpoint ID + // Example: 628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a + EndpointID string `json:"EndpointID"` + + // mac address + // Example: 02:42:ac:13:00:02 + MacAddress HardwareAddr `json:"MacAddress"` + + // IPv4 address + // Example: 172.19.0.2/16 + IPv4Address netip.Prefix `json:"IPv4Address"` + + // IPv6 address + IPv6Address netip.Prefix `json:"IPv6Address"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/hwaddr.go b/vendor/github.com/moby/moby/api/types/network/hwaddr.go new file mode 100644 index 000000000..9d0c2b4b6 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/hwaddr.go @@ -0,0 +1,37 @@ +package network + +import ( + "encoding" + "fmt" + "net" +) + +// A HardwareAddr represents a physical hardware address. +// It implements [encoding.TextMarshaler] and [encoding.TextUnmarshaler] +// in the absence of go.dev/issue/29678. 
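+//
+// Illustrative round-trip through the text encoding (a sketch, not part of
+// the original API docs):
+//
+//	var hw HardwareAddr
+//	_ = hw.UnmarshalText([]byte("02:42:ac:13:00:02"))
+//	fmt.Println(hw.String()) // 02:42:ac:13:00:02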
+type HardwareAddr net.HardwareAddr + +var _ encoding.TextMarshaler = (HardwareAddr)(nil) +var _ encoding.TextUnmarshaler = (*HardwareAddr)(nil) +var _ fmt.Stringer = (HardwareAddr)(nil) + +func (m *HardwareAddr) UnmarshalText(text []byte) error { + if len(text) == 0 { + *m = nil + return nil + } + hw, err := net.ParseMAC(string(text)) + if err != nil { + return err + } + *m = HardwareAddr(hw) + return nil +} + +func (m HardwareAddr) MarshalText() ([]byte, error) { + return []byte(net.HardwareAddr(m).String()), nil +} + +func (m HardwareAddr) String() string { + return net.HardwareAddr(m).String() +} diff --git a/vendor/github.com/moby/moby/api/types/network/inspect.go b/vendor/github.com/moby/moby/api/types/network/inspect.go new file mode 100644 index 000000000..cded5e608 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/inspect.go @@ -0,0 +1,27 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Inspect The body of the "get network" http response message. +// +// swagger:model Inspect +type Inspect struct { + Network + + // Contains endpoints attached to the network. + // + // Example: {"19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c":{"EndpointID":"628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a","IPv4Address":"172.19.0.2/16","IPv6Address":"","MacAddress":"02:42:ac:13:00:02","Name":"test"}} + Containers map[string]EndpointResource `json:"Containers"` + + // List of services using the network. This field is only present for + // swarm scope networks, and omitted for local scope networks. + // + Services map[string]ServiceInfo `json:"Services,omitempty"` + + // provides runtime information about the network such as the number of allocated IPs. + // + Status *Status `json:"Status,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/ipam.go b/vendor/github.com/moby/moby/api/types/network/ipam.go new file mode 100644 index 000000000..e57be481b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/ipam.go @@ -0,0 +1,22 @@ +package network + +import ( + "net/netip" +) + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string // Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet netip.Prefix `json:",omitempty"` + IPRange netip.Prefix `json:",omitempty"` + Gateway netip.Addr `json:",omitempty"` + AuxAddress map[string]netip.Addr `json:"AuxiliaryAddresses,omitempty"` +} + +type SubnetStatuses = map[netip.Prefix]SubnetStatus diff --git a/vendor/github.com/moby/moby/api/types/network/ipam_status.go b/vendor/github.com/moby/moby/api/types/network/ipam_status.go new file mode 100644 index 000000000..7eb4e8487 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/ipam_status.go @@ -0,0 +1,16 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// IPAMStatus IPAM status
+//
+// swagger:model IPAMStatus
+type IPAMStatus struct {
+
+	// subnets
+	// Example: {"172.16.0.0/16":{"DynamicIPsAvailable":65533,"IPsInUse":3},"2001:db8:abcd:0012::0/96":{"DynamicIPsAvailable":4294967291,"IPsInUse":5}}
+	Subnets SubnetStatuses `json:"Subnets,omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/network/network.go b/vendor/github.com/moby/moby/api/types/network/network.go
new file mode 100644
index 000000000..a7d9c0f6a
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/network/network.go
@@ -0,0 +1,100 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package network
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	timeext "time"
+)
+
+// Network network
+//
+// swagger:model Network
+type Network struct {
+
+	// Name of the network.
+	//
+	// Example: my_network
+	Name string `json:"Name"`
+
+	// ID that uniquely identifies a network on a single machine.
+	//
+	// Example: 7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99
+	ID string `json:"Id"`
+
+	// Date and time at which the network was created in
+	// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+	//
+	// Example: 2016-10-19T04:33:30.360899459Z
+	Created timeext.Time `json:"Created"`
+
+	// The level at which the network exists (e.g. `swarm` for cluster-wide
+	// or `local` for machine level)
+	//
+	// Example: local
+	Scope string `json:"Scope"`
+
+	// The name of the driver used to create the network (e.g. `bridge`,
+	// `overlay`).
+	//
+	// Example: overlay
+	Driver string `json:"Driver"`
+
+	// Whether the network was created with IPv4 enabled.
+	//
+	// Example: true
+	EnableIPv4 bool `json:"EnableIPv4"`
+
+	// Whether the network was created with IPv6 enabled.
+	//
+	// Example: false
+	EnableIPv6 bool `json:"EnableIPv6"`
+
+	// The network's IP Address Management.
+	//
+	IPAM IPAM `json:"IPAM"`
+
+	// Whether the network is created to only allow internal networking
+	// connectivity.
+	//
+	// Example: false
+	Internal bool `json:"Internal"`
+
+	// Whether a global / swarm scope network is manually attachable by regular
+	// containers from workers in swarm mode.
+	//
+	// Example: false
+	Attachable bool `json:"Attachable"`
+
+	// Whether the network is providing the routing-mesh for the swarm cluster.
+	//
+	// Example: false
+	Ingress bool `json:"Ingress"`
+
+	// config from
+	ConfigFrom ConfigReference `json:"ConfigFrom"`
+
+	// Whether the network is a config-only network. Config-only networks are
+	// placeholder networks for network configurations to be used by other
+	// networks. Config-only networks cannot be used directly to run containers
+	// or services.
+	//
+	ConfigOnly bool `json:"ConfigOnly"`
+
+	// Network-specific options used when creating the network.
+	//
+	// Example: {"com.docker.network.bridge.default_bridge":"true","com.docker.network.bridge.enable_icc":"true","com.docker.network.bridge.enable_ip_masquerade":"true","com.docker.network.bridge.host_binding_ipv4":"0.0.0.0","com.docker.network.bridge.name":"docker0","com.docker.network.driver.mtu":"1500"}
+	Options map[string]string `json:"Options"`
+
+	// Metadata specific to the network being created.
+	//
+	// Example: {"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"}
+	Labels map[string]string `json:"Labels"`
+
+	// List of peer nodes for an overlay network. This field is only present
+	// for overlay networks, and omitted for other network types.
+	//
+	Peers []PeerInfo `json:"Peers,omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/network/network_types.go b/vendor/github.com/moby/moby/api/types/network/network_types.go
new file mode 100644
index 000000000..5401f55f8
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/network/network_types.go
@@ -0,0 +1,43 @@
+package network
+
+const (
+	// NetworkDefault is a platform-independent alias to choose the platform-specific default network stack.
+	NetworkDefault = "default"
+	// NetworkHost is the name of the predefined network used when the NetworkMode host is selected (only available on Linux)
+	NetworkHost = "host"
+	// NetworkNone is the name of the predefined network used when the NetworkMode none is selected (available on both Linux and Windows)
+	NetworkNone = "none"
+	// NetworkBridge is the name of the default network on Linux
+	NetworkBridge = "bridge"
+	// NetworkNat is the name of the default network on Windows
+	NetworkNat = "nat"
+)
+
+// CreateRequest is the request message sent to the server for a network create call.
+type CreateRequest struct {
+	Name       string            // Name is the requested name of the network.
+	Driver     string            // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`)
+	Scope      string            // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level).
+	EnableIPv4 *bool             `json:",omitempty"` // EnableIPv4 represents whether to enable IPv4.
+	EnableIPv6 *bool             `json:",omitempty"` // EnableIPv6 represents whether to enable IPv6.
+	IPAM       *IPAM             // IPAM is the network's IP Address Management.
+	Internal   bool              // Internal represents whether the network is internal-only.
+	Attachable bool              // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
+	Ingress    bool              // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
+	ConfigOnly bool              // ConfigOnly creates a config-only network. Config-only networks are placeholder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
+	ConfigFrom *ConfigReference  // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [CreateOptions.ConfigOnly].
+	Options    map[string]string // Options specifies the network-specific options to use when creating the network.
+	Labels     map[string]string // Labels holds metadata specific to the network being created.
+}
+
+// NetworkingConfig represents the container's networking configuration for each of its interfaces.
+// It carries the networking configs specified in the `docker run` and `docker network connect` commands.
+type NetworkingConfig struct {
+	EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
+}
+
+// PruneReport contains the response for Engine API:
+// POST "/networks/prune"
+type PruneReport struct {
+	NetworksDeleted []string
+}
diff --git a/vendor/github.com/moby/moby/api/types/network/peer_info.go b/vendor/github.com/moby/moby/api/types/network/peer_info.go
new file mode 100644
index 000000000..dc88ec16f
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/network/peer_info.go
@@ -0,0 +1,24 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package network
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"net/netip"
+)
+
+// PeerInfo represents one peer of an overlay network.
+//
+// swagger:model PeerInfo
+type PeerInfo struct {
+
+	// ID of the peer-node in the Swarm cluster.
+	// Example: 6869d7c1732b
+	Name string `json:"Name"`
+
+	// IP-address of the peer-node in the Swarm cluster.
+	// Example: 10.133.77.91
+	IP netip.Addr `json:"IP"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/network/port.go b/vendor/github.com/moby/moby/api/types/network/port.go
new file mode 100644
index 000000000..171d9f51d
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/network/port.go
@@ -0,0 +1,346 @@
+package network
+
+import (
+	"errors"
+	"fmt"
+	"iter"
+	"net/netip"
+	"strconv"
+	"strings"
+	"unique"
+)
+
+// IPProtocol represents a network protocol for a port.
+type IPProtocol string
+
+const (
+	TCP  IPProtocol = "tcp"
+	UDP  IPProtocol = "udp"
+	SCTP IPProtocol = "sctp"
+)
+
+// Sentinel port proto value for zero Port and PortRange values.
+var protoZero unique.Handle[IPProtocol]
+
+// Port is a type representing a single port number and protocol in the format "<port>/[<proto>]".
+//
+// The zero port value, i.e. Port{}, is invalid; use [ParsePort] to create a valid Port value.
+type Port struct {
+	num   uint16
+	proto unique.Handle[IPProtocol]
+}
+
+// ParsePort parses s as a [Port].
+//
+// It normalizes the provided protocol such that "80/tcp", "80/TCP", and "80/tCp" are equivalent.
+// If a port number is provided, but no protocol, the default ("tcp") protocol is returned.
+func ParsePort(s string) (Port, error) {
+	if s == "" {
+		return Port{}, errors.New("invalid port: value is empty")
+	}
+
+	port, proto, _ := strings.Cut(s, "/")
+
+	portNum, err := parsePortNumber(port)
+	if err != nil {
+		return Port{}, fmt.Errorf("invalid port '%s': %w", port, err)
+	}
+
+	normalizedPortProto := normalizePortProto(proto)
+	return Port{num: portNum, proto: normalizedPortProto}, nil
+}
+
+// MustParsePort calls [ParsePort](s) and panics on error.
+//
+// It is intended for use in tests with hard-coded strings.
+func MustParsePort(s string) Port {
+	p, err := ParsePort(s)
+	if err != nil {
+		panic(err)
+	}
+	return p
+}
+
+// PortFrom returns a [Port] with the given number and protocol.
+//
+// If no protocol is specified (i.e. proto == ""), then PortFrom returns Port{}, false.
+func PortFrom(num uint16, proto IPProtocol) (p Port, ok bool) {
+	if proto == "" {
+		return Port{}, false
+	}
+	normalized := normalizePortProto(string(proto))
+	return Port{num: num, proto: normalized}, true
+}
+
+// Num returns p's port number.
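+// For example, MustParsePort("80/tcp").Num() == 80.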
+func (p Port) Num() uint16 {
+	return p.num
+}
+
+// Proto returns p's network protocol.
+func (p Port) Proto() IPProtocol {
+	return p.proto.Value()
+}
+
+// IsZero reports whether p is the zero value.
+func (p Port) IsZero() bool {
+	return p.proto == protoZero
+}
+
+// IsValid reports whether p is an initialized valid port (not the zero value).
+func (p Port) IsValid() bool {
+	return p.proto != protoZero
+}
+
+// String returns a string representation of the port in the format "<port>/<proto>".
+// If the port is the zero value, it returns "invalid port".
+func (p Port) String() string {
+	switch p.proto {
+	case protoZero:
+		return "invalid port"
+	default:
+		return string(p.AppendTo(nil))
+	}
+}
+
+// AppendText implements [encoding.TextAppender] interface.
+// It is the same as [Port.AppendTo] but returns an error to satisfy the interface.
+func (p Port) AppendText(b []byte) ([]byte, error) {
+	return p.AppendTo(b), nil
+}
+
+// AppendTo appends a text encoding of p to b and returns the extended buffer.
+func (p Port) AppendTo(b []byte) []byte {
+	if p.IsZero() {
+		return b
+	}
+	return fmt.Appendf(b, "%d/%s", p.num, p.proto.Value())
+}
+
+// MarshalText implements [encoding.TextMarshaler] interface.
+func (p Port) MarshalText() ([]byte, error) {
+	return p.AppendText(nil)
+}
+
+// UnmarshalText implements [encoding.TextUnmarshaler] interface.
+func (p *Port) UnmarshalText(text []byte) error {
+	if len(text) == 0 {
+		*p = Port{}
+		return nil
+	}
+
+	port, err := ParsePort(string(text))
+	if err != nil {
+		return err
+	}
+
+	*p = port
+	return nil
+}
+
+// Range returns a [PortRange] representing the single port.
+func (p Port) Range() PortRange {
+	return PortRange{start: p.num, end: p.num, proto: p.proto}
+}
+
+// PortSet is a collection of structs indexed by [Port].
+type PortSet = map[Port]struct{}
+
+// PortBinding represents a binding between a Host IP address and a Host Port.
+type PortBinding struct {
+	// HostIP is the host IP Address
+	HostIP netip.Addr `json:"HostIp"`
+	// HostPort is the host port number
+	HostPort string `json:"HostPort"`
+}
+
+// PortMap is a collection of [PortBinding] indexed by [Port].
+type PortMap = map[Port][]PortBinding
+
+// PortRange represents a range of port numbers and a protocol in the format "8000-9000/tcp".
+//
+// The zero port range value, i.e. PortRange{}, is invalid; use [ParsePortRange] to create a valid PortRange value.
+type PortRange struct {
+	start uint16
+	end   uint16
+	proto unique.Handle[IPProtocol]
+}
+
+// ParsePortRange parses s as a [PortRange].
+//
+// It normalizes the provided protocol such that "80-90/tcp", "80-90/TCP", and "80-90/tCp" are equivalent.
+// If a port number range is provided, but no protocol, the default ("tcp") protocol is returned.
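+//
+// Illustrative usage (a sketch):
+//
+//	pr, err := ParsePortRange("8000-9000/udp")
+//	if err != nil {
+//		// handle malformed input
+//	}
+//	fmt.Println(pr.Start(), pr.End(), pr.Proto()) // 8000 9000 udp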
+func ParsePortRange(s string) (PortRange, error) {
+	if s == "" {
+		return PortRange{}, errors.New("invalid port range: value is empty")
+	}
+
+	portRange, proto, _ := strings.Cut(s, "/")
+
+	start, end, ok := strings.Cut(portRange, "-")
+	startVal, err := parsePortNumber(start)
+	if err != nil {
+		return PortRange{}, fmt.Errorf("invalid start port '%s': %w", start, err)
+	}
+
+	portProto := normalizePortProto(proto)
+
+	if !ok || start == end {
+		return PortRange{start: startVal, end: startVal, proto: portProto}, nil
+	}
+
+	endVal, err := parsePortNumber(end)
+	if err != nil {
+		return PortRange{}, fmt.Errorf("invalid end port '%s': %w", end, err)
+	}
+	if endVal < startVal {
+		return PortRange{}, errors.New("invalid port range: " + s)
+	}
+	return PortRange{start: startVal, end: endVal, proto: portProto}, nil
+}
+
+// MustParsePortRange calls [ParsePortRange](s) and panics on error.
+// It is intended for use in tests with hard-coded strings.
+func MustParsePortRange(s string) PortRange {
+	pr, err := ParsePortRange(s)
+	if err != nil {
+		panic(err)
+	}
+	return pr
+}
+
+// PortRangeFrom returns a [PortRange] with the given start and end port numbers and protocol.
+//
+// If end < start or no protocol is specified (i.e. proto == ""), then PortRangeFrom returns PortRange{}, false.
+func PortRangeFrom(start, end uint16, proto IPProtocol) (pr PortRange, ok bool) {
+	if end < start || proto == "" {
+		return PortRange{}, false
+	}
+	normalized := normalizePortProto(string(proto))
+	return PortRange{start: start, end: end, proto: normalized}, true
+}
+
+// Start returns pr's start port number.
+func (pr PortRange) Start() uint16 {
+	return pr.start
+}
+
+// End returns pr's end port number.
+func (pr PortRange) End() uint16 {
+	return pr.end
+}
+
+// Proto returns pr's network protocol.
+func (pr PortRange) Proto() IPProtocol {
+	return pr.proto.Value()
+}
+
+// IsZero reports whether pr is the zero value.
+func (pr PortRange) IsZero() bool {
+	return pr.proto == protoZero
+}
+
+// IsValid reports whether pr is an initialized valid port range (not the zero value).
+func (pr PortRange) IsValid() bool {
+	return pr.proto != protoZero
+}
+
+// String returns a string representation of the port range in the format "<start>-<end>/<proto>",
+// or "<port>/<proto>" if start == end.
+// If the port range is the zero value, it returns "invalid port range".
+func (pr PortRange) String() string {
+	switch pr.proto {
+	case protoZero:
+		return "invalid port range"
+	default:
+		return string(pr.AppendTo(nil))
+	}
+}
+
+// AppendText implements [encoding.TextAppender] interface.
+// It is the same as [PortRange.AppendTo] but returns an error to satisfy the interface.
+func (pr PortRange) AppendText(b []byte) ([]byte, error) {
+	return pr.AppendTo(b), nil
+}
+
+// AppendTo appends a text encoding of pr to b and returns the extended buffer.
+func (pr PortRange) AppendTo(b []byte) []byte {
+	if pr.IsZero() {
+		return b
+	}
+	if pr.start == pr.end {
+		return fmt.Appendf(b, "%d/%s", pr.start, pr.proto.Value())
+	}
+	return fmt.Appendf(b, "%d-%d/%s", pr.start, pr.end, pr.proto.Value())
+}
+
+// MarshalText implements [encoding.TextMarshaler] interface.
+func (pr PortRange) MarshalText() ([]byte, error) {
+	return pr.AppendText(nil)
+}
+
+// UnmarshalText implements [encoding.TextUnmarshaler] interface.
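+// An empty input yields the zero PortRange, mirroring [Port.UnmarshalText].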
+func (pr *PortRange) UnmarshalText(text []byte) error { + if len(text) == 0 { + *pr = PortRange{} + return nil + } + + portRange, err := ParsePortRange(string(text)) + if err != nil { + return err + } + *pr = portRange + return nil +} + +// Range returns pr. +func (pr PortRange) Range() PortRange { + return pr +} + +// All returns an iterator over all the individual ports in the range. +// +// For example: +// +// for port := range pr.All() { +// // ... +// } +func (pr PortRange) All() iter.Seq[Port] { + return func(yield func(Port) bool) { + for i := uint32(pr.Start()); i <= uint32(pr.End()); i++ { + if !yield(Port{num: uint16(i), proto: pr.proto}) { + return + } + } + } +} + +// parsePortNumber parses rawPort into an int, unwrapping strconv errors +// and returning a single "out of range" error for any value outside 0–65535. +func parsePortNumber(rawPort string) (uint16, error) { + if rawPort == "" { + return 0, errors.New("value is empty") + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + var numErr *strconv.NumError + if errors.As(err, &numErr) { + err = numErr.Err + } + return 0, err + } + + return uint16(port), nil +} + +// normalizePortProto normalizes the protocol string such that "tcp", "TCP", and "tCp" are equivalent. +// If proto is not specified, it defaults to "tcp". +func normalizePortProto(proto string) unique.Handle[IPProtocol] { + if proto == "" { + return unique.Make(TCP) + } + + proto = strings.ToLower(proto) + + return unique.Make(IPProtocol(proto)) +} diff --git a/vendor/github.com/moby/moby/api/types/network/service_info.go b/vendor/github.com/moby/moby/api/types/network/service_info.go new file mode 100644 index 000000000..fdd92f161 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/service_info.go @@ -0,0 +1,28 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/netip" +) + +// ServiceInfo represents service parameters with the list of service's tasks +// +// swagger:model ServiceInfo +type ServiceInfo struct { + + // v IP + VIP netip.Addr `json:"VIP"` + + // ports + Ports []string `json:"Ports"` + + // local l b index + LocalLBIndex int `json:"LocalLBIndex"` + + // tasks + Tasks []Task `json:"Tasks"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/status.go b/vendor/github.com/moby/moby/api/types/network/status.go new file mode 100644 index 000000000..94f4b4b2e --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/status.go @@ -0,0 +1,15 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Status provides runtime information about the network such as the number of allocated IPs. +// +// swagger:model Status +type Status struct { + + // IPAM + IPAM IPAMStatus `json:"IPAM"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/subnet_status.go b/vendor/github.com/moby/moby/api/types/network/subnet_status.go new file mode 100644 index 000000000..dd62429f5 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/subnet_status.go @@ -0,0 +1,20 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. 
diff --git a/vendor/github.com/moby/moby/api/types/network/subnet_status.go b/vendor/github.com/moby/moby/api/types/network/subnet_status.go
new file mode 100644
index 000000000..dd62429f5
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/network/subnet_status.go
@@ -0,0 +1,20 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package network
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// SubnetStatus subnet status
+//
+// swagger:model SubnetStatus
+type SubnetStatus struct {
+
+	// Number of IP addresses in the subnet that are in use or reserved and are therefore unavailable for allocation, saturating at 2^64 - 1.
+	//
+	IPsInUse uint64 `json:"IPsInUse"`
+
+	// Number of IP addresses within the network's IPRange for the subnet that are available for allocation, saturating at 2^64 - 1.
+	//
+	DynamicIPsAvailable uint64 `json:"DynamicIPsAvailable"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/network/summary.go b/vendor/github.com/moby/moby/api/types/network/summary.go
new file mode 100644
index 000000000..3f50ce227
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/network/summary.go
@@ -0,0 +1,13 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package network
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Summary Network list response item
+//
+// swagger:model Summary
+type Summary struct {
+	Network
+}
diff --git a/vendor/github.com/moby/moby/api/types/network/task.go b/vendor/github.com/moby/moby/api/types/network/task.go
new file mode 100644
index 000000000..a547523a4
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/network/task.go
@@ -0,0 +1,28 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package network
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"net/netip"
+)
+
+// Task carries the information about one backend task
+//
+// swagger:model Task
+type Task struct {
+
+	// name
+	Name string `json:"Name"`
+
+	// endpoint ID
+	EndpointID string `json:"EndpointID"`
+
+	// endpoint IP
+	EndpointIP netip.Addr `json:"EndpointIP"`
+
+	// info
+	Info map[string]string `json:"Info"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/plugin/.gitignore b/vendor/github.com/moby/moby/api/types/plugin/.gitignore
new file mode 100644
index 000000000..5cea8434d
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/plugin/.gitignore
@@ -0,0 +1 @@
+testdata/rapid/**
diff --git a/vendor/github.com/moby/moby/api/types/plugin/capability.go b/vendor/github.com/moby/moby/api/types/plugin/capability.go
new file mode 100644
index 000000000..d53f77a1f
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/plugin/capability.go
@@ -0,0 +1,55 @@
+package plugin
+
+import (
+	"bytes"
+	"encoding"
+	"fmt"
+	"strings"
+)
+
+type CapabilityID struct {
+	Capability string
+	Prefix     string
+	Version    string
+}
+
+var (
+	_ fmt.Stringer             = CapabilityID{}
+	_ encoding.TextUnmarshaler = (*CapabilityID)(nil)
+	_ encoding.TextMarshaler   = CapabilityID{}
+)
+
+// String implements [fmt.Stringer] for CapabilityID
+func (t CapabilityID) String() string {
+	return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
+}
+
+// UnmarshalText implements [encoding.TextUnmarshaler] for CapabilityID
+func (t *CapabilityID) UnmarshalText(p []byte) error {
+	fqcap, version, _ := bytes.Cut(p, []byte{'/'})
+	idx := bytes.LastIndexByte(fqcap, '.')
+	if idx < 0 {
+		t.Prefix = ""
+		t.Capability = string(fqcap)
+	} else {
+		t.Prefix = string(fqcap[:idx])
+		t.Capability = string(fqcap[idx+1:])
+	}
+	t.Version = string(version)
+	return nil
+}
+
+// MarshalText implements [encoding.TextMarshaler] for CapabilityID
+func (t CapabilityID) MarshalText() ([]byte, error) {
+	// Assert that the value can be round-tripped successfully.
+	if strings.Contains(t.Capability, ".") {
+		return nil, fmt.Errorf("capability %q cannot contain a dot", t.Capability)
+	}
+	if strings.Contains(t.Prefix, "/") {
+		return nil, fmt.Errorf("prefix %q cannot contain a slash", t.Prefix)
+	}
+	if strings.Contains(t.Capability, "/") {
+		return nil, fmt.Errorf("capability %q cannot contain a slash", t.Capability)
+	}
+	return []byte(t.String()), nil
+}
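CapabilityID splits on the last '.' and the first '/'; MarshalText re-validates so the value round-trips. A small sketch (hypothetical capability string):

```go
package main

import (
	"fmt"

	"github.com/moby/moby/api/types/plugin"
)

func main() {
	var cid plugin.CapabilityID
	_ = cid.UnmarshalText([]byte("docker.volumedriver/1.0")) // always returns nil
	fmt.Println(cid.Prefix, cid.Capability, cid.Version)     // docker volumedriver 1.0

	b, err := cid.MarshalText()
	fmt.Println(string(b), err) // docker.volumedriver/1.0 <nil>
}
```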
diff --git a/vendor/github.com/moby/moby/api/types/plugin/device.go b/vendor/github.com/moby/moby/api/types/plugin/device.go
new file mode 100644
index 000000000..ae9617704
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/plugin/device.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package plugin
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Device device
+//
+// swagger:model Device
+type Device struct {
+
+	// description
+	// Required: true
+	Description string `json:"Description"`
+
+	// name
+	// Required: true
+	Name string `json:"Name"`
+
+	// path
+	// Example: /dev/fuse
+	// Required: true
+	Path *string `json:"Path"`
+
+	// settable
+	// Required: true
+	Settable []string `json:"Settable"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/plugin/env.go b/vendor/github.com/moby/moby/api/types/plugin/env.go
new file mode 100644
index 000000000..dcbe0b762
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/plugin/env.go
@@ -0,0 +1,28 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package plugin
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Env env
+//
+// swagger:model Env
+type Env struct {
+
+	// description
+	// Required: true
+	Description string `json:"Description"`
+
+	// name
+	// Required: true
+	Name string `json:"Name"`
+
+	// settable
+	// Required: true
+	Settable []string `json:"Settable"`
+
+	// value
+	// Required: true
+	Value *string `json:"Value"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/plugin/mount.go b/vendor/github.com/moby/moby/api/types/plugin/mount.go
new file mode 100644
index 000000000..7970306cc
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/plugin/mount.go
@@ -0,0 +1,46 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package plugin
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Mount mount
+//
+// swagger:model Mount
+type Mount struct {
+
+	// description
+	// Example: This is a mount that's used by the plugin.
+ // Required: true + Description string `json:"Description"` + + // destination + // Example: /mnt/state + // Required: true + Destination string `json:"Destination"` + + // name + // Example: some-mount + // Required: true + Name string `json:"Name"` + + // options + // Example: ["rbind","rw"] + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Example: /var/lib/docker/plugins/ + // Required: true + Source *string `json:"Source"` + + // type + // Example: bind + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin/plugin.go b/vendor/github.com/moby/moby/api/types/plugin/plugin.go new file mode 100644 index 000000000..3305170d5 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin/plugin.go @@ -0,0 +1,237 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package plugin + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config Config `json:"Config"` + + // True if the plugin is running. False if the plugin is not running, only installed. + // Example: true + // Required: true + Enabled bool `json:"Enabled"` + + // Id + // Example: 5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078 + ID string `json:"Id,omitempty"` + + // name + // Example: tiborvass/sample-volume-plugin + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + // Example: localhost:5000/tiborvass/sample-volume-plugin:latest + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings Settings `json:"Settings"` +} + +// Config The config of a plugin. 
+// +// swagger:model Config +type Config struct { + + // args + // Required: true + Args Args `json:"Args"` + + // description + // Example: A sample volume plugin for Docker + // Required: true + Description string `json:"Description"` + + // documentation + // Example: https://docs.docker.com/engine/extend/plugins/ + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Example: ["/usr/bin/sample-volume-plugin","/data"] + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Example: [{"Description":"If set, prints debug messages","Name":"DEBUG","Settable":null,"Value":"0"}] + // Required: true + Env []Env `json:"Env"` + + // interface + // Required: true + Interface Interface `json:"Interface"` + + // ipc host + // Example: false + // Required: true + IpcHost bool `json:"IpcHost"` + + // linux + // Required: true + Linux LinuxConfig `json:"Linux"` + + // mounts + // Required: true + Mounts []Mount `json:"Mounts"` + + // network + // Required: true + Network NetworkConfig `json:"Network"` + + // pid host + // Example: false + // Required: true + PidHost bool `json:"PidHost"` + + // propagated mount + // Example: /mnt/volumes + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User User `json:"User,omitempty"` + + // work dir + // Example: /bin/ + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *RootFS `json:"rootfs,omitempty"` +} + +// Args args +// +// swagger:model Args +type Args struct { + + // description + // Example: command line arguments + // Required: true + Description string `json:"Description"` + + // name + // Example: args + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// Interface The interface between Docker and the plugin +// +// swagger:model Interface +type Interface struct { + + // Protocol to use for clients connecting to the plugin. 
+ // Example: some.protocol/v1.0 + // Enum: ["","moby.plugins.http/v1"] + ProtocolScheme string `json:"ProtocolScheme,omitempty"` + + // socket + // Example: plugins.sock + // Required: true + Socket string `json:"Socket"` + + // types + // Example: ["docker.volumedriver/1.0"] + // Required: true + Types []CapabilityID `json:"Types"` +} + +// LinuxConfig linux config +// +// swagger:model LinuxConfig +type LinuxConfig struct { + + // allow all devices + // Example: false + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Example: ["CAP_SYS_ADMIN","CAP_SYSLOG"] + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []Device `json:"Devices"` +} + +// NetworkConfig network config +// +// swagger:model NetworkConfig +type NetworkConfig struct { + + // type + // Example: host + // Required: true + Type string `json:"Type"` +} + +// RootFS root f s +// +// swagger:model RootFS +type RootFS struct { + + // diff ids + // Example: ["sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887","sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"] + DiffIds []string `json:"diff_ids"` + + // type + // Example: layers + Type string `json:"type,omitempty"` +} + +// User user +// +// swagger:model User +type User struct { + + // g ID + // Example: 1000 + GID uint32 `json:"GID,omitempty"` + + // UID + // Example: 1000 + UID uint32 `json:"UID,omitempty"` +} + +// Settings user-configurable settings for the plugin. +// +// swagger:model Settings +type Settings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []Device `json:"Devices"` + + // env + // Example: ["DEBUG=0"] + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []Mount `json:"Mounts"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin/plugin_responses.go b/vendor/github.com/moby/moby/api/types/plugin/plugin_responses.go new file mode 100644 index 000000000..91b327eb4 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin/plugin_responses.go @@ -0,0 +1,33 @@ +package plugin + +import ( + "sort" +) + +// ListResponse contains the response for the Engine API +type ListResponse []Plugin + +// Privilege describes a permission the user has to accept +// upon installing a plugin. +type Privilege struct { + Name string + Description string + Value []string +} + +// Privileges is a list of Privilege +type Privileges []Privilege + +func (s Privileges) Len() int { + return len(s) +} + +func (s Privileges) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +func (s Privileges) Swap(i, j int) { + sort.Strings(s[i].Value) + sort.Strings(s[j].Value) + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/moby/moby/api/types/registry/auth_response.go b/vendor/github.com/moby/moby/api/types/registry/auth_response.go new file mode 100644 index 000000000..94c2e1bb3 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/registry/auth_response.go @@ -0,0 +1,21 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package registry + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// AuthResponse An identity token was generated successfully. +// +// swagger:model AuthResponse +type AuthResponse struct { + + // An opaque token used to authenticate a user after a successful login + // Example: 9cbaf023786cd7... 
+ IdentityToken string `json:"IdentityToken,omitempty"` + + // The status of the authentication + // Example: Login Succeeded + // Required: true + Status string `json:"Status"` +} diff --git a/vendor/github.com/moby/moby/api/types/registry/authconfig.go b/vendor/github.com/moby/moby/api/types/registry/authconfig.go new file mode 100644 index 000000000..b612feeba --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/registry/authconfig.go @@ -0,0 +1,35 @@ +package registry + +import "context" + +// AuthHeader is the name of the header used to send encoded registry +// authorization credentials for registry operations (push/pull). +const AuthHeader = "X-Registry-Auth" + +// RequestAuthConfig is a function interface that clients can supply +// to retry operations after getting an authorization error. +// +// The function must return the [AuthHeader] value ([AuthConfig]), encoded +// in base64url format ([RFC4648, section 5]), which can be decoded by +// [DecodeAuthConfig]. +// +// It must return an error if the privilege request fails. +// +// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 +type RequestAuthConfig func(context.Context) (string, error) + +// AuthConfig contains authorization information for connecting to a Registry. +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/registry/registry.go b/vendor/github.com/moby/moby/api/types/registry/registry.go new file mode 100644 index 000000000..7361228d6 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/registry/registry.go @@ -0,0 +1,67 @@ +package registry + +import ( + "net/netip" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ServiceConfig stores daemon registry services configuration. +type ServiceConfig struct { + InsecureRegistryCIDRs []netip.Prefix `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. 
+	Secure bool
+	// Official indicates whether this is an official registry
+	Official bool
+}
+
+// DistributionInspect describes the result obtained from contacting the
+// registry to retrieve image metadata
+type DistributionInspect struct {
+	// Descriptor contains information about the manifest, including
+	// the content addressable digest
+	Descriptor ocispec.Descriptor
+	// Platforms contains the list of platforms supported by the image,
+	// obtained by parsing the manifest
+	Platforms []ocispec.Platform
+}
diff --git a/vendor/github.com/moby/moby/api/types/registry/search.go b/vendor/github.com/moby/moby/api/types/registry/search.go
new file mode 100644
index 000000000..bd79462f6
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/registry/search.go
@@ -0,0 +1,27 @@
+package registry
+
+// SearchResult describes a search result returned from a registry
+type SearchResult struct {
+	// StarCount indicates the number of stars this repository has
+	StarCount int `json:"star_count"`
+	// IsOfficial is true if the result is from an official repository.
+	IsOfficial bool `json:"is_official"`
+	// Name is the name of the repository
+	Name string `json:"name"`
+	// IsAutomated indicates whether the result is automated.
+	//
+	// Deprecated: the "is_automated" field is deprecated and will always be "false".
+	IsAutomated bool `json:"is_automated"`
+	// Description is a textual description of the repository
+	Description string `json:"description"`
+}
+
+// SearchResults lists a collection of search results returned from a registry
+type SearchResults struct {
+	// Query contains the query string that generated the search results
+	Query string `json:"query"`
+	// NumResults indicates the number of results the query returned
+	NumResults int `json:"num_results"`
+	// Results is a slice containing the actual results for the search
+	Results []SearchResult `json:"results"`
+}
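The RequestAuthConfig contract in authconfig.go above expects the AuthConfig JSON to be base64url-encoded (RFC 4648, section 5) into the X-Registry-Auth header. A sketch of that encoding, with made-up credentials:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/moby/moby/api/types/registry"
)

func main() {
	auth := registry.AuthConfig{
		Username:      "someuser", // hypothetical credentials
		Password:      "somepass",
		ServerAddress: "registry.example.com",
	}
	buf, err := json.Marshal(auth)
	if err != nil {
		panic(err)
	}
	// base64url, i.e. the "URL and Filename safe" alphabet.
	fmt.Println(registry.AuthHeader+":", base64.URLEncoding.EncodeToString(buf))
}
```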
diff --git a/vendor/github.com/moby/moby/api/types/storage/driver_data.go b/vendor/github.com/moby/moby/api/types/storage/driver_data.go
new file mode 100644
index 000000000..65d5b4c20
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/storage/driver_data.go
@@ -0,0 +1,27 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package storage
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// DriverData Information about the storage driver used to store the container's and
+// image's filesystem.
+//
+// swagger:model DriverData
+type DriverData struct {
+
+	// Low-level storage metadata, provided as key/value pairs.
+	//
+	// This information is driver-specific, and depends on the storage-driver
+	// in use, and should be used for informational purposes only.
+	//
+	// Example: {"MergedDir":"/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged","UpperDir":"/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff","WorkDir":"/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work"}
+	// Required: true
+	Data map[string]string `json:"Data"`
+
+	// Name of the storage driver.
+	// Example: overlay2
+	// Required: true
+	Name string `json:"Name"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage.go b/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage.go
new file mode 100644
index 000000000..d82f2b6bc
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage.go
@@ -0,0 +1,16 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package storage
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// RootFSStorage Information about the storage used for the container's root filesystem.
+//
+// swagger:model RootFSStorage
+type RootFSStorage struct {
+
+	// Information about the snapshot used for the container's root filesystem.
+	//
+	Snapshot *RootFSStorageSnapshot `json:"Snapshot,omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage_snapshot.go b/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage_snapshot.go
new file mode 100644
index 000000000..dd2b82d24
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage_snapshot.go
@@ -0,0 +1,15 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package storage
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// RootFSStorageSnapshot Information about a snapshot backend of the container's root filesystem.
+//
+// swagger:model RootFSStorageSnapshot
+type RootFSStorageSnapshot struct {
+
+	// Name of the snapshotter.
+	Name string `json:"Name,omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/storage/storage.go b/vendor/github.com/moby/moby/api/types/storage/storage.go
new file mode 100644
index 000000000..77843db97
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/storage/storage.go
@@ -0,0 +1,16 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package storage
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Storage Information about the storage used by the container.
+//
+// swagger:model Storage
+type Storage struct {
+
+	// Information about the storage used for the container's root filesystem.
+	//
+	RootFS *RootFSStorage `json:"RootFS,omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/swarm/common.go b/vendor/github.com/moby/moby/api/types/swarm/common.go
new file mode 100644
index 000000000..b42812e03
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/swarm/common.go
@@ -0,0 +1,48 @@
+package swarm
+
+import (
+	"strconv"
+	"time"
+)
+
+// Version represents the internal object version.
+type Version struct {
+	Index uint64 `json:",omitempty"`
+}
+
+// String implements the fmt.Stringer interface.
+func (v Version) String() string {
+	return strconv.FormatUint(v.Index, 10)
+}
+
+// Meta is a base object inherited by most of the other ones.
+type Meta struct {
+	Version   Version   `json:",omitempty"`
+	CreatedAt time.Time `json:",omitempty"`
+	UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+	Name   string            `json:",omitempty"`
+	Labels map[string]string `json:"Labels"`
+}
+
+// Driver represents a driver (network, logging, secrets backend).
+type Driver struct {
+	Name    string            `json:",omitempty"`
+	Options map[string]string `json:",omitempty"`
+}
+
+// TLSInfo represents the TLS information about what CA certificate is trusted,
+// and who the issuer for a TLS certificate is
+type TLSInfo struct {
+	// TrustRoot is the trusted CA root certificate in PEM format
+	TrustRoot string `json:",omitempty"`
+
+	// CertIssuer is the raw subject bytes of the issuer
+	CertIssuerSubject []byte `json:",omitempty"`
+
+	// CertIssuerPublicKey is the raw public key bytes of the issuer
+	CertIssuerPublicKey []byte `json:",omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/swarm/config.go b/vendor/github.com/moby/moby/api/types/swarm/config.go
new file mode 100644
index 000000000..b029f2af8
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/swarm/config.go
@@ -0,0 +1,55 @@
+package swarm
+
+import (
+	"os"
+)
+
+// Config represents a config.
+type Config struct {
+	ID string
+	Meta
+	Spec ConfigSpec
+}
+
+// ConfigSpec represents a config specification from a config in swarm
+type ConfigSpec struct {
+	Annotations
+
+	// Data is the data to store as a config.
+	//
+	// The maximum allowed size is 1000KB, as defined in [MaxConfigSize].
+	//
+	// [MaxConfigSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize
+	Data []byte `json:",omitempty"`
+
+	// Templating controls whether and how to evaluate the config payload as
+	// a template. If it is not set, no templating is used.
+	Templating *Driver `json:",omitempty"`
+}
+
+// ConfigReferenceFileTarget is a file target in a config reference
+type ConfigReferenceFileTarget struct {
+	Name string
+	UID  string
+	GID  string
+	Mode os.FileMode
+}
+
+// ConfigReferenceRuntimeTarget is a target for a config specifying that it
+// isn't mounted into the container but instead has some other purpose.
+type ConfigReferenceRuntimeTarget struct{}
+
+// ConfigReference is a reference to a config in swarm
+type ConfigReference struct {
+	File       *ConfigReferenceFileTarget    `json:",omitempty"`
+	Runtime    *ConfigReferenceRuntimeTarget `json:",omitempty"`
+	ConfigID   string
+	ConfigName string
+}
+
+// ConfigCreateResponse contains the information returned to a client
+// on the creation of a new config.
+type ConfigCreateResponse struct {
+	// ID is the id of the created config.
+	ID string
+}
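A sketch of filling in the ConfigSpec defined above (the name and payload are illustrative; Data is capped at 1000KB per the field docs):

```go
package main

import (
	"fmt"

	"github.com/moby/moby/api/types/swarm"
)

func main() {
	spec := swarm.ConfigSpec{
		Annotations: swarm.Annotations{
			Name:   "app-settings", // hypothetical config name
			Labels: map[string]string{"env": "dev"},
		},
		Data: []byte("key=value\n"),
	}
	// Annotations is embedded, so Name is promoted.
	fmt.Println(spec.Name, len(spec.Data))
}
```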
diff --git a/vendor/github.com/moby/moby/api/types/swarm/container.go b/vendor/github.com/moby/moby/api/types/swarm/container.go
new file mode 100644
index 000000000..268565ec8
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/swarm/container.go
@@ -0,0 +1,120 @@
+package swarm
+
+import (
+	"net/netip"
+	"time"
+
+	"github.com/moby/moby/api/types/container"
+	"github.com/moby/moby/api/types/mount"
+)
+
+// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
+// Detailed documentation is available in:
+// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
+// `nameserver`, `search`, `options` are supported.
+// TODO: `domain` is not supported yet.
+type DNSConfig struct {
+	// Nameservers specifies the IP addresses of the name servers
+	Nameservers []netip.Addr `json:",omitempty"`
+	// Search specifies the search list for host-name lookup
+	Search []string `json:",omitempty"`
+	// Options allows certain internal resolver variables to be modified
+	Options []string `json:",omitempty"`
+}
+
+// SELinuxContext contains the SELinux labels of the container.
+type SELinuxContext struct {
+	Disable bool
+
+	User  string
+	Role  string
+	Type  string
+	Level string
+}
+
+// SeccompMode is the type used for the enumeration of possible seccomp modes
+// in SeccompOpts
+type SeccompMode string
+
+const (
+	SeccompModeDefault    SeccompMode = "default"
+	SeccompModeUnconfined SeccompMode = "unconfined"
+	SeccompModeCustom     SeccompMode = "custom"
+)
+
+// SeccompOpts defines the options for configuring seccomp on a swarm-managed
+// container.
+type SeccompOpts struct {
+	// Mode is the SeccompMode used for the container.
+	Mode SeccompMode `json:",omitempty"`
+	// Profile is the custom seccomp profile as a json object to be used with
+	// the container. Mode should be set to SeccompModeCustom when using a
+	// custom profile in this manner.
+	Profile []byte `json:",omitempty"`
+}
+
+// AppArmorMode is the type used for the enumeration of possible AppArmor modes
+// in AppArmorOpts
+type AppArmorMode string
+
+const (
+	AppArmorModeDefault  AppArmorMode = "default"
+	AppArmorModeDisabled AppArmorMode = "disabled"
+)
+
+// AppArmorOpts defines the options for configuring AppArmor on a swarm-managed
+// container. Currently, custom AppArmor profiles are not supported.
+type AppArmorOpts struct {
+	Mode AppArmorMode `json:",omitempty"`
+}
+
+// CredentialSpec for managed service account (Windows only)
+type CredentialSpec struct {
+	Config   string
+	File     string
+	Registry string
+}
+
+// Privileges defines the security options for the container.
+type Privileges struct {
+	CredentialSpec  *CredentialSpec
+	SELinuxContext  *SELinuxContext
+	Seccomp         *SeccompOpts  `json:",omitempty"`
+	AppArmor        *AppArmorOpts `json:",omitempty"`
+	NoNewPrivileges bool
+}
+
+// ContainerSpec represents the spec of a container.
+type ContainerSpec struct {
+	Image           string                  `json:",omitempty"`
+	Labels          map[string]string       `json:",omitempty"`
+	Command         []string                `json:",omitempty"`
+	Args            []string                `json:",omitempty"`
+	Hostname        string                  `json:",omitempty"`
+	Env             []string                `json:",omitempty"`
+	Dir             string                  `json:",omitempty"`
+	User            string                  `json:",omitempty"`
+	Groups          []string                `json:",omitempty"`
+	Privileges      *Privileges             `json:",omitempty"`
+	Init            *bool                   `json:",omitempty"`
+	StopSignal      string                  `json:",omitempty"`
+	TTY             bool                    `json:",omitempty"`
+	OpenStdin       bool                    `json:",omitempty"`
+	ReadOnly        bool                    `json:",omitempty"`
+	Mounts          []mount.Mount           `json:",omitempty"`
+	StopGracePeriod *time.Duration          `json:",omitempty"`
+	Healthcheck     *container.HealthConfig `json:",omitempty"`
+	// The format of extra hosts on swarmkit is specified in:
+	// http://man7.org/linux/man-pages/man5/hosts.5.html
+	//	IP_address canonical_hostname [aliases...]
+	Hosts          []string            `json:",omitempty"`
+	DNSConfig      *DNSConfig          `json:",omitempty"`
+	Secrets        []*SecretReference  `json:",omitempty"`
+	Configs        []*ConfigReference  `json:",omitempty"`
+	Isolation      container.Isolation `json:",omitempty"`
+	Sysctls        map[string]string   `json:",omitempty"`
+	CapabilityAdd  []string            `json:",omitempty"`
+	CapabilityDrop []string            `json:",omitempty"`
+	Ulimits        []*container.Ulimit `json:",omitempty"`
+	OomScoreAdj    int64               `json:",omitempty"`
+}
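A minimal ContainerSpec built from the types above (the image and settings are hypothetical):

```go
package main

import (
	"fmt"
	"time"

	"github.com/moby/moby/api/types/swarm"
)

func main() {
	grace := 10 * time.Second
	spec := swarm.ContainerSpec{
		Image:           "nginx:alpine", // hypothetical image
		Env:             []string{"DEBUG=0"},
		StopGracePeriod: &grace,
		Privileges: &swarm.Privileges{
			NoNewPrivileges: true,
			Seccomp:         &swarm.SeccompOpts{Mode: swarm.SeccompModeDefault},
		},
	}
	fmt.Println(spec.Image, *spec.StopGracePeriod)
}
```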
diff --git a/vendor/github.com/moby/moby/api/types/swarm/network.go b/vendor/github.com/moby/moby/api/types/swarm/network.go
new file mode 100644
index 000000000..65aabc9d3
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/swarm/network.go
@@ -0,0 +1,117 @@
+package swarm
+
+import (
+	"net/netip"
+
+	"github.com/moby/moby/api/types/network"
+)
+
+// Endpoint represents an endpoint.
+type Endpoint struct {
+	Spec       EndpointSpec        `json:",omitempty"`
+	Ports      []PortConfig        `json:",omitempty"`
+	VirtualIPs []EndpointVirtualIP `json:",omitempty"`
+}
+
+// EndpointSpec represents the spec of an endpoint.
+type EndpointSpec struct {
+	Mode  ResolutionMode `json:",omitempty"`
+	Ports []PortConfig   `json:",omitempty"`
+}
+
+// ResolutionMode represents a resolution mode.
+type ResolutionMode string
+
+const (
+	// ResolutionModeVIP VIP
+	ResolutionModeVIP ResolutionMode = "vip"
+	// ResolutionModeDNSRR DNSRR
+	ResolutionModeDNSRR ResolutionMode = "dnsrr"
+)
+
+// PortConfig represents the config of a port.
+type PortConfig struct {
+	Name     string             `json:",omitempty"`
+	Protocol network.IPProtocol `json:",omitempty"`
+	// TargetPort is the port inside the container
+	TargetPort uint32 `json:",omitempty"`
+	// PublishedPort is the port on the swarm hosts
+	PublishedPort uint32 `json:",omitempty"`
+	// PublishMode is the mode in which the port is published
+	PublishMode PortConfigPublishMode `json:",omitempty"`
+}
+
+// PortConfigPublishMode represents the mode in which the port is to
+// be published.
+type PortConfigPublishMode string
+
+const (
+	// PortConfigPublishModeIngress is used for ports published
+	// for ingress load balancing using routing mesh.
+	PortConfigPublishModeIngress PortConfigPublishMode = "ingress"
+	// PortConfigPublishModeHost is used for ports published
+	// for direct host level access on the host where the task is running.
+	PortConfigPublishModeHost PortConfigPublishMode = "host"
+)
+
+// EndpointVirtualIP represents the virtual IP of a port.
+type EndpointVirtualIP struct {
+	NetworkID string `json:",omitempty"`
+
+	// Addr is the virtual IP address.
+	// This field accepts CIDR notation, for example `10.0.0.1/24`, to maintain backwards
+	// compatibility, but only the IP address is used.
+	Addr netip.Prefix `json:",omitempty"`
+}
+
+// Network represents a network.
+type Network struct {
+	ID string
+	Meta
+	Spec        NetworkSpec  `json:",omitempty"`
+	DriverState Driver       `json:",omitempty"`
+	IPAMOptions *IPAMOptions `json:",omitempty"`
+}
+
+// NetworkSpec represents the spec of a network.
+type NetworkSpec struct {
+	Annotations
+	DriverConfiguration *Driver                  `json:",omitempty"`
+	IPv6Enabled         bool                     `json:",omitempty"`
+	Internal            bool                     `json:",omitempty"`
+	Attachable          bool                     `json:",omitempty"`
+	Ingress             bool                     `json:",omitempty"`
+	IPAMOptions         *IPAMOptions             `json:",omitempty"`
+	ConfigFrom          *network.ConfigReference `json:",omitempty"`
+	Scope               string                   `json:",omitempty"`
+}
+
+// NetworkAttachmentConfig represents the configuration of a network attachment.
+type NetworkAttachmentConfig struct {
+	Target     string            `json:",omitempty"`
+	Aliases    []string          `json:",omitempty"`
+	DriverOpts map[string]string `json:",omitempty"`
+}
+
+// NetworkAttachment represents a network attachment.
+type NetworkAttachment struct {
+	Network Network `json:",omitempty"`
+
+	// Addresses contains the IP addresses associated with the endpoint in the network.
+	// This field accepts CIDR notation, for example `10.0.0.1/24`, to maintain backwards
+	// compatibility, but only the IP address is used.
+	Addresses []netip.Prefix `json:",omitempty"`
+}
+
+// IPAMOptions represents IPAM options.
+type IPAMOptions struct {
+	Driver  Driver       `json:",omitempty"`
+	Configs []IPAMConfig `json:",omitempty"`
+}
+
+// IPAMConfig represents IPAM configuration.
+type IPAMConfig struct {
+	Subnet  netip.Prefix `json:",omitempty"`
+	Range   netip.Prefix `json:",omitempty"`
+	Gateway netip.Addr   `json:",omitempty"`
+}
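A sketch of an EndpointSpec publishing container port 80 as 8080 through the routing mesh, using the port types above (this assumes the network package exports TCP as an IPProtocol constant, as its port helpers suggest):

```go
package main

import (
	"fmt"

	"github.com/moby/moby/api/types/network"
	"github.com/moby/moby/api/types/swarm"
)

func main() {
	spec := swarm.EndpointSpec{
		Mode: swarm.ResolutionModeVIP,
		Ports: []swarm.PortConfig{{
			Protocol:      network.TCP, // assumed exported constant; see normalizePortProto above
			TargetPort:    80,
			PublishedPort: 8080,
			PublishMode:   swarm.PortConfigPublishModeIngress,
		}},
	}
	fmt.Println(spec.Ports[0].PublishedPort, "->", spec.Ports[0].TargetPort)
}
```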
diff --git a/vendor/github.com/moby/moby/api/types/swarm/node.go b/vendor/github.com/moby/moby/api/types/swarm/node.go
new file mode 100644
index 000000000..9523799b6
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/swarm/node.go
@@ -0,0 +1,139 @@
+package swarm
+
+// Node represents a node.
+type Node struct {
+	ID string
+	Meta
+	// Spec defines the desired state of the node as specified by the user.
+	// The system will honor this and will *never* modify it.
+	Spec NodeSpec `json:",omitempty"`
+	// Description encapsulates the properties of the Node as reported by the
+	// agent.
+	Description NodeDescription `json:",omitempty"`
+	// Status provides the current status of the node, as seen by the manager.
+	Status NodeStatus `json:",omitempty"`
+	// ManagerStatus provides the current status of the node's manager
+	// component, if the node is a manager.
+	ManagerStatus *ManagerStatus `json:",omitempty"`
+}
+
+// NodeSpec represents the spec of a node.
+type NodeSpec struct {
+	Annotations
+	Role         NodeRole         `json:",omitempty"`
+	Availability NodeAvailability `json:",omitempty"`
+}
+
+// NodeRole represents the role of a node.
+type NodeRole string
+
+const (
+	// NodeRoleWorker WORKER
+	NodeRoleWorker NodeRole = "worker"
+	// NodeRoleManager MANAGER
+	NodeRoleManager NodeRole = "manager"
+)
+
+// NodeAvailability represents the availability of a node.
+type NodeAvailability string
+
+const (
+	// NodeAvailabilityActive ACTIVE
+	NodeAvailabilityActive NodeAvailability = "active"
+	// NodeAvailabilityPause PAUSE
+	NodeAvailabilityPause NodeAvailability = "pause"
+	// NodeAvailabilityDrain DRAIN
+	NodeAvailabilityDrain NodeAvailability = "drain"
+)
+
+// NodeDescription represents the description of a node.
+type NodeDescription struct {
+	Hostname  string            `json:",omitempty"`
+	Platform  Platform          `json:",omitempty"`
+	Resources Resources         `json:",omitempty"`
+	Engine    EngineDescription `json:",omitempty"`
+	TLSInfo   TLSInfo           `json:",omitempty"`
+	CSIInfo   []NodeCSIInfo     `json:",omitempty"`
+}
+
+// Platform represents the platform (Arch/OS).
+type Platform struct {
+	Architecture string `json:",omitempty"`
+	OS           string `json:",omitempty"`
+}
+
+// EngineDescription represents the description of an engine.
+type EngineDescription struct {
+	EngineVersion string              `json:",omitempty"`
+	Labels        map[string]string   `json:",omitempty"`
+	Plugins       []PluginDescription `json:",omitempty"`
+}
+
+// NodeCSIInfo represents information about a CSI plugin available on the node
+type NodeCSIInfo struct {
+	// PluginName is the name of the CSI plugin.
+	PluginName string `json:",omitempty"`
+	// NodeID is the ID of the node as reported by the CSI plugin. This is
+	// different from the swarm node ID.
+	NodeID string `json:",omitempty"`
+	// MaxVolumesPerNode is the maximum number of volumes that may be published
+	// to this node
+	MaxVolumesPerNode int64 `json:",omitempty"`
+	// AccessibleTopology indicates the location of this node in the CSI
+	// plugin's topology
+	AccessibleTopology *Topology `json:",omitempty"`
+}
+
+// PluginDescription represents the description of an engine plugin.
+type PluginDescription struct {
+	Type string `json:",omitempty"`
+	Name string `json:",omitempty"`
+}
+
+// NodeStatus represents the status of a node.
+type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. +type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) + +// Topology defines the CSI topology of this node. This type is a duplicate of +// [github.com/moby/moby/api/types/volume.Topology]. Because the type definition +// is so simple and to avoid complicated structure or circular imports, we just +// duplicate it here. See that type for full documentation +type Topology struct { + Segments map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/runtime.go b/vendor/github.com/moby/moby/api/types/swarm/runtime.go new file mode 100644 index 000000000..23ea712c4 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/runtime.go @@ -0,0 +1,45 @@ +package swarm + +// RuntimeType is the type of runtime used for the TaskSpec +type RuntimeType string + +// RuntimeURL is the proto type url +type RuntimeURL string + +const ( + // RuntimeContainer is the container based runtime + RuntimeContainer RuntimeType = "container" + // RuntimePlugin is the plugin based runtime + RuntimePlugin RuntimeType = "plugin" + // RuntimeNetworkAttachment is the network attachment runtime + RuntimeNetworkAttachment RuntimeType = "attachment" + + // RuntimeURLContainer is the proto url for the container type + RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" + // RuntimeURLPlugin is the proto url for the plugin type + RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" +) + +// NetworkAttachmentSpec represents the runtime spec type for network +// attachment tasks +type NetworkAttachmentSpec struct { + ContainerID string +} + +// RuntimeSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +type RuntimeSpec struct { + Name string `json:"name,omitempty"` + Remote string `json:"remote,omitempty"` + Privileges []*RuntimePrivilege `json:"privileges,omitempty"` + Disabled bool `json:"disabled,omitempty"` + Env []string `json:"env,omitempty"` +} + +// RuntimePrivilege describes a permission the user has to accept +// upon installing a plugin. 
+type RuntimePrivilege struct {
+	Name        string   `json:"name,omitempty"`
+	Description string   `json:"description,omitempty"`
+	Value       []string `json:"value,omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/swarm/secret.go b/vendor/github.com/moby/moby/api/types/swarm/secret.go
new file mode 100644
index 000000000..0e27ed9b0
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/swarm/secret.go
@@ -0,0 +1,59 @@
+package swarm
+
+import (
+	"os"
+)
+
+// Secret represents a secret.
+type Secret struct {
+	ID string
+	Meta
+	Spec SecretSpec
+}
+
+// SecretSpec represents a secret specification from a secret in swarm
+type SecretSpec struct {
+	Annotations
+
+	// Data is the data to store as a secret. It must be empty if a
+	// [Driver] is used, in which case the data is loaded from an external
+	// secret store. The maximum allowed size is 500KB, as defined in
+	// [MaxSecretSize].
+	//
+	// This field is only used to create the secret, and is not returned
+	// by other endpoints.
+	//
+	// [MaxSecretSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize
+	Data []byte `json:",omitempty"`
+
+	// Driver is the name of the secrets driver used to fetch the secret's
+	// value from an external secret store. If not set, the default built-in
+	// store is used.
+	Driver *Driver `json:",omitempty"`
+
+	// Templating controls whether and how to evaluate the secret payload as
+	// a template. If it is not set, no templating is used.
+	Templating *Driver `json:",omitempty"`
+}
+
+// SecretReferenceFileTarget is a file target in a secret reference
+type SecretReferenceFileTarget struct {
+	Name string
+	UID  string
+	GID  string
+	Mode os.FileMode
+}
+
+// SecretReference is a reference to a secret in swarm
+type SecretReference struct {
+	File       *SecretReferenceFileTarget
+	SecretID   string
+	SecretName string
+}
+
+// SecretCreateResponse contains the information returned to a client
+// on the creation of a new secret.
+type SecretCreateResponse struct {
+	// ID is the id of the created secret.
+	ID string
+}
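SecretSpec mirrors ConfigSpec: inline Data (at most 500KB) or an external Driver, but not both. A short sketch with an illustrative payload:

```go
package main

import (
	"fmt"

	"github.com/moby/moby/api/types/swarm"
)

func main() {
	spec := swarm.SecretSpec{
		Annotations: swarm.Annotations{Name: "db-password"}, // hypothetical name
		Data:        []byte("s3cret"),                       // must be empty when Driver is set
	}
	fmt.Println(spec.Name, len(spec.Data))
}
```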
diff --git a/vendor/github.com/moby/moby/api/types/swarm/service.go b/vendor/github.com/moby/moby/api/types/swarm/service.go
new file mode 100644
index 000000000..0b678dea3
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/swarm/service.go
@@ -0,0 +1,218 @@
+package swarm
+
+import (
+	"time"
+)
+
+// Service represents a service.
+type Service struct {
+	ID string
+	Meta
+	Spec         ServiceSpec   `json:",omitempty"`
+	PreviousSpec *ServiceSpec  `json:",omitempty"`
+	Endpoint     Endpoint      `json:",omitempty"`
+	UpdateStatus *UpdateStatus `json:",omitempty"`
+
+	// ServiceStatus is an optional, extra field indicating the number of
+	// desired and running tasks. It is provided primarily as a shortcut to
+	// calculating these values client-side, which otherwise would require
+	// listing all tasks for a service, an operation that could be
+	// computationally and network expensive.
+	ServiceStatus *ServiceStatus `json:",omitempty"`
+
+	// JobStatus is the status of a Service which is in one of ReplicatedJob or
+	// GlobalJob modes. It is absent on Replicated and Global services.
+	JobStatus *JobStatus `json:",omitempty"`
+}
+
+// ServiceSpec represents the spec of a service.
+type ServiceSpec struct {
+	Annotations
+
+	// TaskTemplate defines how the service should construct new tasks when
+	// orchestrating this service.
+	TaskTemplate   TaskSpec      `json:",omitempty"`
+	Mode           ServiceMode   `json:",omitempty"`
+	UpdateConfig   *UpdateConfig `json:",omitempty"`
+	RollbackConfig *UpdateConfig `json:",omitempty"`
+	EndpointSpec   *EndpointSpec `json:",omitempty"`
+}
+
+// ServiceMode represents the mode of a service.
+type ServiceMode struct {
+	Replicated    *ReplicatedService `json:",omitempty"`
+	Global        *GlobalService     `json:",omitempty"`
+	ReplicatedJob *ReplicatedJob     `json:",omitempty"`
+	GlobalJob     *GlobalJob         `json:",omitempty"`
+}
+
+// UpdateState is the state of a service update.
+type UpdateState string
+
+const (
+	// UpdateStateUpdating is the updating state.
+	UpdateStateUpdating UpdateState = "updating"
+	// UpdateStatePaused is the paused state.
+	UpdateStatePaused UpdateState = "paused"
+	// UpdateStateCompleted is the completed state.
+	UpdateStateCompleted UpdateState = "completed"
+	// UpdateStateRollbackStarted is the state with a rollback in progress.
+	UpdateStateRollbackStarted UpdateState = "rollback_started"
+	// UpdateStateRollbackPaused is the state with a rollback in progress.
+	UpdateStateRollbackPaused UpdateState = "rollback_paused"
+	// UpdateStateRollbackCompleted is the state with a rollback in progress.
+	UpdateStateRollbackCompleted UpdateState = "rollback_completed"
+)
+
+// UpdateStatus reports the status of a service update.
+type UpdateStatus struct {
+	State       UpdateState `json:",omitempty"`
+	StartedAt   *time.Time  `json:",omitempty"`
+	CompletedAt *time.Time  `json:",omitempty"`
+	Message     string      `json:",omitempty"`
+}
+
+// ReplicatedService is a kind of ServiceMode.
+type ReplicatedService struct {
+	Replicas *uint64 `json:",omitempty"`
+}
+
+// GlobalService is a kind of ServiceMode.
+type GlobalService struct{}
+
+// ReplicatedJob is a type of Service which executes a defined number of Tasks
+// in parallel until the specified number of Tasks have succeeded.
+type ReplicatedJob struct {
+	// MaxConcurrent indicates the maximum number of Tasks that should be
+	// executing simultaneously for this job at any given time. There may be
+	// fewer than MaxConcurrent Tasks executing simultaneously; for example, if
+	// there are fewer than MaxConcurrent tasks needed to reach
+	// TotalCompletions.
+	//
+	// If this field is empty, it will default to a max concurrency of 1.
+	MaxConcurrent *uint64 `json:",omitempty"`
+
+	// TotalCompletions is the total number of Tasks desired to run to
+	// completion.
+	//
+	// If this field is empty, the value of MaxConcurrent will be used.
+	TotalCompletions *uint64 `json:",omitempty"`
+}
+
+// GlobalJob is the type of a Service which executes a Task on every Node
+// matching the Service's placement constraints. These tasks run to completion
+// and then exit.
+//
+// This type is deliberately empty.
+type GlobalJob struct{}
+
+// FailureAction is the action to perform when updating a service fails.
+type FailureAction string
+
+const (
+	// UpdateFailureActionPause PAUSE
+	UpdateFailureActionPause FailureAction = "pause"
+	// UpdateFailureActionContinue CONTINUE
+	UpdateFailureActionContinue FailureAction = "continue"
+	// UpdateFailureActionRollback ROLLBACK
+	UpdateFailureActionRollback FailureAction = "rollback"
+)
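A sketch of a rolling-update policy using UpdateConfig and the constants around it (UpdateConfig and UpdateOrder are defined just below; values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/moby/moby/api/types/swarm"
)

func main() {
	cfg := swarm.UpdateConfig{
		Parallelism:     2,               // two tasks per iteration; 0 would mean unlimited
		Delay:           5 * time.Second, // pause between iterations
		FailureAction:   swarm.UpdateFailureActionRollback,
		Monitor:         15 * time.Second, // failure window per task
		MaxFailureRatio: 0.1,
		Order:           swarm.UpdateOrderStartFirst,
	}
	fmt.Println(cfg.FailureAction, cfg.Order)
}
```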
+
+// UpdateOrder is the order of operations when rolling out or rolling back
+// updated tasks for a service.
+type UpdateOrder string
+
+const (
+	// UpdateOrderStopFirst STOP_FIRST
+	UpdateOrderStopFirst UpdateOrder = "stop-first"
+	// UpdateOrderStartFirst START_FIRST
+	UpdateOrderStartFirst UpdateOrder = "start-first"
+)
+
+// UpdateConfig represents the update configuration.
+type UpdateConfig struct {
+	// Maximum number of tasks to be updated in one iteration.
+	// 0 means unlimited parallelism.
+	Parallelism uint64
+
+	// Amount of time between updates.
+	Delay time.Duration `json:",omitempty"`
+
+	// FailureAction is the action to take when an update fails.
+	FailureAction FailureAction `json:",omitempty"`
+
+	// Monitor indicates how long to monitor a task for failure after it is
+	// created. If the task fails by ending up in one of the states
+	// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
+	// this counts as a failure. If it fails after Monitor, it does not
+	// count as a failure. If Monitor is unspecified, a default value will
+	// be used.
+	Monitor time.Duration `json:",omitempty"`
+
+	// MaxFailureRatio is the fraction of tasks that may fail during
+	// an update before the failure action is invoked. Any task created by
+	// the current update which ends up in one of the states REJECTED,
+	// COMPLETED or FAILED within Monitor from its creation counts as a
+	// failure. The number of failures is divided by the number of tasks
+	// being updated, and if this fraction is greater than
+	// MaxFailureRatio, the failure action is invoked.
+	//
+	// If the failure action is CONTINUE, there is no effect.
+	// If the failure action is PAUSE, no more tasks will be updated until
+	// another update is started.
+	MaxFailureRatio float32
+
+	// Order indicates the order of operations when rolling out an updated
+	// task. Either the old task is shut down before the new task is
+	// started, or the new task is started before the old task is shut down.
+	Order UpdateOrder
+}
+
+// ServiceStatus represents the number of running tasks in a service and the
+// number of tasks desired to be running.
+type ServiceStatus struct {
+	// RunningTasks is the number of tasks for the service actually in the
+	// Running state
+	RunningTasks uint64
+
+	// DesiredTasks is the number of tasks desired to be running by the
+	// service. For replicated services, this is the replica count. For global
+	// services, this is computed by taking the number of tasks with desired
+	// state of not-Shutdown.
+	DesiredTasks uint64
+
+	// CompletedTasks is the number of tasks in the state Completed, if this
+	// service is in ReplicatedJob or GlobalJob mode. This field must be
+	// cross-referenced with the service type, because the default value of 0
+	// may mean that a service is not in a job mode, or it may mean that the
+	// job has yet to complete any tasks.
+	CompletedTasks uint64
+}
+
+// JobStatus is the status of a job-type service.
+type JobStatus struct {
+	// JobIteration is a value increased each time a Job is executed,
+	// successfully or otherwise. "Executed", in this case, means the job as a
+	// whole has been started, not that an individual Task has been launched. A
+	// job is "Executed" when its ServiceSpec is updated. JobIteration can be
+	// used to disambiguate Tasks belonging to different executions of a job.
+	//
+	// Though JobIteration will increase with each subsequent execution, it may
+	// not necessarily increase by 1, and so JobIteration should not be used to
+	// keep track of the number of times a job has been executed.
+	JobIteration Version
+
+	// LastExecution is the time that the job was last executed, as observed by
+	// Swarm manager.
+	LastExecution time.Time `json:",omitempty"`
+}
+
+// RegistryAuthSource defines options for the "registryAuthFrom" query parameter
+// on service update.
+type RegistryAuthSource string
+
+// Values for RegistryAuthFrom in ServiceUpdateOptions
+const (
+	RegistryAuthFromSpec         RegistryAuthSource = "spec"
+	RegistryAuthFromPreviousSpec RegistryAuthSource = "previous-spec"
+)
diff --git a/vendor/github.com/moby/moby/api/types/swarm/service_create_response.go b/vendor/github.com/moby/moby/api/types/swarm/service_create_response.go
new file mode 100644
index 000000000..ebbc097d9
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/swarm/service_create_response.go
@@ -0,0 +1,24 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package swarm
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ServiceCreateResponse contains the information returned to a client on the
+// creation of a new service.
+//
+// swagger:model ServiceCreateResponse
+type ServiceCreateResponse struct {
+
+	// The ID of the created service.
+	// Example: ak7w3gjqoa3kuz8xcpnyy0pvl
+	ID string `json:"ID,omitempty"`
+
+	// Optional warning message.
+	//
+	// FIXME(thaJeztah): this should have "omitempty" in the generated type.
+	//
+	// Example: ["unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"]
+	Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/swarm/service_update_response.go b/vendor/github.com/moby/moby/api/types/swarm/service_update_response.go
new file mode 100644
index 000000000..b7649096a
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/swarm/service_update_response.go
@@ -0,0 +1,16 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package swarm
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ServiceUpdateResponse service update response
+// Example: {"Warnings":["unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"]}
+//
+// swagger:model ServiceUpdateResponse
+type ServiceUpdateResponse struct {
+
+	// Optional warning messages
+	Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/swarm/swarm.go b/vendor/github.com/moby/moby/api/types/swarm/swarm.go
new file mode 100644
index 000000000..842185031
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/swarm/swarm.go
@@ -0,0 +1,228 @@
+package swarm
+
+import (
+	"net/netip"
+	"time"
+)
+
+// ClusterInfo represents info about the cluster for outputting in "info".
+// It contains the same information as "Swarm", but without the JoinTokens.
+type ClusterInfo struct {
+	ID string
+	Meta
+	Spec                   Spec
+	TLSInfo                TLSInfo
+	RootRotationInProgress bool
+	DefaultAddrPool        []netip.Prefix
+	SubnetSize             uint32
+	DataPathPort           uint32
+}
+
+// Swarm represents a swarm.
+type Swarm struct {
+	ClusterInfo
+	JoinTokens JoinTokens
+}
+
+// JoinTokens contains the tokens workers and managers need to join the swarm.
+type JoinTokens struct {
+	// Worker is the join token workers may use to join the swarm.
+	Worker string
+	// Manager is the join token managers may use to join the swarm.
+	Manager string
+}
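A sketch of an InitRequest built from the swarm types in this file (InitRequest itself is defined further below; the addresses and address pool are illustrative):

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/moby/moby/api/types/swarm"
)

func main() {
	req := swarm.InitRequest{
		ListenAddr:      "0.0.0.0:2377", // hypothetical addresses
		AdvertiseAddr:   "192.168.1.10:2377",
		DefaultAddrPool: []netip.Prefix{netip.MustParsePrefix("10.20.0.0/16")},
		SubnetSize:      24,
		Availability:    swarm.NodeAvailabilityActive,
	}
	fmt.Println(req.ListenAddr, req.DefaultAddrPool)
}
```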
+
+// Spec represents the spec of a swarm.
+type Spec struct {
+	Annotations
+
+	Orchestration    OrchestrationConfig `json:",omitempty"`
+	Raft             RaftConfig          `json:",omitempty"`
+	Dispatcher       DispatcherConfig    `json:",omitempty"`
+	CAConfig         CAConfig            `json:",omitempty"`
+	TaskDefaults     TaskDefaults        `json:",omitempty"`
+	EncryptionConfig EncryptionConfig    `json:",omitempty"`
+}
+
+// OrchestrationConfig represents orchestration configuration.
+type OrchestrationConfig struct {
+	// TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+	// node. If negative, never remove completed or failed tasks.
+	TaskHistoryRetentionLimit *int64 `json:",omitempty"`
+}
+
+// TaskDefaults parameterizes cluster-level task creation with default values.
+type TaskDefaults struct {
+	// LogDriver selects the log driver to use for tasks created in the
+	// orchestrator if unspecified by a service.
+	//
+	// Updating this value will only have an effect on new tasks. Old tasks
+	// will continue to use their previously configured log driver until
+	// recreated.
+	LogDriver *Driver `json:",omitempty"`
+}
+
+// EncryptionConfig controls at-rest encryption of data and keys.
+type EncryptionConfig struct {
+	// AutoLockManagers specifies whether or not managers' TLS keys and raft data
+	// should be encrypted at rest in such a way that they must be unlocked
+	// before the manager node starts up again.
+	AutoLockManagers bool
+}
+
+// RaftConfig represents raft configuration.
+type RaftConfig struct {
+	// SnapshotInterval is the number of log entries between snapshots.
+	SnapshotInterval uint64 `json:",omitempty"`
+
+	// KeepOldSnapshots is the number of snapshots to keep beyond the
+	// current snapshot.
+	KeepOldSnapshots *uint64 `json:",omitempty"`
+
+	// LogEntriesForSlowFollowers is the number of log entries to keep
+	// around to sync up slow followers after a snapshot is created.
+	LogEntriesForSlowFollowers uint64 `json:",omitempty"`
+
+	// ElectionTick is the number of ticks that a follower will wait for a message
+	// from the leader before becoming a candidate and starting an election.
+	// ElectionTick must be greater than HeartbeatTick.
+	//
+	// A tick currently defaults to one second, so these translate directly to
+	// seconds currently, but this is NOT guaranteed.
+	ElectionTick int
+
+	// HeartbeatTick is the number of ticks between heartbeats. Every
+	// HeartbeatTick ticks, the leader will send a heartbeat to the
+	// followers.
+	//
+	// A tick currently defaults to one second, so these translate directly to
+	// seconds currently, but this is NOT guaranteed.
+	HeartbeatTick int
+}
+
+// DispatcherConfig represents dispatcher configuration.
+type DispatcherConfig struct {
+	// HeartbeatPeriod defines how often agent should send heartbeats to
+	// dispatcher.
+	HeartbeatPeriod time.Duration `json:",omitempty"`
+}
+
+// CAConfig represents CA configuration.
+type CAConfig struct {
+	// NodeCertExpiry is the duration certificates should be issued for
+	NodeCertExpiry time.Duration `json:",omitempty"`
+
+	// ExternalCAs is a list of CAs to which a manager node will make
+	// certificate signing requests for node certificates.
+	ExternalCAs []*ExternalCA `json:",omitempty"`
+
+	// SigningCACert and SigningCAKey specify the desired signing root CA and
+	// root CA key for the swarm. When inspecting the cluster, the key will
+	// be redacted.
+ SigningCACert string `json:",omitempty"` + SigningCAKey string `json:",omitempty"` + + // If this value changes, and there is no specified signing cert and key, + // then the swarm is forced to generate a new root certificate and key. + ForceRotate uint64 `json:",omitempty"` +} + +// ExternalCAProtocol represents type of external CA. +type ExternalCAProtocol string + +// ExternalCAProtocolCFSSL CFSSL +const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" + +// ExternalCA defines external CA to be used by the cluster. +type ExternalCA struct { + // Protocol is the protocol used by this external CA. + Protocol ExternalCAProtocol + + // URL is the URL where the external CA can be reached. + URL string + + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `json:",omitempty"` + + // CACert specifies which root CA is used by this external CA. This certificate must + // be in PEM format. + CACert string +} + +// InitRequest is the request used to init a swarm. +type InitRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + DataPathPort uint32 + ForceNewCluster bool + Spec Spec + AutoLockManagers bool + Availability NodeAvailability + DefaultAddrPool []netip.Prefix + SubnetSize uint32 +} + +// JoinRequest is the request used to join a swarm. +type JoinRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + RemoteAddrs []string + JoinToken string // accept by secret + Availability NodeAvailability +} + +// UnlockRequest is the request used to unlock a swarm. +type UnlockRequest struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// LocalNodeState represents the state of the local node. +type LocalNodeState string + +const ( + // LocalNodeStateInactive INACTIVE + LocalNodeStateInactive LocalNodeState = "inactive" + // LocalNodeStatePending PENDING + LocalNodeStatePending LocalNodeState = "pending" + // LocalNodeStateActive ACTIVE + LocalNodeStateActive LocalNodeState = "active" + // LocalNodeStateError ERROR + LocalNodeStateError LocalNodeState = "error" + // LocalNodeStateLocked LOCKED + LocalNodeStateLocked LocalNodeState = "locked" +) + +// Info represents generic information about swarm. +type Info struct { + NodeID string + NodeAddr string + + LocalNodeState LocalNodeState + ControlAvailable bool + Error string + + RemoteManagers []Peer + Nodes int `json:",omitempty"` + Managers int `json:",omitempty"` + + Cluster *ClusterInfo `json:",omitempty"` + + Warnings []string `json:",omitempty"` +} + +// Peer represents a peer. +type Peer struct { + NodeID string + Addr string +} + +// UnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type UnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/task.go b/vendor/github.com/moby/moby/api/types/swarm/task.go new file mode 100644 index 000000000..f61190683 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/task.go @@ -0,0 +1,234 @@ +package swarm + +import ( + "time" +) + +// TaskState represents the state of a task. 
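+//
+// A small sketch of how these states are typically consumed when reading
+// tasks back from the API:
+//
+//	if task.Status.State == swarm.TaskStateRunning {
+//		// the task has reached the running state
+//	}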
+type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" + // TaskStateRemove REMOVE + TaskStateRemove TaskState = "remove" + // TaskStateOrphaned ORPHANED + TaskStateOrphaned TaskState = "orphaned" +) + +// Task represents a task. +type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` + + // JobIteration is the JobIteration of the Service that this Task was + // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is + // used to determine which Tasks belong to which run of the job. This field + // is absent if the Service mode is Replicated or Global. + JobIteration *Version `json:",omitempty"` + + // Volumes is the list of VolumeAttachments for this task. It specifies + // which particular volumes are to be used by this particular task, and + // fulfilling what mounts in the spec. + Volumes []VolumeAttachment +} + +// TaskSpec represents the spec of a task. +type TaskSpec struct { + // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. + // PluginSpec is only used when the `Runtime` field is set to `plugin` + // NetworkAttachmentSpec is used if the `Runtime` field is set to + // `attachment`. + ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *RuntimeSpec `json:",omitempty"` + NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` + + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. If not present, the one on cluster default on swarm.Spec will be + // used, finally falling back to the engine default if not specified. + LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 + + Runtime RuntimeType `json:",omitempty"` +} + +// Resources represents resources (CPU/Memory) which can be advertised by a +// node and requested to be reserved for a task. 
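+//
+// NanoCPUs are expressed in units of 1e-9 CPUs, so a full CPU is
+// 1_000_000_000; MemoryBytes is a plain byte count. A sketch reserving
+// half a CPU and 64 MiB:
+//
+//	res := swarm.Resources{
+//		NanoCPUs:    500_000_000,
+//		MemoryBytes: 64 * 1024 * 1024,
+//	}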
+type Resources struct {
+	NanoCPUs         int64             `json:",omitempty"`
+	MemoryBytes      int64             `json:",omitempty"`
+	GenericResources []GenericResource `json:",omitempty"`
+}
+
+// Limit describes limits on resources which can be requested by a task.
+type Limit struct {
+	NanoCPUs    int64 `json:",omitempty"`
+	MemoryBytes int64 `json:",omitempty"`
+	Pids        int64 `json:",omitempty"`
+}
+
+// GenericResource represents a "user defined" resource which can
+// be either an integer (e.g. SSD=3) or a string (e.g. SSD=sda1).
+type GenericResource struct {
+	NamedResourceSpec    *NamedGenericResource    `json:",omitempty"`
+	DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"`
+}
+
+// NamedGenericResource represents a "user defined" resource which is defined
+// as a string.
+// "Kind" is used to describe the Kind of a resource (e.g. "GPU", "FPGA", "SSD", ...)
+// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...)
+type NamedGenericResource struct {
+	Kind  string `json:",omitempty"`
+	Value string `json:",omitempty"`
+}
+
+// DiscreteGenericResource represents a "user defined" resource which is defined
+// as an integer.
+// "Kind" is used to describe the Kind of a resource (e.g. "GPU", "FPGA", "SSD", ...)
+// Value is used to count the resource (SSD=5, HDD=3, ...)
+type DiscreteGenericResource struct {
+	Kind  string `json:",omitempty"`
+	Value int64  `json:",omitempty"`
+}
+
+// ResourceRequirements represents resource requirements.
+type ResourceRequirements struct {
+	Limits       *Limit     `json:",omitempty"`
+	Reservations *Resources `json:",omitempty"`
+
+	// Amount of swap in bytes; can only be used together with a memory limit.
+	// -1 means unlimited. A nil pointer keeps the default behaviour of
+	// granting twice the memory amount in swap.
+	SwapBytes *int64 `json:"SwapBytes,omitzero"`
+
+	// Tune container memory swappiness (0 to 100); if not specified, defaults
+	// to the container OS's default (generally 60), or the value predefined in
+	// the image; set to -1 to unset a previously set value.
+	MemorySwappiness *int64 `json:"MemorySwappiness,omitzero"`
+}
+
+// Placement represents orchestration parameters.
+type Placement struct {
+	Constraints []string              `json:",omitempty"`
+	Preferences []PlacementPreference `json:",omitempty"`
+	MaxReplicas uint64                `json:",omitempty"`
+
+	// Platforms stores all the platforms that the image can run on.
+	// This field is used in the platform filter for scheduling. If empty,
+	// then the platform filter is off, meaning there are no scheduling restrictions.
+	Platforms []Platform `json:",omitempty"`
+}
+
+// PlacementPreference provides a way to make the scheduler aware of factors
+// such as topology.
+type PlacementPreference struct {
+	Spread *SpreadOver
+}
+
+// SpreadOver is a scheduling preference that instructs the scheduler to spread
+// tasks evenly over groups of nodes identified by labels.
+type SpreadOver struct {
+	// label descriptor, such as engine.labels.az
+	SpreadDescriptor string
+}
+
+// RestartPolicy represents the restart policy.
+type RestartPolicy struct {
+	Condition   RestartPolicyCondition `json:",omitempty"`
+	Delay       *time.Duration         `json:",omitempty"`
+	MaxAttempts *uint64                `json:",omitempty"`
+	Window      *time.Duration         `json:",omitempty"`
+}
+
+// RestartPolicyCondition represents when to restart.
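+//
+// A sketch of these conditions in use, restarting a failed task at most
+// three times with a five-second delay (values are illustrative):
+//
+//	delay := 5 * time.Second
+//	attempts := uint64(3)
+//	policy := swarm.RestartPolicy{
+//		Condition:   swarm.RestartPolicyConditionOnFailure,
+//		Delay:       &delay,
+//		MaxAttempts: &attempts,
+//	}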
+type RestartPolicyCondition string
+
+const (
+	// RestartPolicyConditionNone NONE
+	RestartPolicyConditionNone RestartPolicyCondition = "none"
+	// RestartPolicyConditionOnFailure ON_FAILURE
+	RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
+	// RestartPolicyConditionAny ANY
+	RestartPolicyConditionAny RestartPolicyCondition = "any"
+)
+
+// TaskStatus represents the status of a task.
+type TaskStatus struct {
+	Timestamp       time.Time        `json:",omitempty"`
+	State           TaskState        `json:",omitempty"`
+	Message         string           `json:",omitempty"`
+	Err             string           `json:",omitempty"`
+	ContainerStatus *ContainerStatus `json:",omitempty"`
+	PortStatus      PortStatus       `json:",omitempty"`
+}
+
+// ContainerStatus represents the status of a container.
+type ContainerStatus struct {
+	ContainerID string
+	PID         int
+	ExitCode    int
+}
+
+// PortStatus represents the status of a task's host ports when its service
+// has published host ports.
+type PortStatus struct {
+	Ports []PortConfig `json:",omitempty"`
+}
+
+// VolumeAttachment contains the information associating a Volume with a Task.
+type VolumeAttachment struct {
+	// ID is the Swarmkit ID of the Volume. This is not the CSI VolumeId.
+	ID string `json:",omitempty"`
+
+	// Source, together with Target, indicates the Mount, as specified in the
+	// ContainerSpec, that this volume fulfills.
+	Source string `json:",omitempty"`
+
+	// Target, together with Source, indicates the Mount, as specified
+	// in the ContainerSpec, that this volume fulfills.
+	Target string `json:",omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/system/disk_usage.go b/vendor/github.com/moby/moby/api/types/system/disk_usage.go
new file mode 100644
index 000000000..33230aed2
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/system/disk_usage.go
@@ -0,0 +1,31 @@
+package system
+
+import (
+	"github.com/moby/moby/api/types/build"
+	"github.com/moby/moby/api/types/container"
+	"github.com/moby/moby/api/types/image"
+	"github.com/moby/moby/api/types/volume"
+)
+
+// DiskUsageObject represents an object type used for disk usage query filtering.
+type DiskUsageObject string
+
+const (
+	// ContainerObject represents a container DiskUsageObject.
+	ContainerObject DiskUsageObject = "container"
+	// ImageObject represents an image DiskUsageObject.
+	ImageObject DiskUsageObject = "image"
+	// VolumeObject represents a volume DiskUsageObject.
+	VolumeObject DiskUsageObject = "volume"
+	// BuildCacheObject represents a build-cache DiskUsageObject.
+ BuildCacheObject DiskUsageObject = "build-cache" +) + +// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + ImageUsage *image.DiskUsage `json:"ImageUsage,omitempty"` + ContainerUsage *container.DiskUsage `json:"ContainerUsage,omitempty"` + VolumeUsage *volume.DiskUsage `json:"VolumeUsage,omitempty"` + BuildCacheUsage *build.DiskUsage `json:"BuildCacheUsage,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/system/info.go b/vendor/github.com/moby/moby/api/types/system/info.go new file mode 100644 index 000000000..20df949e4 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/system/info.go @@ -0,0 +1,171 @@ +package system + +import ( + "net/netip" + + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/registry" + "github.com/moby/moby/api/types/swarm" +) + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + PidsLimit bool + IPv4Forwarding bool + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + CgroupVersion string `json:",omitempty"` + NEventsListener int + KernelVersion string + OperatingSystem string + OSVersion string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + GenericResources []swarm.GenericResource + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + Runtimes map[string]RuntimeWithStatus + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string + ProductLicense string `json:",omitempty"` + DefaultAddressPools []NetworkAddressPool `json:",omitempty"` + FirewallBackend *FirewallInfo `json:"FirewallBackend,omitempty"` + CDISpecDirs []string + DiscoveredDevices []DeviceInfo `json:",omitempty"` + NRI *NRIInfo `json:",omitempty"` + + Containerd *ContainerdInfo `json:",omitempty"` + + // Warnings contains a slice of warnings that occurred while collecting + // system information. These warnings are intended to be informational + // messages for the user, and are not intended to be parsed / used for + // other purposes, as they do not have a fixed format. + Warnings []string +} + +// ContainerdInfo holds information about the containerd instance used by the daemon. +type ContainerdInfo struct { + // Address is the path to the containerd socket. + Address string `json:",omitempty"` + // Namespaces is the containerd namespaces used by the daemon. + Namespaces ContainerdNamespaces +} + +// ContainerdNamespaces reflects the containerd namespaces used by the daemon. 
+//
+// These namespaces can be configured in the daemon configuration, and are
+// considered to be used exclusively by the daemon.
+//
+// As these namespaces are considered to be exclusively accessed
+// by the daemon, it is not recommended to change these values,
+// or to change them to a value that is used by other systems,
+// such as cri-containerd.
+type ContainerdNamespaces struct {
+	// Containers holds the default containerd namespace used for
+	// containers managed by the daemon.
+	//
+	// The default namespace for containers is "moby", but will be
+	// suffixed with the `<uid>.<gid>` of the remapped `root` if
+	// user-namespaces are enabled and the containerd image-store
+	// is used.
+	Containers string
+
+	// Plugins holds the default containerd namespace used for
+	// plugins managed by the daemon.
+	//
+	// The default namespace for plugins is "moby", but will be
+	// suffixed with the `<uid>.<gid>` of the remapped `root` if
+	// user-namespaces are enabled and the containerd image-store
+	// is used.
+	Plugins string
+}
+
+// PluginsInfo is a temp struct holding the names of the plugins registered
+// with the docker daemon. It is used by the [Info] struct.
+type PluginsInfo struct {
+	// List of Volume plugins registered
+	Volume []string
+	// List of Network plugins registered
+	Network []string
+	// List of Authorization plugins registered
+	Authorization []string
+	// List of Log plugins registered
+	Log []string
+}
+
+// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
+// in the version-string of external tools, such as containerd, or runC.
+type Commit struct {
+	// ID is the actual commit ID or version of external tool.
+	ID string
+}
+
+// NetworkAddressPool is a temp struct used by the [Info] struct.
+type NetworkAddressPool struct {
+	Base netip.Prefix
+	Size int
+}
+
+// FirewallInfo describes the firewall backend.
+type FirewallInfo struct {
+	// Driver is the name of the firewall backend driver.
+	Driver string `json:"Driver"`
+	// Info is a list of label/value pairs, containing information related to the firewall.
+	Info [][2]string `json:"Info,omitempty"`
+}
+
+// DeviceInfo represents a discoverable device from a device driver.
+type DeviceInfo struct {
+	// Source indicates the origin device driver.
+	Source string `json:"Source"`
+	// ID is the unique identifier for the device.
+	// Example: CDI FQDN like "vendor.com/gpu=0", or other driver-specific device ID
+	ID string `json:"ID"`
+}
+
+// NRIInfo describes the NRI configuration.
+type NRIInfo struct {
+	Info [][2]string `json:"Info,omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/system/runtime.go b/vendor/github.com/moby/moby/api/types/system/runtime.go
new file mode 100644
index 000000000..33cad3674
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/system/runtime.go
@@ -0,0 +1,20 @@
+package system
+
+// Runtime describes an OCI runtime.
+type Runtime struct {
+	// "Legacy" runtime configuration for runc-compatible runtimes.
+
+	Path string   `json:"path,omitempty"`
+	Args []string `json:"runtimeArgs,omitempty"`
+
+	// Shimv2 runtime configuration. Mutually exclusive with the legacy config above.
+
+	Type    string         `json:"runtimeType,omitempty"`
+	Options map[string]any `json:"options,omitempty"`
+}
+
+// RuntimeWithStatus extends [Runtime] to hold [RuntimeStatus].
+type RuntimeWithStatus struct { + Runtime + Status map[string]string `json:"status,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/system/version_response.go b/vendor/github.com/moby/moby/api/types/system/version_response.go new file mode 100644 index 000000000..61cd1b6e2 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/system/version_response.go @@ -0,0 +1,58 @@ +package system + +// VersionResponse contains information about the Docker server host. +// GET "/version" +type VersionResponse struct { + // Platform is the platform (product name) the server is running on. + Platform PlatformInfo `json:",omitempty"` + + // Version is the version of the daemon. + Version string + + // APIVersion is the highest API version supported by the server. + APIVersion string `json:"ApiVersion"` + + // MinAPIVersion is the minimum API version the server supports. + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + + // Os is the operating system the server runs on. + Os string + + // Arch is the hardware architecture the server runs on. + Arch string + + // Components contains version information for the components making + // up the server. Information in this field is for informational + // purposes, and not part of the API contract. + Components []ComponentVersion `json:",omitempty"` + + // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility + + GitCommit string `json:",omitempty"` + GoVersion string `json:",omitempty"` + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// PlatformInfo holds information about the platform (product name) the +// server is running on. +type PlatformInfo struct { + // Name is the name of the platform (for example, "Docker Engine - Community", + // or "Docker Desktop 4.49.0 (208003)") + Name string +} + +// ComponentVersion describes the version information for a specific component. +type ComponentVersion struct { + Name string + Version string + + // Details contains Key/value pairs of strings with additional information + // about the component. These values are intended for informational purposes + // only, and their content is not defined, and not part of the API + // specification. + // + // These messages can be printed by the client as information to the user. + Details map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/types.go b/vendor/github.com/moby/moby/api/types/types.go new file mode 100644 index 000000000..bb5aa41a8 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/types.go @@ -0,0 +1,21 @@ +package types + +const ( + // MediaTypeRawStream is vendor specific MIME-Type set for raw TTY streams. + MediaTypeRawStream = "application/vnd.docker.raw-stream" + + // MediaTypeMultiplexedStream is vendor specific MIME-Type set for stdin/stdout/stderr multiplexed streams. + MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream" + + // MediaTypeJSON is the MIME-Type for JSON objects. + MediaTypeJSON = "application/json" + + // MediaTypeNDJSON is the MIME-Type for Newline Delimited JSON objects streams (https://github.com/ndjson/ndjson-spec). + MediaTypeNDJSON = "application/x-ndjson" + + // MediaTypeJSONLines is the MIME-Type for JSONLines objects streams (https://jsonlines.org/). + MediaTypeJSONLines = "application/jsonl" + + // MediaTypeJSONSequence is the MIME-Type for JSON Text Sequences (RFC7464). 
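+	// Per RFC 7464, each JSON text in such a stream is preceded by an RS
+	// (0x1E) byte and terminated by a line feed.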
+ MediaTypeJSONSequence = "application/json-seq" +) diff --git a/vendor/github.com/moby/moby/api/types/volume/cluster_volume.go b/vendor/github.com/moby/moby/api/types/volume/cluster_volume.go new file mode 100644 index 000000000..07b75d12a --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/cluster_volume.go @@ -0,0 +1,420 @@ +package volume + +import ( + "github.com/moby/moby/api/types/swarm" +) + +// ClusterVolume contains options and information specific to, and only present +// on, Swarm CSI cluster volumes. +type ClusterVolume struct { + // ID is the Swarm ID of the volume. Because cluster volumes are Swarm + // objects, they have an ID, unlike non-cluster volumes, which only have a + // Name. This ID can be used to refer to the cluster volume. + ID string + + // Meta is the swarm metadata about this volume. + swarm.Meta + + // Spec is the cluster-specific options from which this volume is derived. + Spec ClusterVolumeSpec + + // PublishStatus contains the status of the volume as it pertains to its + // publishing on Nodes. + PublishStatus []*PublishStatus `json:",omitempty"` + + // Info is information about the global status of the volume. + Info *Info `json:",omitempty"` +} + +// ClusterVolumeSpec contains the spec used to create this volume. +type ClusterVolumeSpec struct { + // Group defines the volume group of this volume. Volumes belonging to the + // same group can be referred to by group name when creating Services. + // Referring to a volume by group instructs swarm to treat volumes in that + // group interchangeably for the purpose of scheduling. Volumes with an + // empty string for a group technically all belong to the same, emptystring + // group. + Group string `json:",omitempty"` + + // AccessMode defines how the volume is used by tasks. + AccessMode *AccessMode `json:",omitempty"` + + // AccessibilityRequirements specifies where in the cluster a volume must + // be accessible from. + // + // This field must be empty if the plugin does not support + // VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the + // plugin does not support it, volume will not be created. + // + // If AccessibilityRequirements is empty, but the plugin does support + // VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire + // cluster is a valid target for the volume. + AccessibilityRequirements *TopologyRequirement `json:",omitempty"` + + // CapacityRange defines the desired capacity that the volume should be + // created with. If nil, the plugin will decide the capacity. + CapacityRange *CapacityRange `json:",omitempty"` + + // Secrets defines Swarm Secrets that are passed to the CSI storage plugin + // when operating on this volume. + Secrets []Secret `json:",omitempty"` + + // Availability is the Volume's desired availability. Analogous to Node + // Availability, this allows the user to take volumes offline in order to + // update or delete them. + Availability Availability `json:",omitempty"` +} + +// Availability specifies the availability of the volume. +type Availability string + +const ( + // AvailabilityActive indicates that the volume is active and fully + // schedulable on the cluster. + AvailabilityActive Availability = "active" + + // AvailabilityPause indicates that no new workloads should use the + // volume, but existing workloads can continue to use it. 
+ AvailabilityPause Availability = "pause" + + // AvailabilityDrain indicates that all workloads using this volume + // should be rescheduled, and the volume unpublished from all nodes. + AvailabilityDrain Availability = "drain" +) + +// AccessMode defines the access mode of a volume. +type AccessMode struct { + // Scope defines the set of nodes this volume can be used on at one time. + Scope Scope `json:",omitempty"` + + // Sharing defines the number and way that different tasks can use this + // volume at one time. + Sharing SharingMode `json:",omitempty"` + + // MountVolume defines options for using this volume as a Mount-type + // volume. + // + // Either BlockVolume or MountVolume, but not both, must be present. + MountVolume *TypeMount `json:",omitempty"` + + // BlockVolume defines options for using this volume as a Block-type + // volume. + // + // Either BlockVolume or MountVolume, but not both, must be present. + BlockVolume *TypeBlock `json:",omitempty"` +} + +// Scope defines the Scope of a Cluster Volume. This is how many nodes a +// Volume can be accessed simultaneously on. +type Scope string + +const ( + // ScopeSingleNode indicates the volume can be used on one node at a + // time. + ScopeSingleNode Scope = "single" + + // ScopeMultiNode indicates the volume can be used on many nodes at + // the same time. + ScopeMultiNode Scope = "multi" +) + +// SharingMode defines the Sharing of a Cluster Volume. This is how Tasks using a +// Volume at the same time can use it. +type SharingMode string + +const ( + // SharingNone indicates that only one Task may use the Volume at a + // time. + SharingNone SharingMode = "none" + + // SharingReadOnly indicates that the Volume may be shared by any + // number of Tasks, but they must be read-only. + SharingReadOnly SharingMode = "readonly" + + // SharingOneWriter indicates that the Volume may be shared by any + // number of Tasks, but all after the first must be read-only. + SharingOneWriter SharingMode = "onewriter" + + // SharingAll means that the Volume may be shared by any number of + // Tasks, as readers or writers. + SharingAll SharingMode = "all" +) + +// TypeBlock defines options for using a volume as a block-type volume. +// +// Intentionally empty. +type TypeBlock struct{} + +// TypeMount contains options for using a volume as a Mount-type +// volume. +type TypeMount struct { + // FsType specifies the filesystem type for the mount volume. Optional. + FsType string `json:",omitempty"` + + // MountFlags defines flags to pass when mounting the volume. Optional. + MountFlags []string `json:",omitempty"` +} + +// TopologyRequirement expresses the user's requirements for a volume's +// accessible topology. +type TopologyRequirement struct { + // Requisite specifies a list of Topologies, at least one of which the + // volume must be accessible from. + // + // Taken verbatim from the CSI Spec: + // + // Specifies the list of topologies the provisioned volume MUST be + // accessible from. + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // If requisite is specified, the provisioned volume MUST be + // accessible from at least one of the requisite topologies. + // + // Given + // x = number of topologies provisioned volume is accessible from + // n = number of requisite topologies + // The CO MUST ensure n >= 1. 
The SP MUST ensure x >= 1
+	// If x==n, then the SP MUST make the provisioned volume available to
+	// all topologies from the list of requisite topologies. If it is
+	// unable to do so, the SP MUST fail the CreateVolume call.
+	// For example, if a volume should be accessible from a single zone,
+	// and requisite =
+	//   {"region": "R1", "zone": "Z2"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and the "zone" "Z2".
+	// Similarly, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and both "zone" "Z2" and "zone" "Z3".
+	//
+	// If x>n, then the SP MUST make the provisioned volume available from
+	// all topologies from the list of requisite topologies and MAY choose
+	// the remaining x-n unique topologies from the list of all possible
+	// topologies. If it is unable to do so, the SP MUST fail the
+	// CreateVolume call.
+	// For example, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and the "zone" "Z2" and the SP may select the second zone
+	// independently, e.g. "R1/Z4".
+	Requisite []Topology `json:",omitempty"`
+
+	// Preferred is a list of Topologies that the volume should attempt to be
+	// provisioned in.
+	//
+	// Taken from the CSI spec:
+	//
+	// Specifies the list of topologies the CO would prefer the volume to
+	// be provisioned in.
+	//
+	// This field is OPTIONAL. If TopologyRequirement is specified either
+	// requisite or preferred or both MUST be specified.
+	//
+	// An SP MUST attempt to make the provisioned volume available using
+	// the preferred topologies in order from first to last.
+	//
+	// If requisite is specified, all topologies in preferred list MUST
+	// also be present in the list of requisite topologies.
+	//
+	// If the SP is unable to make the provisioned volume available
+	// from any of the preferred topologies, the SP MAY choose a topology
+	// from the list of requisite topologies.
+	// If the list of requisite topologies is not specified, then the SP
+	// MAY choose from the list of all possible topologies.
+	// If the list of requisite topologies is specified and the SP is
+	// unable to make the provisioned volume available from any of the
+	// requisite topologies it MUST fail the CreateVolume call.
+	//
+	// Example 1:
+	// Given a volume should be accessible from a single zone, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z3"}
+	// then the SP SHOULD first attempt to make the provisioned volume
+	// available from "zone" "Z3" in the "region" "R1" and fall back to
+	// "zone" "Z2" in the "region" "R1" if that is not possible.
+	//
+	// Example 2:
+	// Given a volume should be accessible from a single zone, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"},
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z5"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z2"}
+	// then the SP SHOULD first attempt to make the provisioned volume
+	// accessible from "zone" "Z4" in the "region" "R1" and fall back to
+	// "zone" "Z2" in the "region" "R1" if that is not possible.
If that + // is not possible, the SP may choose between either the "zone" + // "Z3" or "Z5" in the "region" "R1". + // + // Example 3: + // Given a volume should be accessible from TWO zones (because an + // opaque parameter in CreateVolumeRequest, for example, specifies + // the volume is accessible from two zones, aka synchronously + // replicated), and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z5"}, + // {"region": "R1", "zone": "Z3"} + // then the SP SHOULD first attempt to make the provisioned volume + // accessible from the combination of the two "zones" "Z5" and "Z3" in + // the "region" "R1". If that's not possible, it should fall back to + // a combination of "Z5" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of "Z3" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of other possibilities from the list of requisite. + Preferred []Topology `json:",omitempty"` +} + +// Topology is a map of topological domains to topological segments. +// +// This description is taken verbatim from the CSI Spec: +// +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// A topological segment is a specific instance of a topological domain, +// like "zone3", "rack3", etc. +// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +type Topology struct { + Segments map[string]string `json:",omitempty"` +} + +// CapacityRange describes the minimum and maximum capacity a volume should be +// created with +type CapacityRange struct { + // RequiredBytes specifies that a volume must be at least this big. The + // value of 0 indicates an unspecified minimum. + RequiredBytes int64 + + // LimitBytes specifies that a volume must not be bigger than this. 
The + // value of 0 indicates an unspecified maximum + LimitBytes int64 +} + +// Secret represents a Swarm Secret value that must be passed to the CSI +// storage plugin when operating on this Volume. It represents one key-value +// pair of possibly many. +type Secret struct { + // Key is the name of the key of the key-value pair passed to the plugin. + Key string + + // Secret is the swarm Secret object from which to read data. This can be a + // Secret name or ID. The Secret data is retrieved by Swarm and used as the + // value of the key-value pair passed to the plugin. + Secret string +} + +// PublishState represents the state of a Volume as it pertains to its +// use on a particular Node. +type PublishState string + +const ( + // StatePending indicates that the volume should be published on + // this node, but the call to ControllerPublishVolume has not been + // successfully completed yet and the result recorded by swarmkit. + StatePending PublishState = "pending-publish" + + // StatePublished means the volume is published successfully to the node. + StatePublished PublishState = "published" + + // StatePendingNodeUnpublish indicates that the Volume should be + // unpublished on the Node, and we're waiting for confirmation that it has + // done so. After the Node has confirmed that the Volume has been + // unpublished, the state will move to StatePendingUnpublish. + StatePendingNodeUnpublish PublishState = "pending-node-unpublish" + + // StatePendingUnpublish means the volume is still published to the node + // by the controller, awaiting the operation to unpublish it. + StatePendingUnpublish PublishState = "pending-controller-unpublish" +) + +// PublishStatus represents the status of the volume as published to an +// individual node +type PublishStatus struct { + // NodeID is the ID of the swarm node this Volume is published to. + NodeID string `json:",omitempty"` + + // State is the publish state of the volume. + State PublishState `json:",omitempty"` + + // PublishContext is the PublishContext returned by the CSI plugin when + // a volume is published. + PublishContext map[string]string `json:",omitempty"` +} + +// Info contains information about the Volume as a whole as provided by +// the CSI storage plugin. +type Info struct { + // CapacityBytes is the capacity of the volume in bytes. A value of 0 + // indicates that the capacity is unknown. + CapacityBytes int64 `json:",omitempty"` + + // VolumeContext is the context originating from the CSI storage plugin + // when the Volume is created. + VolumeContext map[string]string `json:",omitempty"` + + // VolumeID is the ID of the Volume as seen by the CSI storage plugin. This + // is distinct from the Volume's Swarm ID, which is the ID used by all of + // the Docker Engine to refer to the Volume. If this field is blank, then + // the Volume has not been successfully created yet. + VolumeID string `json:",omitempty"` + + // AccessibleTopology is the topology this volume is actually accessible + // from. + AccessibleTopology []Topology `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/volume/create_request.go b/vendor/github.com/moby/moby/api/types/volume/create_request.go new file mode 100644 index 000000000..3217df827 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/create_request.go @@ -0,0 +1,36 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package volume + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// CreateRequest VolumeConfig +// +// # Volume configuration +// +// swagger:model CreateRequest +type CreateRequest struct { + + // cluster volume spec + ClusterVolumeSpec *ClusterVolumeSpec `json:"ClusterVolumeSpec,omitempty"` + + // Name of the volume driver to use. + // Example: custom + Driver string `json:"Driver,omitempty"` + + // A mapping of driver options and values. These options are + // passed directly to the driver and are driver specific. + // + // Example: {"device":"tmpfs","o":"size=100m,uid=1000","type":"tmpfs"} + DriverOpts map[string]string `json:"DriverOpts,omitempty"` + + // User-defined key/value metadata. + // Example: {"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"} + Labels map[string]string `json:"Labels,omitempty"` + + // The new volume's name. If not specified, Docker generates a name. + // + // Example: tardis + Name string `json:"Name,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/volume/disk_usage.go b/vendor/github.com/moby/moby/api/types/volume/disk_usage.go new file mode 100644 index 000000000..e2afbac65 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/disk_usage.go @@ -0,0 +1,36 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DiskUsage represents system data usage for volume resources. +// +// swagger:model DiskUsage +type DiskUsage struct { + + // Count of active volumes. + // + // Example: 1 + ActiveCount int64 `json:"ActiveCount,omitempty"` + + // List of volumes. + // + Items []Volume `json:"Items,omitempty"` + + // Disk space that can be reclaimed by removing inactive volumes. + // + // Example: 12345678 + Reclaimable int64 `json:"Reclaimable,omitempty"` + + // Count of all volumes. + // + // Example: 4 + TotalCount int64 `json:"TotalCount,omitempty"` + + // Disk space in use by volumes. + // + // Example: 98765432 + TotalSize int64 `json:"TotalSize,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/volume/list_response.go b/vendor/github.com/moby/moby/api/types/volume/list_response.go new file mode 100644 index 000000000..f257762f0 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/list_response.go @@ -0,0 +1,22 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ListResponse VolumeListResponse +// +// # Volume list response +// +// swagger:model ListResponse +type ListResponse struct { + + // List of volumes + Volumes []Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes. 
+ // + // Example: [] + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/api/types/volume/prune_report.go b/vendor/github.com/moby/moby/api/types/volume/prune_report.go new file mode 100644 index 000000000..7f501d01a --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/prune_report.go @@ -0,0 +1,8 @@ +package volume + +// PruneReport contains the response for Engine API: +// POST "/volumes/prune" +type PruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} diff --git a/vendor/github.com/moby/moby/api/types/volume/volume.go b/vendor/github.com/moby/moby/api/types/volume/volume.go new file mode 100644 index 000000000..524ebfb8a --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/volume.go @@ -0,0 +1,87 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// +// swagger:model Volume +type Volume struct { + + // cluster volume + ClusterVolume *ClusterVolume `json:"ClusterVolume,omitempty"` + + // Date/Time the volume was created. + // Example: 2016-06-07T20:31:11.853781916Z + CreatedAt string `json:"CreatedAt,omitempty"` + + // Name of the volume driver used by the volume. + // Example: custom + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Example: {"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"} + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Example: /var/lib/docker/volumes/tardis + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Example: tardis + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // + // Example: {"device":"tmpfs","o":"size=100m,uid=1000","type":"tmpfs"} + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, + // or `local` for machine level. + // + // Example: local + // Required: true + // Enum: ["local","global"] + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + // Example: {"hello":"world"} + Status map[string]any `json:"Status,omitempty"` + + // usage data + UsageData *UsageData `json:"UsageData,omitempty"` +} + +// UsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// +// swagger:model UsageData +type UsageData struct { + + // The number of containers referencing this volume. This field + // is set to `-1` if the reference-count is not available. + // + // Required: true + RefCount int64 `json:"RefCount"` + + // Amount of disk space used by the volume (in bytes). This information + // is only available for volumes created with the `"local"` volume + // driver. 
For volumes created with other volume drivers, this field + // is set to `-1` ("not available") + // + // Required: true + Size int64 `json:"Size"` +} diff --git a/vendor/github.com/moby/moby/client/LICENSE b/vendor/github.com/moby/moby/client/LICENSE new file mode 100644 index 000000000..6d8d58fb6 --- /dev/null +++ b/vendor/github.com/moby/moby/client/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/moby/client/README.md b/vendor/github.com/moby/moby/client/README.md new file mode 100644 index 000000000..115e604db --- /dev/null +++ b/vendor/github.com/moby/moby/client/README.md @@ -0,0 +1,52 @@ +# Go client for the Docker Engine API + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/moby/moby/client)](https://pkg.go.dev/github.com/moby/moby/client) +![GitHub License](https://img.shields.io/github/license/moby/moby) +[![Go Report Card](https://goreportcard.com/badge/github.com/moby/moby/client)](https://goreportcard.com/report/github.com/moby/moby/client) +[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/moby/moby/badge)](https://scorecard.dev/viewer/?uri=github.com/moby/moby) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/10989/badge)](https://www.bestpractices.dev/projects/10989) + +The `docker` command uses this package to communicate with the daemon. It can +also be used by your own Go applications to do anything the command-line +interface does; running containers, pulling or pushing images, etc. + +For example, to list all containers (the equivalent of `docker ps --all`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/moby/moby/client" +) + +func main() { + // Create a new client that handles common environment variables + // for configuration (DOCKER_HOST, DOCKER_API_VERSION), and does + // API-version negotiation to allow downgrading the API version + // when connecting with an older daemon version. + apiClient, err := client.New(client.FromEnv) + if err != nil { + panic(err) + } + defer apiClient.Close() + + // List all containers (both stopped and running). + result, err := apiClient.ContainerList(context.Background(), client.ContainerListOptions{ + All: true, + }) + if err != nil { + panic(err) + } + + // Print each container's ID, status and the image it was created from. + fmt.Printf("%s %-22s %s\n", "ID", "STATUS", "IMAGE") + for _, ctr := range result.Items { + fmt.Printf("%s %-22s %s\n", ctr.ID, ctr.Status, ctr.Image) + } +} +``` + +[Full documentation is available on pkg.go.dev.](https://pkg.go.dev/github.com/moby/moby/client) diff --git a/vendor/github.com/moby/moby/client/auth.go b/vendor/github.com/moby/moby/client/auth.go new file mode 100644 index 000000000..8baf39d2c --- /dev/null +++ b/vendor/github.com/moby/moby/client/auth.go @@ -0,0 +1,14 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/registry" +) + +// staticAuth creates a privilegeFn from the given registryAuth. 
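+// The returned function ignores its context and always returns the
+// registryAuth string unchanged.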
+func staticAuth(registryAuth string) registry.RequestAuthConfig {
+    return func(ctx context.Context) (string, error) {
+        return registryAuth, nil
+    }
+}
diff --git a/vendor/github.com/moby/moby/client/build_cancel.go b/vendor/github.com/moby/moby/client/build_cancel.go
new file mode 100644
index 000000000..f6cfc6bc9
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/build_cancel.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+    "context"
+    "net/url"
+)
+
+type BuildCancelOptions struct{}
+
+type BuildCancelResult struct{}
+
+// BuildCancel requests the daemon to cancel the ongoing build request
+// with the given id.
+func (cli *Client) BuildCancel(ctx context.Context, id string, _ BuildCancelOptions) (BuildCancelResult, error) {
+    query := url.Values{}
+    query.Set("id", id)
+
+    resp, err := cli.post(ctx, "/build/cancel", query, nil, nil)
+    defer ensureReaderClosed(resp)
+    return BuildCancelResult{}, err
+}
diff --git a/vendor/github.com/moby/moby/client/build_prune.go b/vendor/github.com/moby/moby/client/build_prune.go
new file mode 100644
index 000000000..a22e9685e
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/build_prune.go
@@ -0,0 +1,67 @@
+package client
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "net/url"
+    "strconv"
+
+    "github.com/moby/moby/api/types/build"
+    "github.com/moby/moby/client/pkg/versions"
+)
+
+// BuildCachePruneOptions holds parameters to prune the build cache.
+type BuildCachePruneOptions struct {
+    All           bool
+    ReservedSpace int64
+    MaxUsedSpace  int64
+    MinFreeSpace  int64
+    Filters       Filters
+}
+
+// BuildCachePruneResult holds the result from the BuildCachePrune method.
+type BuildCachePruneResult struct {
+    Report build.CachePruneReport
+}
+
+// BuildCachePrune requests the daemon to delete unused cache data.
+func (cli *Client) BuildCachePrune(ctx context.Context, opts BuildCachePruneOptions) (BuildCachePruneResult, error) {
+    var out BuildCachePruneResult
+    query := url.Values{}
+    if opts.All {
+        query.Set("all", "1")
+    }
+
+    if opts.ReservedSpace != 0 {
+        // Prior to API v1.48, 'keep-storage' was used to set the reserved space for the build cache.
+        // TODO(austinvazquez): remove once API v1.47 is no longer supported. See https://github.com/moby/moby/issues/50902
+        if versions.LessThanOrEqualTo(cli.version, "1.47") {
+            query.Set("keep-storage", strconv.Itoa(int(opts.ReservedSpace)))
+        } else {
+            query.Set("reserved-space", strconv.Itoa(int(opts.ReservedSpace)))
+        }
+    }
+    if opts.MaxUsedSpace != 0 {
+        query.Set("max-used-space", strconv.Itoa(int(opts.MaxUsedSpace)))
+    }
+    if opts.MinFreeSpace != 0 {
+        query.Set("min-free-space", strconv.Itoa(int(opts.MinFreeSpace)))
+    }
+    opts.Filters.updateURLValues(query)
+
+    resp, err := cli.post(ctx, "/build/prune", query, nil, nil)
+    defer ensureReaderClosed(resp)
+
+    if err != nil {
+        return BuildCachePruneResult{}, err
+    }
+
+    report := build.CachePruneReport{}
+    if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
+        return BuildCachePruneResult{}, fmt.Errorf("error retrieving disk usage: %w", err)
+    }
+
+    out.Report = report
+    return out, nil
+}
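BuildCachePrune above maps its options onto query parameters and version-gates the `reserved-space` parameter for older daemons. A minimal sketch of a call site, assuming `build.CachePruneReport` exposes a `SpaceReclaimed` field as in the docker/docker API types:

```go
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/moby/moby/client"
)

func main() {
    apiClient, err := client.New(client.FromEnv)
    if err != nil {
        log.Fatal(err)
    }
    defer apiClient.Close()

    // Remove all unused cache entries, but keep roughly 512 MiB reserved.
    res, err := apiClient.BuildCachePrune(context.Background(), client.BuildCachePruneOptions{
        All:           true,
        ReservedSpace: 512 * 1024 * 1024,
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("space reclaimed:", res.Report.SpaceReclaimed) // SpaceReclaimed field assumed
}
```

diff --git a/vendor/github.com/moby/moby/client/checkpoint_create.go b/vendor/github.com/moby/moby/client/checkpoint_create.go
new file mode 100644
index 000000000..b3ba5459d
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/checkpoint_create.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+    "context"
+
+    "github.com/moby/moby/api/types/checkpoint"
+)
+
+// CheckpointCreateOptions holds parameters to create a checkpoint from a container.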
+type CheckpointCreateOptions struct {
+    CheckpointID  string
+    CheckpointDir string
+    Exit          bool
+}
+
+// CheckpointCreateResult holds the result from [client.CheckpointCreate].
+type CheckpointCreateResult struct {
+    // Add future fields here
+}
+
+// CheckpointCreate creates a checkpoint from the given container.
+func (cli *Client) CheckpointCreate(ctx context.Context, containerID string, options CheckpointCreateOptions) (CheckpointCreateResult, error) {
+    containerID, err := trimID("container", containerID)
+    if err != nil {
+        return CheckpointCreateResult{}, err
+    }
+    requestBody := checkpoint.CreateRequest{
+        CheckpointID:  options.CheckpointID,
+        CheckpointDir: options.CheckpointDir,
+        Exit:          options.Exit,
+    }
+
+    resp, err := cli.post(ctx, "/containers/"+containerID+"/checkpoints", nil, requestBody, nil)
+    defer ensureReaderClosed(resp)
+    return CheckpointCreateResult{}, err
+}
diff --git a/vendor/github.com/moby/moby/client/checkpoint_list.go b/vendor/github.com/moby/moby/client/checkpoint_list.go
new file mode 100644
index 000000000..5815f836a
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/checkpoint_list.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+    "context"
+    "encoding/json"
+    "net/url"
+
+    "github.com/moby/moby/api/types/checkpoint"
+)
+
+// CheckpointListOptions holds parameters to list checkpoints for a container.
+type CheckpointListOptions struct {
+    CheckpointDir string
+}
+
+// CheckpointListResult holds the result from the CheckpointList method.
+type CheckpointListResult struct {
+    Items []checkpoint.Summary
+}
+
+// CheckpointList returns the checkpoints of the given container on the docker host.
+func (cli *Client) CheckpointList(ctx context.Context, container string, options CheckpointListOptions) (CheckpointListResult, error) {
+    var out CheckpointListResult
+
+    query := url.Values{}
+    if options.CheckpointDir != "" {
+        query.Set("dir", options.CheckpointDir)
+    }
+
+    resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
+    defer ensureReaderClosed(resp)
+    if err != nil {
+        return out, err
+    }
+
+    err = json.NewDecoder(resp.Body).Decode(&out.Items)
+    return out, err
+}
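The three checkpoint endpoints form a small lifecycle (create, list, remove), and only the request plumbing is visible in these hunks. A hedged end-to-end sketch; the container name, checkpoint ID, and the `Name` field on `checkpoint.Summary` are assumptions, and the daemon must run with experimental features enabled:

```go
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/moby/moby/client"
)

func main() {
    apiClient, err := client.New(client.FromEnv)
    if err != nil {
        log.Fatal(err)
    }
    defer apiClient.Close()
    ctx := context.Background()

    // Checkpoint a running container without stopping it (Exit: false).
    if _, err := apiClient.CheckpointCreate(ctx, "my-container", client.CheckpointCreateOptions{
        CheckpointID: "cp1", // hypothetical checkpoint ID
        Exit:         false,
    }); err != nil {
        log.Fatal(err)
    }

    // List checkpoints for the container, then clean up.
    list, err := apiClient.CheckpointList(ctx, "my-container", client.CheckpointListOptions{})
    if err != nil {
        log.Fatal(err)
    }
    for _, cp := range list.Items {
        fmt.Println("checkpoint:", cp.Name) // Name field assumed
    }

    if _, err := apiClient.CheckpointRemove(ctx, "my-container", client.CheckpointRemoveOptions{
        CheckpointID: "cp1",
    }); err != nil {
        log.Fatal(err)
    }
}
```

diff --git a/vendor/github.com/moby/moby/client/checkpoint_remove.go b/vendor/github.com/moby/moby/client/checkpoint_remove.go
new file mode 100644
index 000000000..8042c5088
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/checkpoint_remove.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+    "context"
+    "net/url"
+)
+
+// CheckpointRemoveOptions holds parameters to delete a checkpoint from a container.
+type CheckpointRemoveOptions struct {
+    CheckpointID  string
+    CheckpointDir string
+}
+
+// CheckpointRemoveResult represents the result of [Client.CheckpointRemove].
+type CheckpointRemoveResult struct {
+    // No fields currently; placeholder for future use.
+}
+
+// CheckpointRemove deletes the checkpoint with the given name from the given container.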
+func (cli *Client) CheckpointRemove(ctx context.Context, containerID string, options CheckpointRemoveOptions) (CheckpointRemoveResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return CheckpointRemoveResult{}, err + } + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + defer ensureReaderClosed(resp) + return CheckpointRemoveResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/client.go b/vendor/github.com/moby/moby/client/client.go new file mode 100644 index 000000000..0567ba11d --- /dev/null +++ b/vendor/github.com/moby/moby/client/client.go @@ -0,0 +1,428 @@ +/* +Package client is a Go client for the Docker Engine API. + +For more information about the Engine API, see the documentation: +https://docs.docker.com/reference/api/engine/ + +# Usage + +You use the library by constructing a client object using [New] +and calling methods on it. The client can be configured from environment +variables by passing the [FromEnv] option. Other options can be configured +manually by passing any of the available [Opt] options. + +For example, to list running containers (the equivalent of "docker ps"): + + package main + + import ( + "context" + "fmt" + "log" + + "github.com/moby/moby/client" + ) + + func main() { + // Create a new client that handles common environment variables + // for configuration (DOCKER_HOST, DOCKER_API_VERSION), and does + // API-version negotiation to allow downgrading the API version + // when connecting with an older daemon version. + apiClient, err := client.New(client.FromEnv) + if err != nil { + log.Fatal(err) + } + + // List all containers (both stopped and running). + result, err := apiClient.ContainerList(context.Background(), client.ContainerListOptions{ + All: true, + }) + if err != nil { + log.Fatal(err) + } + + // Print each container's ID, status and the image it was created from. + fmt.Printf("%s %-22s %s\n", "ID", "STATUS", "IMAGE") + for _, ctr := range result.Items { + fmt.Printf("%s %-22s %s\n", ctr.ID, ctr.Status, ctr.Image) + } + } +*/ +package client + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "net/url" + "path" + "strings" + "sync" + "sync/atomic" + "time" + + cerrdefs "github.com/containerd/errdefs" + "github.com/docker/go-connections/sockets" + "github.com/moby/moby/client/pkg/versions" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" +) + +// DummyHost is a hostname used for local communication. +// +// It acts as a valid formatted hostname for local connections (such as "unix://" +// or "npipe://") which do not require a hostname. It should never be resolved, +// but uses the special-purpose ".localhost" TLD (as defined in [RFC 2606, Section 2] +// and [RFC 6761, Section 6.3]). +// +// [RFC 7230, Section 5.4] defines that an empty header must be used for such +// cases: +// +// If the authority component is missing or undefined for the target URI, +// then a client MUST send a Host header field with an empty field-value. +// +// However, [Go stdlib] enforces the semantics of HTTP(S) over TCP, does not +// allow an empty header to be used, and requires req.URL.Scheme to be either +// "http" or "https". 
+//
+// For further details, refer to:
+//
+//   - https://github.com/docker/engine-api/issues/189
+//   - https://github.com/golang/go/issues/13624
+//   - https://github.com/golang/go/issues/61076
+//   - https://github.com/moby/moby/issues/45935
+//
+// [RFC 2606, Section 2]: https://www.rfc-editor.org/rfc/rfc2606.html#section-2
+// [RFC 6761, Section 6.3]: https://www.rfc-editor.org/rfc/rfc6761#section-6.3
+// [RFC 7230, Section 5.4]: https://datatracker.ietf.org/doc/html/rfc7230#section-5.4
+// [Go stdlib]: https://github.com/golang/go/blob/6244b1946bc2101b01955468f1be502dbadd6807/src/net/http/transport.go#L558-L569
+const DummyHost = "api.moby.localhost"
+
+// MaxAPIVersion is the highest REST API version supported by the client.
+// If API-version negotiation is enabled, the client may downgrade its API version.
+// Similarly, the [WithAPIVersion] and [WithAPIVersionFromEnv] options allow
+// overriding the version and disable API-version negotiation.
+//
+// This version may be lower than the version of the api library module used.
+const MaxAPIVersion = "1.52"
+
+// MinAPIVersion is the minimum API version supported by the client. API versions
+// below this version are not considered when performing API-version negotiation.
+const MinAPIVersion = "1.44"
+
+// Ensure that Client always implements APIClient.
+var _ APIClient = &Client{}
+
+// Client is the API client that performs all operations
+// against a docker server.
+type Client struct {
+    clientConfig
+
+    // negotiated indicates that API version negotiation took place
+    negotiated atomic.Bool
+
+    // negotiateLock is used to single-flight the version negotiation process
+    negotiateLock sync.Mutex
+
+    // When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections).
+    // Store the original transport as the http.Client transport will be wrapped with tracing libs.
+    baseTransport *http.Transport
+}
+
+// ErrRedirect is the error returned by checkRedirect when the request is non-GET.
+var ErrRedirect = errors.New("unexpected redirect in response")
+
+// CheckRedirect specifies the policy for dealing with redirect responses. It
+// can be set on [http.Client.CheckRedirect] to prevent HTTP redirects for
+// non-GET requests. It returns an [ErrRedirect] for non-GET requests, otherwise
+// returns a [http.ErrUseLastResponse], which is special-cased by http.Client
+// to use the last response.
+//
+// Go 1.8 changed behavior for HTTP redirects (specifically 301, 307, and 308)
+// in the client. The client (and by extension API client) can be made to send
+// a request like "POST /containers//start" where what would normally be in the
+// name section of the URL is empty. This triggers an HTTP 301 from the daemon.
+//
+// In Go 1.8 this 301 is converted to a GET request, and ends up getting
+// a 404 from the daemon. This behavior change manifests in the client in that
+// before, the 301 was not followed and the client did not generate an error,
+// but now results in a message like "Error response from daemon: page not found".
+func CheckRedirect(_ *http.Request, via []*http.Request) error {
+    if via[0].Method == http.MethodGet {
+        return http.ErrUseLastResponse
+    }
+    return ErrRedirect
+}
+
+// NewClientWithOpts initializes a new API client.
+//
+// Deprecated: use New. This function will be removed in the next release.
+func NewClientWithOpts(ops ...Opt) (*Client, error) {
+    return New(ops...)
+} + +// New initializes a new API client with a default HTTPClient, and +// default API host and version. It also initializes the custom HTTP headers to +// add to each request. +// +// It takes an optional list of [Opt] functional arguments, which are applied in +// the order they're provided, which allows modifying the defaults when creating +// the client. For example, the following initializes a client that configures +// itself with values from environment variables ([FromEnv]). +// +// By default, the client automatically negotiates the API version to use when +// making requests. API version negotiation is performed on the first request; +// subsequent requests do not re-negotiate. Use [WithAPIVersion] or +// [WithAPIVersionFromEnv] to configure the client with a fixed API version +// and disable API version negotiation. +// +// cli, err := client.New( +// client.FromEnv, +// client.WithAPIVersionNegotiation(), +// ) +func New(ops ...Opt) (*Client, error) { + hostURL, err := ParseHostURL(DefaultDockerHost) + if err != nil { + return nil, err + } + + client, err := defaultHTTPClient(hostURL) + if err != nil { + return nil, err + } + c := &Client{ + clientConfig: clientConfig{ + host: DefaultDockerHost, + version: MaxAPIVersion, + client: client, + proto: hostURL.Scheme, + addr: hostURL.Host, + traceOpts: []otelhttp.Option{ + otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string { + return req.Method + " " + req.URL.Path + }), + }, + }, + } + cfg := &c.clientConfig + + for _, op := range ops { + if err := op(cfg); err != nil { + return nil, err + } + } + + if cfg.envAPIVersion != "" { + c.setAPIVersion(cfg.envAPIVersion) + } else if cfg.manualAPIVersion != "" { + c.setAPIVersion(cfg.manualAPIVersion) + } + + if tr, ok := c.client.Transport.(*http.Transport); ok { + // Store the base transport before we wrap it in tracing libs below + // This is used, as an example, to close idle connections when the client is closed + c.baseTransport = tr + } + + if c.scheme == "" { + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. + if c.tlsConfig() != nil { + c.scheme = "https" + } else { + c.scheme = "http" + } + } + + c.client.Transport = otelhttp.NewTransport(c.client.Transport, c.traceOpts...) + + return c, nil +} + +func (cli *Client) tlsConfig() *tls.Config { + if cli.baseTransport == nil { + return nil + } + return cli.baseTransport.TLSClientConfig +} + +func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) { + transport := &http.Transport{} + // Necessary to prevent long-lived processes using the + // client from leaking connections due to idle connections + // not being released. + // TODO: see if we can also address this from the server side, + // or in go-connections. 
+ // see: https://github.com/moby/moby/issues/45539 + transport.MaxIdleConns = 6 + transport.IdleConnTimeout = 30 * time.Second + err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) + if err != nil { + return nil, err + } + return &http.Client{ + Transport: transport, + CheckRedirect: CheckRedirect, + }, nil +} + +// Close the transport used by the client +func (cli *Client) Close() error { + if cli.baseTransport != nil { + cli.baseTransport.CloseIdleConnections() + return nil + } + return nil +} + +// checkVersion manually triggers API version negotiation (if configured). +// This allows for version-dependent code to use the same version as will +// be negotiated when making the actual requests, and for which cases +// we cannot do the negotiation lazily. +func (cli *Client) checkVersion(ctx context.Context) error { + if cli.negotiated.Load() { + return nil + } + _, err := cli.Ping(ctx, PingOptions{ + NegotiateAPIVersion: true, + }) + return err +} + +// getAPIPath returns the versioned request path to call the API. +// It appends the query parameters to the path if they are not empty. +func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { + var apiPath string + _ = cli.checkVersion(ctx) + if cli.version != "" { + apiPath = path.Join(cli.basePath, "/v"+strings.TrimPrefix(cli.version, "v"), p) + } else { + apiPath = path.Join(cli.basePath, p) + } + return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() +} + +// ClientVersion returns the API version used by this client. +func (cli *Client) ClientVersion() string { + return cli.version +} + +// negotiateAPIVersion updates the version to match the API version from +// the ping response. +// +// It returns an error if version is invalid, or lower than the minimum +// supported API version in which case the client's API version is not +// updated, and negotiation is not marked as completed. +func (cli *Client) negotiateAPIVersion(pingVersion string) error { + var err error + pingVersion, err = parseAPIVersion(pingVersion) + if err != nil { + return err + } + + if versions.LessThan(pingVersion, MinAPIVersion) { + return cerrdefs.ErrInvalidArgument.WithMessage(fmt.Sprintf("API version %s is not supported by this client: the minimum supported API version is %s", pingVersion, MinAPIVersion)) + } + + // if the client is not initialized with a version, start with the latest supported version + negotiatedVersion := cli.version + if negotiatedVersion == "" { + negotiatedVersion = MaxAPIVersion + } + + // if server version is lower than the client version, downgrade + if versions.LessThan(pingVersion, negotiatedVersion) { + negotiatedVersion = pingVersion + } + + // Store the results, so that automatic API version negotiation (if enabled) + // won't be performed on the next request. + cli.setAPIVersion(negotiatedVersion) + return nil +} + +// setAPIVersion sets the client's API version and marks API version negotiation +// as completed, so that automatic API version negotiation (if enabled) won't +// be performed on the next request. 
+func (cli *Client) setAPIVersion(version string) { + cli.version = version + cli.negotiated.Store(true) +} + +// DaemonHost returns the host address used by the client +func (cli *Client) DaemonHost() string { + return cli.host +} + +// ParseHostURL parses a url string, validates the string is a host url, and +// returns the parsed URL +func ParseHostURL(host string) (*url.URL, error) { + proto, addr, ok := strings.Cut(host, "://") + if !ok || addr == "" { + return nil, fmt.Errorf("unable to parse docker host `%s`", host) + } + + var basePath string + if proto == "tcp" { + parsed, err := url.Parse("tcp://" + addr) + if err != nil { + return nil, err + } + addr = parsed.Host + basePath = parsed.Path + } + return &url.URL{ + Scheme: proto, + Host: addr, + Path: basePath, + }, nil +} + +func (cli *Client) dialerFromTransport() func(context.Context, string, string) (net.Conn, error) { + if cli.baseTransport == nil || cli.baseTransport.DialContext == nil { + return nil + } + + if cli.baseTransport.TLSClientConfig != nil { + // When using a tls config we don't use the configured dialer but instead a fallback dialer... + // Note: It seems like this should use the normal dialer and wrap the returned net.Conn in a tls.Conn + // I honestly don't know why it doesn't do that, but it doesn't and such a change is entirely unrelated to the change in this commit. + return nil + } + return cli.baseTransport.DialContext +} + +// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header, +// that can be used for proxying the daemon connection. It is used by +// ["docker dial-stdio"]. +// +// ["docker dial-stdio"]: https://github.com/docker/cli/pull/1014 +func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { + return cli.dialer() +} + +func (cli *Client) dialer() func(context.Context) (net.Conn, error) { + return func(ctx context.Context) (net.Conn, error) { + if dialFn := cli.dialerFromTransport(); dialFn != nil { + return dialFn(ctx, cli.proto, cli.addr) + } + switch cli.proto { + case "unix": + return net.Dial(cli.proto, cli.addr) + case "npipe": + ctx, cancel := context.WithTimeout(ctx, 32*time.Second) + defer cancel() + return dialPipeContext(ctx, cli.addr) + default: + if tlsConfig := cli.tlsConfig(); tlsConfig != nil { + return tls.Dial(cli.proto, cli.addr, tlsConfig) + } + return net.Dial(cli.proto, cli.addr) + } + } +} diff --git a/vendor/github.com/moby/moby/client/client_interfaces.go b/vendor/github.com/moby/moby/client/client_interfaces.go new file mode 100644 index 000000000..4bbd45a6e --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_interfaces.go @@ -0,0 +1,242 @@ +package client + +import ( + "context" + "io" + "net" +) + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + stableAPIClient + CheckpointAPIClient // CheckpointAPIClient is still experimental. +} + +type stableAPIClient interface { + ConfigAPIClient + ContainerAPIClient + DistributionAPIClient + RegistrySearchClient + ExecAPIClient + ImageBuildAPIClient + ImageAPIClient + NetworkAPIClient + PluginAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + DaemonHost() string + ServerVersion(ctx context.Context, options ServerVersionOptions) (ServerVersionResult, error) + HijackDialer + Dialer() func(context.Context) (net.Conn, error) + Close() error + SwarmManagementAPIClient +} + +// SwarmManagementAPIClient defines all methods for managing Swarm-specific +// objects. 
+type SwarmManagementAPIClient interface { + SwarmAPIClient + NodeAPIClient + ServiceAPIClient + TaskAPIClient + SecretAPIClient + ConfigAPIClient +} + +// HijackDialer defines methods for a hijack dialer. +type HijackDialer interface { + DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) +} + +// CheckpointAPIClient defines API client methods for the checkpoints. +// +// Experimental: checkpoint and restore is still an experimental feature, +// and only available if the daemon is running with experimental features +// enabled. +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options CheckpointCreateOptions) (CheckpointCreateResult, error) + CheckpointRemove(ctx context.Context, container string, options CheckpointRemoveOptions) (CheckpointRemoveResult, error) + CheckpointList(ctx context.Context, container string, options CheckpointListOptions) (CheckpointListResult, error) +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerCreate(ctx context.Context, options ContainerCreateOptions) (ContainerCreateResult, error) + ContainerInspect(ctx context.Context, container string, options ContainerInspectOptions) (ContainerInspectResult, error) + ContainerList(ctx context.Context, options ContainerListOptions) (ContainerListResult, error) + ContainerUpdate(ctx context.Context, container string, updateConfig ContainerUpdateOptions) (ContainerUpdateResult, error) + ContainerRemove(ctx context.Context, container string, options ContainerRemoveOptions) (ContainerRemoveResult, error) + ContainerPrune(ctx context.Context, opts ContainerPruneOptions) (ContainerPruneResult, error) + + ContainerLogs(ctx context.Context, container string, options ContainerLogsOptions) (ContainerLogsResult, error) + + ContainerStart(ctx context.Context, container string, options ContainerStartOptions) (ContainerStartResult, error) + ContainerStop(ctx context.Context, container string, options ContainerStopOptions) (ContainerStopResult, error) + ContainerRestart(ctx context.Context, container string, options ContainerRestartOptions) (ContainerRestartResult, error) + ContainerPause(ctx context.Context, container string, options ContainerPauseOptions) (ContainerPauseResult, error) + ContainerUnpause(ctx context.Context, container string, options ContainerUnpauseOptions) (ContainerUnpauseResult, error) + ContainerWait(ctx context.Context, container string, options ContainerWaitOptions) ContainerWaitResult + ContainerKill(ctx context.Context, container string, options ContainerKillOptions) (ContainerKillResult, error) + + ContainerRename(ctx context.Context, container string, options ContainerRenameOptions) (ContainerRenameResult, error) + ContainerResize(ctx context.Context, container string, options ContainerResizeOptions) (ContainerResizeResult, error) + ContainerAttach(ctx context.Context, container string, options ContainerAttachOptions) (ContainerAttachResult, error) + ContainerCommit(ctx context.Context, container string, options ContainerCommitOptions) (ContainerCommitResult, error) + ContainerDiff(ctx context.Context, container string, options ContainerDiffOptions) (ContainerDiffResult, error) + ContainerExport(ctx context.Context, container string, options ContainerExportOptions) (ContainerExportResult, error) + + ContainerStats(ctx context.Context, container string, options ContainerStatsOptions) (ContainerStatsResult, error) + ContainerTop(ctx context.Context, 
container string, options ContainerTopOptions) (ContainerTopResult, error) + + ContainerStatPath(ctx context.Context, container string, options ContainerStatPathOptions) (ContainerStatPathResult, error) + CopyFromContainer(ctx context.Context, container string, options CopyFromContainerOptions) (CopyFromContainerResult, error) + CopyToContainer(ctx context.Context, container string, options CopyToContainerOptions) (CopyToContainerResult, error) +} + +type ExecAPIClient interface { + ExecCreate(ctx context.Context, container string, options ExecCreateOptions) (ExecCreateResult, error) + ExecInspect(ctx context.Context, execID string, options ExecInspectOptions) (ExecInspectResult, error) + ExecResize(ctx context.Context, execID string, options ExecResizeOptions) (ExecResizeResult, error) + + ExecStart(ctx context.Context, execID string, options ExecStartOptions) (ExecStartResult, error) + ExecAttach(ctx context.Context, execID string, options ExecAttachOptions) (ExecAttachResult, error) +} + +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image string, options DistributionInspectOptions) (DistributionInspectResult, error) +} + +type RegistrySearchClient interface { + ImageSearch(ctx context.Context, term string, options ImageSearchOptions) (ImageSearchResult, error) +} + +// ImageBuildAPIClient defines API client methods for building images +// using the REST API. +type ImageBuildAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options ImageBuildOptions) (ImageBuildResult, error) + BuildCachePrune(ctx context.Context, opts BuildCachePruneOptions) (BuildCachePruneResult, error) + BuildCancel(ctx context.Context, id string, opts BuildCancelOptions) (BuildCancelResult, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageImport(ctx context.Context, source ImageImportSource, ref string, options ImageImportOptions) (ImageImportResult, error) + + ImageList(ctx context.Context, options ImageListOptions) (ImageListResult, error) + ImagePull(ctx context.Context, ref string, options ImagePullOptions) (ImagePullResponse, error) + ImagePush(ctx context.Context, ref string, options ImagePushOptions) (ImagePushResponse, error) + ImageRemove(ctx context.Context, image string, options ImageRemoveOptions) (ImageRemoveResult, error) + ImageTag(ctx context.Context, options ImageTagOptions) (ImageTagResult, error) + ImagePrune(ctx context.Context, opts ImagePruneOptions) (ImagePruneResult, error) + + ImageInspect(ctx context.Context, image string, _ ...ImageInspectOption) (ImageInspectResult, error) + ImageHistory(ctx context.Context, image string, _ ...ImageHistoryOption) (ImageHistoryResult, error) + + ImageLoad(ctx context.Context, input io.Reader, _ ...ImageLoadOption) (ImageLoadResult, error) + ImageSave(ctx context.Context, images []string, _ ...ImageSaveOption) (ImageSaveResult, error) +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkCreate(ctx context.Context, name string, options NetworkCreateOptions) (NetworkCreateResult, error) + NetworkInspect(ctx context.Context, network string, options NetworkInspectOptions) (NetworkInspectResult, error) + NetworkList(ctx context.Context, options NetworkListOptions) (NetworkListResult, error) + NetworkRemove(ctx context.Context, network string, options NetworkRemoveOptions) (NetworkRemoveResult, error) + 
NetworkPrune(ctx context.Context, opts NetworkPruneOptions) (NetworkPruneResult, error) + + NetworkConnect(ctx context.Context, network string, options NetworkConnectOptions) (NetworkConnectResult, error) + NetworkDisconnect(ctx context.Context, network string, options NetworkDisconnectOptions) (NetworkDisconnectResult, error) +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + NodeInspect(ctx context.Context, nodeID string, options NodeInspectOptions) (NodeInspectResult, error) + NodeList(ctx context.Context, options NodeListOptions) (NodeListResult, error) + NodeUpdate(ctx context.Context, nodeID string, options NodeUpdateOptions) (NodeUpdateResult, error) + NodeRemove(ctx context.Context, nodeID string, options NodeRemoveOptions) (NodeRemoveResult, error) +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginCreate(ctx context.Context, createContext io.Reader, options PluginCreateOptions) (PluginCreateResult, error) + PluginInstall(ctx context.Context, name string, options PluginInstallOptions) (PluginInstallResult, error) + PluginInspect(ctx context.Context, name string, options PluginInspectOptions) (PluginInspectResult, error) + PluginList(ctx context.Context, options PluginListOptions) (PluginListResult, error) + PluginRemove(ctx context.Context, name string, options PluginRemoveOptions) (PluginRemoveResult, error) + + PluginEnable(ctx context.Context, name string, options PluginEnableOptions) (PluginEnableResult, error) + PluginDisable(ctx context.Context, name string, options PluginDisableOptions) (PluginDisableResult, error) + PluginUpgrade(ctx context.Context, name string, options PluginUpgradeOptions) (PluginUpgradeResult, error) + PluginPush(ctx context.Context, name string, options PluginPushOptions) (PluginPushResult, error) + PluginSet(ctx context.Context, name string, options PluginSetOptions) (PluginSetResult, error) +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, options ServiceCreateOptions) (ServiceCreateResult, error) + ServiceInspect(ctx context.Context, serviceID string, options ServiceInspectOptions) (ServiceInspectResult, error) + ServiceList(ctx context.Context, options ServiceListOptions) (ServiceListResult, error) + ServiceUpdate(ctx context.Context, serviceID string, options ServiceUpdateOptions) (ServiceUpdateResult, error) + ServiceRemove(ctx context.Context, serviceID string, options ServiceRemoveOptions) (ServiceRemoveResult, error) + + ServiceLogs(ctx context.Context, serviceID string, options ServiceLogsOptions) (ServiceLogsResult, error) +} + +// TaskAPIClient defines API client methods to manage swarm tasks. 
+type TaskAPIClient interface { + TaskInspect(ctx context.Context, taskID string, options TaskInspectOptions) (TaskInspectResult, error) + TaskList(ctx context.Context, options TaskListOptions) (TaskListResult, error) + + TaskLogs(ctx context.Context, taskID string, options TaskLogsOptions) (TaskLogsResult, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, options SwarmInitOptions) (SwarmInitResult, error) + SwarmJoin(ctx context.Context, options SwarmJoinOptions) (SwarmJoinResult, error) + SwarmInspect(ctx context.Context, options SwarmInspectOptions) (SwarmInspectResult, error) + SwarmUpdate(ctx context.Context, options SwarmUpdateOptions) (SwarmUpdateResult, error) + SwarmLeave(ctx context.Context, options SwarmLeaveOptions) (SwarmLeaveResult, error) + + SwarmGetUnlockKey(ctx context.Context) (SwarmGetUnlockKeyResult, error) + SwarmUnlock(ctx context.Context, options SwarmUnlockOptions) (SwarmUnlockResult, error) +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options EventsListOptions) EventsResult + Info(ctx context.Context, options InfoOptions) (SystemInfoResult, error) + RegistryLogin(ctx context.Context, auth RegistryLoginOptions) (RegistryLoginResult, error) + DiskUsage(ctx context.Context, options DiskUsageOptions) (DiskUsageResult, error) + Ping(ctx context.Context, options PingOptions) (PingResult, error) +} + +// VolumeAPIClient defines API client methods for the volumes +type VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options VolumeCreateOptions) (VolumeCreateResult, error) + VolumeInspect(ctx context.Context, volumeID string, options VolumeInspectOptions) (VolumeInspectResult, error) + VolumeList(ctx context.Context, options VolumeListOptions) (VolumeListResult, error) + VolumeUpdate(ctx context.Context, volumeID string, options VolumeUpdateOptions) (VolumeUpdateResult, error) + VolumeRemove(ctx context.Context, volumeID string, options VolumeRemoveOptions) (VolumeRemoveResult, error) + VolumePrune(ctx context.Context, options VolumePruneOptions) (VolumePruneResult, error) +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretCreate(ctx context.Context, options SecretCreateOptions) (SecretCreateResult, error) + SecretInspect(ctx context.Context, id string, options SecretInspectOptions) (SecretInspectResult, error) + SecretList(ctx context.Context, options SecretListOptions) (SecretListResult, error) + SecretUpdate(ctx context.Context, id string, options SecretUpdateOptions) (SecretUpdateResult, error) + SecretRemove(ctx context.Context, id string, options SecretRemoveOptions) (SecretRemoveResult, error) +} + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigCreate(ctx context.Context, options ConfigCreateOptions) (ConfigCreateResult, error) + ConfigInspect(ctx context.Context, id string, options ConfigInspectOptions) (ConfigInspectResult, error) + ConfigList(ctx context.Context, options ConfigListOptions) (ConfigListResult, error) + ConfigUpdate(ctx context.Context, id string, options ConfigUpdateOptions) (ConfigUpdateResult, error) + ConfigRemove(ctx context.Context, id string, options ConfigRemoveOptions) (ConfigRemoveResult, error) +} diff --git a/vendor/github.com/moby/moby/client/client_options.go b/vendor/github.com/moby/moby/client/client_options.go new file mode 100644 index 
000000000..295d29918
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/client_options.go
@@ -0,0 +1,343 @@
+package client
+
+import (
+    "context"
+    "fmt"
+    "net"
+    "net/http"
+    "os"
+    "path/filepath"
+    "strings"
+    "time"
+
+    "github.com/docker/go-connections/sockets"
+    "github.com/docker/go-connections/tlsconfig"
+    "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+    "go.opentelemetry.io/otel/trace"
+)
+
+type clientConfig struct {
+    // scheme sets the scheme for the client
+    scheme string
+    // host holds the server address to connect to
+    host string
+    // proto holds the client protocol i.e. unix.
+    proto string
+    // addr holds the client address.
+    addr string
+    // basePath holds the path to prepend to the requests.
+    basePath string
+    // client used to send and receive http requests.
+    client *http.Client
+    // version of the server to talk to.
+    version string
+    // userAgent is the User-Agent header to use for HTTP requests. It takes
+    // precedence over User-Agent headers set in customHTTPHeaders, and other
+    // header variables. When set to an empty string, the User-Agent header
+    // is removed, and no header is sent.
+    userAgent *string
+    // custom HTTP headers configured by users.
+    customHTTPHeaders map[string]string
+
+    // manualAPIVersion contains the API version set by users. This field
+    // will only be non-empty if a well-formed version was set through
+    // [WithAPIVersion].
+    //
+    // If both manualAPIVersion and envAPIVersion are set, envAPIVersion
+    // takes precedence. Either field disables API-version negotiation.
+    manualAPIVersion string
+
+    // envAPIVersion contains the API version set by users. This field
+    // will only be non-empty if a well-formed version was set through
+    // [WithAPIVersionFromEnv].
+    //
+    // If both manualAPIVersion and envAPIVersion are set, envAPIVersion
+    // takes precedence. Either field disables API-version negotiation.
+    envAPIVersion string
+
+    // traceOpts is a list of options to configure the tracing span.
+    traceOpts []otelhttp.Option
+}
+
+// Opt is a configuration option to initialize a [Client].
+type Opt func(*clientConfig) error
+
+// FromEnv configures the client with values from environment variables. It
+// is the equivalent of using the [WithTLSClientConfigFromEnv], [WithHostFromEnv],
+// and [WithAPIVersionFromEnv] options.
+//
+// FromEnv uses the following environment variables:
+//
+//   - DOCKER_HOST ([EnvOverrideHost]) to set the URL to the docker server.
+//   - DOCKER_API_VERSION ([EnvOverrideAPIVersion]) to set the version of the
+//     API to use, leave empty for latest.
+//   - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from
+//     which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem").
+//   - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification
+//     (off by default).
+func FromEnv(c *clientConfig) error {
+    ops := []Opt{
+        WithTLSClientConfigFromEnv(),
+        WithHostFromEnv(),
+        WithAPIVersionFromEnv(),
+    }
+    for _, op := range ops {
+        if err := op(c); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+// WithDialContext applies the dialer to the client transport. This can be
+// used to set the Timeout and KeepAlive settings of the client. It returns
+// an error if the client does not have a [http.Transport] configured.
+func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { + return func(c *clientConfig) error { + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.DialContext = dialContext + return nil + } + return fmt.Errorf("cannot apply dialer to transport: %T", c.client.Transport) + } +} + +// WithHost overrides the client host with the specified one. +func WithHost(host string) Opt { + return func(c *clientConfig) error { + hostURL, err := ParseHostURL(host) + if err != nil { + return err + } + c.host = host + c.proto = hostURL.Scheme + c.addr = hostURL.Host + c.basePath = hostURL.Path + if transport, ok := c.client.Transport.(*http.Transport); ok { + return sockets.ConfigureTransport(transport, c.proto, c.addr) + } + // For test transports, we skip transport configuration but still + // set the host fields so that the client can use them for headers + if _, ok := c.client.Transport.(testRoundTripper); ok { + return nil + } + return fmt.Errorf("cannot apply host to transport: %T", c.client.Transport) + } +} + +// testRoundTripper allows us to inject a mock-transport for testing. We define it +// here so we can detect the tlsconfig and return nil for only this type. +type testRoundTripper func(*http.Request) (*http.Response, error) + +func (tf testRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return tf(req) +} + +func (testRoundTripper) skipConfigureTransport() bool { return true } + +// WithHostFromEnv overrides the client host with the host specified in the +// DOCKER_HOST ([EnvOverrideHost]) environment variable. If DOCKER_HOST is not set, +// or set to an empty value, the host is not modified. +func WithHostFromEnv() Opt { + return func(c *clientConfig) error { + if host := os.Getenv(EnvOverrideHost); host != "" { + return WithHost(host)(c) + } + return nil + } +} + +// WithHTTPClient overrides the client's HTTP client with the specified one. +func WithHTTPClient(client *http.Client) Opt { + return func(c *clientConfig) error { + if client != nil { + c.client = client + } + return nil + } +} + +// WithTimeout configures the time limit for requests made by the HTTP client. +func WithTimeout(timeout time.Duration) Opt { + return func(c *clientConfig) error { + c.client.Timeout = timeout + return nil + } +} + +// WithUserAgent configures the User-Agent header to use for HTTP requests. +// It overrides any User-Agent set in headers. When set to an empty string, +// the User-Agent header is removed, and no header is sent. +func WithUserAgent(ua string) Opt { + return func(c *clientConfig) error { + c.userAgent = &ua + return nil + } +} + +// WithHTTPHeaders appends custom HTTP headers to the client's default headers. +// It does not allow for built-in headers (such as "User-Agent", if set) to +// be overridden. Also see [WithUserAgent]. +func WithHTTPHeaders(headers map[string]string) Opt { + return func(c *clientConfig) error { + c.customHTTPHeaders = headers + return nil + } +} + +// WithScheme overrides the client scheme with the specified one. +func WithScheme(scheme string) Opt { + return func(c *clientConfig) error { + c.scheme = scheme + return nil + } +} + +// WithTLSClientConfig applies a TLS config to the client transport. 
+func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt {
+    return func(c *clientConfig) error {
+        transport, ok := c.client.Transport.(*http.Transport)
+        if !ok {
+            return fmt.Errorf("cannot apply tls config to transport: %T", c.client.Transport)
+        }
+        config, err := tlsconfig.Client(tlsconfig.Options{
+            CAFile:             cacertPath,
+            CertFile:           certPath,
+            KeyFile:            keyPath,
+            ExclusiveRootPools: true,
+        })
+        if err != nil {
+            return fmt.Errorf("failed to create tls config: %w", err)
+        }
+        transport.TLSClientConfig = config
+        return nil
+    }
+}
+
+// WithTLSClientConfigFromEnv configures the client's TLS settings with the
+// settings in the DOCKER_CERT_PATH ([EnvOverrideCertPath]) and DOCKER_TLS_VERIFY
+// ([EnvTLSVerify]) environment variables. If DOCKER_CERT_PATH is not set or empty,
+// TLS configuration is not modified.
+//
+// WithTLSClientConfigFromEnv uses the following environment variables:
+//
+//   - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from
+//     which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem").
+//   - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification
+//     (off by default).
+func WithTLSClientConfigFromEnv() Opt {
+    return func(c *clientConfig) error {
+        dockerCertPath := os.Getenv(EnvOverrideCertPath)
+        if dockerCertPath == "" {
+            return nil
+        }
+        tlsc, err := tlsconfig.Client(tlsconfig.Options{
+            CAFile:             filepath.Join(dockerCertPath, "ca.pem"),
+            CertFile:           filepath.Join(dockerCertPath, "cert.pem"),
+            KeyFile:            filepath.Join(dockerCertPath, "key.pem"),
+            InsecureSkipVerify: os.Getenv(EnvTLSVerify) == "",
+        })
+        if err != nil {
+            return err
+        }
+
+        c.client = &http.Client{
+            Transport:     &http.Transport{TLSClientConfig: tlsc},
+            CheckRedirect: CheckRedirect,
+        }
+        return nil
+    }
+}
+
+// WithAPIVersion overrides the client's API version with the specified one,
+// and disables API version negotiation. If an empty version is provided,
+// this option is ignored to allow version negotiation. The given version
+// should be formatted "<major>.<minor>" (for example, "1.52"). It returns
+// an error if the given value is not in the correct format.
+//
+// WithAPIVersion does not validate if the client supports the given version,
+// and callers should verify if the version is lower than the maximum supported
+// version as defined by [MaxAPIVersion].
+//
+// [WithAPIVersionFromEnv] takes precedence if [WithAPIVersion] and
+// [WithAPIVersionFromEnv] are both set.
+func WithAPIVersion(version string) Opt {
+    return func(c *clientConfig) error {
+        version = strings.TrimSpace(version)
+        if val := strings.TrimPrefix(version, "v"); val != "" {
+            ver, err := parseAPIVersion(val)
+            if err != nil {
+                return fmt.Errorf("invalid API version (%s): %w", version, err)
+            }
+            c.manualAPIVersion = ver
+        }
+        return nil
+    }
+}
+
+// WithVersion overrides the client version with the specified one.
+//
+// Deprecated: use [WithAPIVersion] instead.
+func WithVersion(version string) Opt {
+    return WithAPIVersion(version)
+}
+
+// WithAPIVersionFromEnv overrides the client version with the version specified in
+// the DOCKER_API_VERSION ([EnvOverrideAPIVersion]) environment variable.
+// If DOCKER_API_VERSION is not set, or set to an empty value, the version
+// is not modified.
+//
+// WithAPIVersionFromEnv does not validate if the client supports the given version,
+// and callers should verify if the version is lower than the maximum supported
+// version as defined by [MaxAPIVersion].
+//
+// [WithAPIVersionFromEnv] takes precedence if [WithAPIVersion] and
+// [WithAPIVersionFromEnv] are both set.
+func WithAPIVersionFromEnv() Opt {
+    return func(c *clientConfig) error {
+        version := strings.TrimSpace(os.Getenv(EnvOverrideAPIVersion))
+        if val := strings.TrimPrefix(version, "v"); val != "" {
+            ver, err := parseAPIVersion(val)
+            if err != nil {
+                return fmt.Errorf("invalid API version (%s): %w", version, err)
+            }
+            c.envAPIVersion = ver
+        }
+        return nil
+    }
+}
+
+// WithVersionFromEnv overrides the client version with the version specified in
+// the DOCKER_API_VERSION ([EnvOverrideAPIVersion]) environment variable.
+//
+// Deprecated: use [WithAPIVersionFromEnv] instead.
+func WithVersionFromEnv() Opt {
+    return WithAPIVersionFromEnv()
+}
+
+// WithAPIVersionNegotiation enables automatic API version negotiation for the client.
+// With this option enabled, the client automatically negotiates the API version
+// to use when making requests. API version negotiation is performed on the first
+// request; subsequent requests do not re-negotiate.
+//
+// Deprecated: API-version negotiation is now enabled by default. Use [WithAPIVersion]
+// or [WithAPIVersionFromEnv] to disable API version negotiation.
+func WithAPIVersionNegotiation() Opt {
+    return func(c *clientConfig) error {
+        return nil
+    }
+}
+
+// WithTraceProvider sets the trace provider for the client.
+// If this is not set then the global trace provider is used.
+func WithTraceProvider(provider trace.TracerProvider) Opt {
+    return WithTraceOptions(otelhttp.WithTracerProvider(provider))
+}
+
+// WithTraceOptions sets tracing span options for the client.
+func WithTraceOptions(opts ...otelhttp.Option) Opt {
+    return func(c *clientConfig) error {
+        c.traceOpts = append(c.traceOpts, opts...)
+        return nil
+    }
+}
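The options above compose left to right when passed to `New`. A minimal sketch combining them; the pinned version and User-Agent are hypothetical, and note that a `DOCKER_API_VERSION` picked up via `FromEnv` would take precedence over `WithAPIVersion`:

```go
package main

import (
    "log"

    "github.com/moby/moby/client"
)

func main() {
    apiClient, err := client.New(
        client.FromEnv,                // DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH, DOCKER_TLS_VERIFY
        client.WithAPIVersion("1.48"), // pin the API version; disables negotiation
        client.WithUserAgent("abra"),  // hypothetical User-Agent value
    )
    if err != nil {
        log.Fatal(err)
    }
    defer apiClient.Close()
}
```

diff --git a/vendor/github.com/moby/moby/client/client_unix.go b/vendor/github.com/moby/moby/client/client_unix.go
new file mode 100644
index 000000000..1fb9fbfb9
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/client_unix.go
@@ -0,0 +1,18 @@
+//go:build !windows
+
+package client
+
+import (
+    "context"
+    "net"
+    "syscall"
+)
+
+// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST
+// (EnvOverrideHost) environment variable is unset or empty.
+const DefaultDockerHost = "unix:///var/run/docker.sock"
+
+// dialPipeContext connects to a Windows named pipe. It is not supported on non-Windows.
+func dialPipeContext(_ context.Context, _ string) (net.Conn, error) {
+    return nil, syscall.EAFNOSUPPORT
+}
diff --git a/vendor/github.com/moby/moby/client/client_windows.go b/vendor/github.com/moby/moby/client/client_windows.go
new file mode 100644
index 000000000..b471c0612
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/client_windows.go
@@ -0,0 +1,17 @@
+package client
+
+import (
+    "context"
+    "net"
+
+    "github.com/Microsoft/go-winio"
+)
+
+// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST
+// (EnvOverrideHost) environment variable is unset or empty.
+const DefaultDockerHost = "npipe:////./pipe/docker_engine"
+
+// dialPipeContext connects to a Windows named pipe. It is not supported on non-Windows.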
+func dialPipeContext(ctx context.Context, addr string) (net.Conn, error) { + return winio.DialPipeContext(ctx, addr) +} diff --git a/vendor/github.com/moby/moby/client/config_create.go b/vendor/github.com/moby/moby/client/config_create.go new file mode 100644 index 000000000..874e2c947 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_create.go @@ -0,0 +1,34 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// ConfigCreateOptions holds options for creating a config. +type ConfigCreateOptions struct { + Spec swarm.ConfigSpec +} + +// ConfigCreateResult holds the result from the ConfigCreate method. +type ConfigCreateResult struct { + ID string +} + +// ConfigCreate creates a new config. +func (cli *Client) ConfigCreate(ctx context.Context, options ConfigCreateOptions) (ConfigCreateResult, error) { + resp, err := cli.post(ctx, "/configs/create", nil, options.Spec, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ConfigCreateResult{}, err + } + + var out swarm.ConfigCreateResponse + err = json.NewDecoder(resp.Body).Decode(&out) + if err != nil { + return ConfigCreateResult{}, err + } + return ConfigCreateResult{ID: out.ID}, nil +} diff --git a/vendor/github.com/moby/moby/client/config_inspect.go b/vendor/github.com/moby/moby/client/config_inspect.go new file mode 100644 index 000000000..0bf0ff791 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_inspect.go @@ -0,0 +1,35 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// ConfigInspectOptions holds options for inspecting a config. +type ConfigInspectOptions struct { + // Add future optional parameters here +} + +// ConfigInspectResult holds the result from the ConfigInspect method. +type ConfigInspectResult struct { + Config swarm.Config + Raw json.RawMessage +} + +// ConfigInspect returns the config information with raw data +func (cli *Client) ConfigInspect(ctx context.Context, id string, options ConfigInspectOptions) (ConfigInspectResult, error) { + id, err := trimID("config", id) + if err != nil { + return ConfigInspectResult{}, err + } + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + if err != nil { + return ConfigInspectResult{}, err + } + + var out ConfigInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Config) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/config_list.go b/vendor/github.com/moby/moby/client/config_list.go new file mode 100644 index 000000000..ee5e7fee7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_list.go @@ -0,0 +1,38 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters Filters +} + +// ConfigListResult holds the result from the [client.ConfigList] method. +type ConfigListResult struct { + Items []swarm.Config +} + +// ConfigList returns the list of configs. 
+func (cli *Client) ConfigList(ctx context.Context, options ConfigListOptions) (ConfigListResult, error) {
+    query := url.Values{}
+    options.Filters.updateURLValues(query)
+
+    resp, err := cli.get(ctx, "/configs", query, nil)
+    defer ensureReaderClosed(resp)
+    if err != nil {
+        return ConfigListResult{}, err
+    }
+
+    var out ConfigListResult
+    err = json.NewDecoder(resp.Body).Decode(&out.Items)
+    if err != nil {
+        return ConfigListResult{}, err
+    }
+    return out, nil
+}
diff --git a/vendor/github.com/moby/moby/client/config_remove.go b/vendor/github.com/moby/moby/client/config_remove.go
new file mode 100644
index 000000000..c77a4c378
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/config_remove.go
@@ -0,0 +1,25 @@
+package client
+
+import "context"
+
+type ConfigRemoveOptions struct {
+    // Add future optional parameters here
+}
+
+type ConfigRemoveResult struct {
+    // Add future fields here
+}
+
+// ConfigRemove removes a config.
+func (cli *Client) ConfigRemove(ctx context.Context, id string, options ConfigRemoveOptions) (ConfigRemoveResult, error) {
+    id, err := trimID("config", id)
+    if err != nil {
+        return ConfigRemoveResult{}, err
+    }
+    resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
+    defer ensureReaderClosed(resp)
+    if err != nil {
+        return ConfigRemoveResult{}, err
+    }
+    return ConfigRemoveResult{}, nil
+}
diff --git a/vendor/github.com/moby/moby/client/config_update.go b/vendor/github.com/moby/moby/client/config_update.go
new file mode 100644
index 000000000..2651f4b2f
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/config_update.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+    "context"
+    "net/url"
+
+    "github.com/moby/moby/api/types/swarm"
+)
+
+// ConfigUpdateOptions holds options for updating a config.
+type ConfigUpdateOptions struct {
+    Version swarm.Version
+    Spec    swarm.ConfigSpec
+}
+
+type ConfigUpdateResult struct{}
+
+// ConfigUpdate attempts to update a config.
+func (cli *Client) ConfigUpdate(ctx context.Context, id string, options ConfigUpdateOptions) (ConfigUpdateResult, error) {
+    id, err := trimID("config", id)
+    if err != nil {
+        return ConfigUpdateResult{}, err
+    }
+    query := url.Values{}
+    query.Set("version", options.Version.String())
+    resp, err := cli.post(ctx, "/configs/"+id+"/update", query, options.Spec, nil)
+    defer ensureReaderClosed(resp)
+    if err != nil {
+        return ConfigUpdateResult{}, err
+    }
+    return ConfigUpdateResult{}, nil
+}
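The config methods above mirror the other Swarm object clients. A minimal lifecycle sketch, assuming `swarm.ConfigSpec` carries `Annotations.Name` and `Data` fields and `swarm.Config` an `ID` field, as in the docker/docker Swarm types:

```go
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/moby/moby/api/types/swarm"
    "github.com/moby/moby/client"
)

func main() {
    apiClient, err := client.New(client.FromEnv)
    if err != nil {
        log.Fatal(err)
    }
    defer apiClient.Close()
    ctx := context.Background()

    // Create a config; field names on the spec are assumed.
    created, err := apiClient.ConfigCreate(ctx, client.ConfigCreateOptions{
        Spec: swarm.ConfigSpec{
            Annotations: swarm.Annotations{Name: "app-config"},
            Data:        []byte("key=value\n"),
        },
    })
    if err != nil {
        log.Fatal(err)
    }

    // List configs, then remove the one we just created.
    list, err := apiClient.ConfigList(ctx, client.ConfigListOptions{})
    if err != nil {
        log.Fatal(err)
    }
    for _, cfg := range list.Items {
        fmt.Println(cfg.ID) // ID field assumed
    }

    if _, err := apiClient.ConfigRemove(ctx, created.ID, client.ConfigRemoveOptions{}); err != nil {
        log.Fatal(err)
    }
}
```

diff --git a/vendor/github.com/moby/moby/client/container_attach.go b/vendor/github.com/moby/moby/client/container_attach.go
new file mode 100644
index 000000000..fa3a2efcc
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_attach.go
@@ -0,0 +1,86 @@
+package client
+
+import (
+    "context"
+    "net/http"
+    "net/url"
+)
+
+// ContainerAttachOptions holds parameters to attach to a container.
+type ContainerAttachOptions struct {
+    Stream     bool
+    Stdin      bool
+    Stdout     bool
+    Stderr     bool
+    DetachKeys string
+    Logs       bool
+}
+
+// ContainerAttachResult is the result from attaching to a container.
+type ContainerAttachResult struct {
+    HijackedResponse
+}
+
+// ContainerAttach attaches a connection to a container in the server.
+// It returns a [HijackedResponse] with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling [HijackedResponse.Close].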
+//
+// The stream format on the response uses one of two formats:
+//
+//   - If the container is using a TTY, there is only a single stream (stdout)
+//     and data is copied directly from the container output stream, no extra
+//     multiplexing or headers.
+//   - If the container is *not* using a TTY, streams for stdout and stderr are
+//     multiplexed.
+//
+// The format of the multiplexed stream is defined in the [stdcopy] package,
+// as follows:
+//
+//	[8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
+//
+// STREAM_TYPE can be 1 for [Stdout] and 2 for [Stderr]. Refer to [stdcopy.StdType]
+// for details. SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded
+// as big endian; this is the size of OUTPUT. You can use [stdcopy.StdCopy]
+// to demultiplex this stream.
+//
+// [stdcopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy
+// [stdcopy.StdCopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdCopy
+// [stdcopy.StdType]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdType
+// [Stdout]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#Stdout
+// [Stderr]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#Stderr
+func (cli *Client) ContainerAttach(ctx context.Context, containerID string, options ContainerAttachOptions) (ContainerAttachResult, error) {
+    containerID, err := trimID("container", containerID)
+    if err != nil {
+        return ContainerAttachResult{}, err
+    }
+
+    query := url.Values{}
+    if options.Stream {
+        query.Set("stream", "1")
+    }
+    if options.Stdin {
+        query.Set("stdin", "1")
+    }
+    if options.Stdout {
+        query.Set("stdout", "1")
+    }
+    if options.Stderr {
+        query.Set("stderr", "1")
+    }
+    if options.DetachKeys != "" {
+        query.Set("detachKeys", options.DetachKeys)
+    }
+    if options.Logs {
+        query.Set("logs", "1")
+    }
+
+    hijacked, err := cli.postHijacked(ctx, "/containers/"+containerID+"/attach", query, nil, http.Header{
+        "Content-Type": {"text/plain"},
+    })
+    if err != nil {
+        return ContainerAttachResult{}, err
+    }
+
+    return ContainerAttachResult{HijackedResponse: hijacked}, nil
+}
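Since the attach stream is multiplexed for non-TTY containers, a demultiplexing sketch may help. It assumes `HijackedResponse` exposes a `Reader` field, as in the docker/docker client, and a hypothetical running container ID:

```go
package main

import (
    "context"
    "log"
    "os"

    "github.com/moby/moby/api/pkg/stdcopy"
    "github.com/moby/moby/client"
)

func main() {
    apiClient, err := client.New(client.FromEnv)
    if err != nil {
        log.Fatal(err)
    }
    defer apiClient.Close()

    containerID := "my-container" // hypothetical container name or ID

    // Attach to a non-TTY container so stdout and stderr arrive multiplexed.
    res, err := apiClient.ContainerAttach(context.Background(), containerID, client.ContainerAttachOptions{
        Stream: true,
        Stdout: true,
        Stderr: true,
    })
    if err != nil {
        log.Fatal(err)
    }
    defer res.Close()

    // StdCopy reads the 8-byte frame headers and routes each frame to the
    // matching writer until the stream ends.
    if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, res.Reader); err != nil {
        log.Fatal(err)
    }
}
```

diff --git a/vendor/github.com/moby/moby/client/container_commit.go b/vendor/github.com/moby/moby/client/container_commit.go
new file mode 100644
index 000000000..79da44a54
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_commit.go
@@ -0,0 +1,75 @@
+package client
+
+import (
+    "context"
+    "encoding/json"
+    "errors"
+    "net/url"
+
+    "github.com/distribution/reference"
+    "github.com/moby/moby/api/types/container"
+)
+
+// ContainerCommitOptions holds parameters to commit changes into a container.
+type ContainerCommitOptions struct {
+    Reference string
+    Comment   string
+    Author    string
+    Changes   []string
+    NoPause   bool // NoPause disables pausing the container during commit.
+    Config    *container.Config
+}
+
+// ContainerCommitResult is the result from committing a container.
+type ContainerCommitResult struct {
+    ID string
+}
+
+// ContainerCommit applies changes to a container and creates a new tagged image.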
+func (cli *Client) ContainerCommit(ctx context.Context, containerID string, options ContainerCommitOptions) (ContainerCommitResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerCommitResult{}, err + } + + var repository, tag string + if options.Reference != "" { + ref, err := reference.ParseNormalizedNamed(options.Reference) + if err != nil { + return ContainerCommitResult{}, err + } + + if _, ok := ref.(reference.Digested); ok { + return ContainerCommitResult{}, errors.New("refusing to create a tag with a digest reference") + } + ref = reference.TagNameOnly(ref) + + if tagged, ok := ref.(reference.Tagged); ok { + tag = tagged.Tag() + } + repository = ref.Name() + } + + query := url.Values{} + query.Set("container", containerID) + query.Set("repo", repository) + query.Set("tag", tag) + query.Set("comment", options.Comment) + query.Set("author", options.Author) + for _, change := range options.Changes { + query.Add("changes", change) + } + if options.NoPause { + query.Set("pause", "0") + } + + var response container.CommitResponse + resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerCommitResult{}, err + } + + err = json.NewDecoder(resp.Body).Decode(&response) + return ContainerCommitResult{ID: response.ID}, err +} diff --git a/vendor/github.com/moby/moby/client/container_copy.go b/vendor/github.com/moby/moby/client/container_copy.go new file mode 100644 index 000000000..f76511246 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_copy.go @@ -0,0 +1,137 @@ +package client + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strings" + + "github.com/moby/moby/api/types/container" +) + +type ContainerStatPathOptions struct { + Path string +} + +type ContainerStatPathResult struct { + Stat container.PathStat +} + +// ContainerStatPath returns stat information about a path inside the container filesystem. +func (cli *Client) ContainerStatPath(ctx context.Context, containerID string, options ContainerStatPathOptions) (ContainerStatPathResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerStatPathResult{}, err + } + + query := url.Values{} + query.Set("path", filepath.ToSlash(options.Path)) // Normalize the paths used in the API. + + resp, err := cli.head(ctx, "/containers/"+containerID+"/archive", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerStatPathResult{}, err + } + stat, err := getContainerPathStatFromHeader(resp.Header) + if err != nil { + return ContainerStatPathResult{}, err + } + return ContainerStatPathResult{Stat: stat}, nil +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + DestinationPath string + Content io.Reader + AllowOverwriteDirWithFile bool + CopyUIDGID bool +} + +type CopyToContainerResult struct{} + +// CopyToContainer copies content into the container filesystem. 
+// Note that `content` must be a Reader for a TAR archive +func (cli *Client) CopyToContainer(ctx context.Context, containerID string, options CopyToContainerOptions) (CopyToContainerResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return CopyToContainerResult{}, err + } + + query := url.Values{} + query.Set("path", filepath.ToSlash(options.DestinationPath)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + if options.CopyUIDGID { + query.Set("copyUIDGID", "true") + } + + response, err := cli.putRaw(ctx, "/containers/"+containerID+"/archive", query, options.Content, nil) + defer ensureReaderClosed(response) + if err != nil { + return CopyToContainerResult{}, err + } + + return CopyToContainerResult{}, nil +} + +type CopyFromContainerOptions struct { + SourcePath string +} + +type CopyFromContainerResult struct { + Content io.ReadCloser + Stat container.PathStat +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, containerID string, options CopyFromContainerOptions) (CopyFromContainerResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return CopyFromContainerResult{}, err + } + + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(options.SourcePath)) // Normalize the paths used in the API. + + resp, err := cli.get(ctx, "/containers/"+containerID+"/archive", query, nil) + if err != nil { + return CopyFromContainerResult{}, err + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. 
+ stat, err := getContainerPathStatFromHeader(resp.Header) + if err != nil { + ensureReaderClosed(resp) + return CopyFromContainerResult{Stat: stat}, fmt.Errorf("unable to get resource stat from response: %s", err) + } + return CopyFromContainerResult{Content: resp.Body, Stat: stat}, nil +} + +func getContainerPathStatFromHeader(header http.Header) (container.PathStat, error) { + var stat container.PathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} diff --git a/vendor/github.com/moby/moby/client/container_create.go b/vendor/github.com/moby/moby/client/container_create.go new file mode 100644 index 000000000..d941a3720 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_create.go @@ -0,0 +1,125 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + "path" + "sort" + "strings" + + cerrdefs "github.com/containerd/errdefs" + "github.com/moby/moby/api/types/container" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ContainerCreate creates a new container based on the given configuration. +// It can be associated with a name, but it's not mandatory. +func (cli *Client) ContainerCreate(ctx context.Context, options ContainerCreateOptions) (ContainerCreateResult, error) { + cfg := options.Config + + if cfg == nil { + cfg = &container.Config{} + } + + if options.Image != "" { + if cfg.Image != "" { + return ContainerCreateResult{}, cerrdefs.ErrInvalidArgument.WithMessage("either Image or config.Image should be set") + } + newCfg := *cfg + newCfg.Image = options.Image + cfg = &newCfg + } + + if cfg.Image == "" { + return ContainerCreateResult{}, cerrdefs.ErrInvalidArgument.WithMessage("config.Image or Image is required") + } + + var response container.CreateResponse + + if options.HostConfig != nil { + options.HostConfig.CapAdd = normalizeCapabilities(options.HostConfig.CapAdd) + options.HostConfig.CapDrop = normalizeCapabilities(options.HostConfig.CapDrop) + } + + query := url.Values{} + if options.Platform != nil { + if p := formatPlatform(*options.Platform); p != "unknown" { + query.Set("platform", p) + } + } + + if options.Name != "" { + query.Set("name", options.Name) + } + + body := container.CreateRequest{ + Config: cfg, + HostConfig: options.HostConfig, + NetworkingConfig: options.NetworkingConfig, + } + + resp, err := cli.post(ctx, "/containers/create", query, body, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerCreateResult{}, err + } + + err = json.NewDecoder(resp.Body).Decode(&response) + return ContainerCreateResult{ID: response.ID, Warnings: response.Warnings}, err +} + +// formatPlatform returns a formatted string representing platform (e.g., "linux/arm/v7"). +// +// It is a fork of [platforms.Format], and does not yet support "os.version", +// as [platforms.FormatAll] does. 
+// +// [platforms.Format]: https://github.com/containerd/platforms/blob/v1.0.0-rc.1/platforms.go#L309-L316 +// [platforms.FormatAll]: https://github.com/containerd/platforms/blob/v1.0.0-rc.1/platforms.go#L318-L330 +func formatPlatform(platform ocispec.Platform) string { + if platform.OS == "" { + return "unknown" + } + return path.Join(platform.OS, platform.Architecture, platform.Variant) +} + +// allCapabilities is a magic value for "all capabilities" +const allCapabilities = "ALL" + +// normalizeCapabilities normalizes capabilities to their canonical form, +// removes duplicates, and sorts the results. +// +// It is similar to [caps.NormalizeLegacyCapabilities], +// but performs no validation based on supported capabilities. +// +// [caps.NormalizeLegacyCapabilities]: https://github.com/moby/moby/blob/v28.3.2/oci/caps/utils.go#L56 +func normalizeCapabilities(caps []string) []string { + var normalized []string + + unique := make(map[string]struct{}) + for _, c := range caps { + c = normalizeCap(c) + if _, ok := unique[c]; ok { + continue + } + unique[c] = struct{}{} + normalized = append(normalized, c) + } + + sort.Strings(normalized) + return normalized +} + +// normalizeCap normalizes a capability to its canonical format by upper-casing +// and adding a "CAP_" prefix (if not yet present). It also accepts the "ALL" +// magic-value. +func normalizeCap(capability string) string { + capability = strings.ToUpper(capability) + if capability == allCapabilities { + return capability + } + if !strings.HasPrefix(capability, "CAP_") { + capability = "CAP_" + capability + } + return capability +} diff --git a/vendor/github.com/moby/moby/client/container_create_opts.go b/vendor/github.com/moby/moby/client/container_create_opts.go new file mode 100644 index 000000000..8580e20d3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_create_opts.go @@ -0,0 +1,25 @@ +package client + +import ( + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/network" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ContainerCreateOptions holds parameters to create a container. +type ContainerCreateOptions struct { + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + Platform *ocispec.Platform + Name string + + // Image is a shortcut for Config.Image - only one of Image or Config.Image should be set. + Image string +} + +// ContainerCreateResult is the result from creating a container. +type ContainerCreateResult struct { + ID string + Warnings []string +} diff --git a/vendor/github.com/moby/moby/client/container_diff.go b/vendor/github.com/moby/moby/client/container_diff.go new file mode 100644 index 000000000..ec904337e --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_diff.go @@ -0,0 +1,30 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/container" +) + +// ContainerDiff shows differences in a container filesystem since it was started. 
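+//
+// A minimal usage sketch, assuming an initialized *Client named cli, a valid
+// containerID, and that [container.FilesystemChange] carries a change kind
+// and a path as in the classic API types:
+//
+//	res, err := cli.ContainerDiff(ctx, containerID, ContainerDiffOptions{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, change := range res.Changes {
+//		fmt.Println(change.Kind, change.Path)
+//	}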
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string, options ContainerDiffOptions) (ContainerDiffResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerDiffResult{}, err + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerDiffResult{}, err + } + + var changes []container.FilesystemChange + err = json.NewDecoder(resp.Body).Decode(&changes) + if err != nil { + return ContainerDiffResult{}, err + } + return ContainerDiffResult{Changes: changes}, err +} diff --git a/vendor/github.com/moby/moby/client/container_diff_opts.go b/vendor/github.com/moby/moby/client/container_diff_opts.go new file mode 100644 index 000000000..5e3c37ab4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_diff_opts.go @@ -0,0 +1,13 @@ +package client + +import "github.com/moby/moby/api/types/container" + +// ContainerDiffOptions holds parameters to show differences in a container filesystem. +type ContainerDiffOptions struct { + // Currently no options, but this allows for future extensibility +} + +// ContainerDiffResult is the result from showing differences in a container filesystem. +type ContainerDiffResult struct { + Changes []container.FilesystemChange +} diff --git a/vendor/github.com/moby/moby/client/container_exec.go b/vendor/github.com/moby/moby/client/container_exec.go new file mode 100644 index 000000000..1af6a4e20 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_exec.go @@ -0,0 +1,205 @@ +package client + +import ( + "context" + "encoding/json" + "net/http" + + "github.com/containerd/errdefs" + "github.com/moby/moby/api/types/container" +) + +// ExecCreateOptions is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecCreateOptions struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + TTY bool // Attach standard streams to a tty. + ConsoleSize ConsoleSize // Initial terminal size [height, width], unused if TTY == false + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + DetachKeys string // Escape keys for detach + Env []string // Environment variables + WorkingDir string // Working directory + Cmd []string // Execution commands and args +} + +// ExecCreateResult holds the result of creating a container exec. +type ExecCreateResult struct { + ID string +} + +// ExecCreate creates a new exec configuration to run an exec process. 
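+//
+// A minimal usage sketch, assuming an initialized *Client named cli and a
+// valid containerID; the returned ID is then passed to [Client.ExecAttach]
+// or [Client.ExecStart]:
+//
+//	created, err := cli.ExecCreate(ctx, containerID, ExecCreateOptions{
+//		Cmd:          []string{"ls", "-l", "/"},
+//		AttachStdout: true,
+//		AttachStderr: true,
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	execID := created.ID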
+func (cli *Client) ExecCreate(ctx context.Context, containerID string, options ExecCreateOptions) (ExecCreateResult, error) {
+	containerID, err := trimID("container", containerID)
+	if err != nil {
+		return ExecCreateResult{}, err
+	}
+
+	consoleSize, err := getConsoleSize(options.TTY, options.ConsoleSize)
+	if err != nil {
+		return ExecCreateResult{}, err
+	}
+
+	req := container.ExecCreateRequest{
+		User:         options.User,
+		Privileged:   options.Privileged,
+		Tty:          options.TTY,
+		ConsoleSize:  consoleSize,
+		AttachStdin:  options.AttachStdin,
+		AttachStderr: options.AttachStderr,
+		AttachStdout: options.AttachStdout,
+		DetachKeys:   options.DetachKeys,
+		Env:          options.Env,
+		WorkingDir:   options.WorkingDir,
+		Cmd:          options.Cmd,
+	}
+
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/exec", nil, req, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return ExecCreateResult{}, err
+	}
+
+	var response container.ExecCreateResponse
+	err = json.NewDecoder(resp.Body).Decode(&response)
+	return ExecCreateResult{ID: response.ID}, err
+}
+
+type ConsoleSize struct {
+	Height, Width uint
+}
+
+// ExecStartOptions holds options for starting a container exec.
+type ExecStartOptions struct {
+	// Detach indicates whether to detach from the exec process instead of
+	// streaming its output.
+	Detach bool
+	// TTY indicates whether a TTY is attached to the exec process.
+	TTY bool
+	// Terminal size [height, width], unused if TTY == false
+	ConsoleSize ConsoleSize
+}
+
+// ExecStartResult holds the result of starting a container exec.
+type ExecStartResult struct {
+}
+
+// ExecStart starts an exec process already created in the docker host.
+func (cli *Client) ExecStart(ctx context.Context, execID string, options ExecStartOptions) (ExecStartResult, error) {
+	consoleSize, err := getConsoleSize(options.TTY, options.ConsoleSize)
+	if err != nil {
+		return ExecStartResult{}, err
+	}
+
+	req := container.ExecStartRequest{
+		Detach:      options.Detach,
+		Tty:         options.TTY,
+		ConsoleSize: consoleSize,
+	}
+	resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, req, nil)
+	defer ensureReaderClosed(resp)
+	return ExecStartResult{}, err
+}
+
+// ExecAttachOptions holds options for attaching to a container exec.
+type ExecAttachOptions struct {
+	// TTY indicates whether a TTY is attached to the exec process.
+	TTY bool
+	// Terminal size [height, width], unused if TTY == false
+	ConsoleSize ConsoleSize `json:",omitzero"`
+}
+
+// ExecAttachResult holds the result of attaching to a container exec.
+type ExecAttachResult struct {
+	HijackedResponse
+}
+
+// ExecAttach attaches a connection to an exec process in the server.
+//
+// It returns a [HijackedResponse] with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling [HijackedResponse.Close].
+//
+// The stream format on the response uses one of two formats:
+//
+//   - If the container is using a TTY, there is only a single stream (stdout)
+//     and data is copied directly from the container output stream, no extra
+//     multiplexing or headers.
+//   - If the container is *not* using a TTY, streams for stdout and stderr are
+//     multiplexed.
+//
+// You can use [stdcopy.StdCopy] to demultiplex this stream. Refer to
+// [Client.ContainerAttach] for details about the multiplexed stream.
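+//
+// A minimal usage sketch for the non-TTY case, assuming an initialized
+// *Client named cli and an execID obtained from [Client.ExecCreate]; it
+// also assumes the embedded [HijackedResponse] exposes its buffered Reader
+// field as in the classic client:
+//
+//	res, err := cli.ExecAttach(ctx, execID, ExecAttachOptions{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Close()
+//	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, res.Reader); err != nil {
+//		log.Fatal(err)
+//	}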
+// +// [stdcopy.StdCopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdCopy +func (cli *Client) ExecAttach(ctx context.Context, execID string, options ExecAttachOptions) (ExecAttachResult, error) { + consoleSize, err := getConsoleSize(options.TTY, options.ConsoleSize) + if err != nil { + return ExecAttachResult{}, err + } + req := container.ExecStartRequest{ + Detach: false, + Tty: options.TTY, + ConsoleSize: consoleSize, + } + response, err := cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, req, http.Header{ + "Content-Type": {"application/json"}, + }) + return ExecAttachResult{HijackedResponse: response}, err +} + +func getConsoleSize(hasTTY bool, consoleSize ConsoleSize) (*[2]uint, error) { + if consoleSize.Height != 0 || consoleSize.Width != 0 { + if !hasTTY { + return nil, errdefs.ErrInvalidArgument.WithMessage("console size is only supported when TTY is enabled") + } + return &[2]uint{consoleSize.Height, consoleSize.Width}, nil + } + return nil, nil +} + +// ExecInspectOptions holds options for inspecting a container exec. +type ExecInspectOptions struct { +} + +// ExecInspectResult holds the result of inspecting a container exec. +// +// It provides a subset of the information included in [container.ExecInspectResponse]. +// +// TODO(thaJeztah): include all fields of [container.ExecInspectResponse] ? +type ExecInspectResult struct { + ID string + ContainerID string + Running bool + ExitCode int + PID int +} + +// ExecInspect returns information about a specific exec process on the docker host. +func (cli *Client) ExecInspect(ctx context.Context, execID string, options ExecInspectOptions) (ExecInspectResult, error) { + resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ExecInspectResult{}, err + } + + var response container.ExecInspectResponse + err = json.NewDecoder(resp.Body).Decode(&response) + if err != nil { + return ExecInspectResult{}, err + } + + var ec int + if response.ExitCode != nil { + ec = *response.ExitCode + } + + return ExecInspectResult{ + ID: response.ID, + ContainerID: response.ContainerID, + Running: response.Running, + ExitCode: ec, + PID: response.Pid, + }, nil +} diff --git a/vendor/github.com/moby/moby/client/container_export.go b/vendor/github.com/moby/moby/client/container_export.go new file mode 100644 index 000000000..2d33efb7d --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_export.go @@ -0,0 +1,47 @@ +package client + +import ( + "context" + "io" + "net/url" +) + +// ContainerExportOptions specifies options for container export operations. +type ContainerExportOptions struct { + // Currently no options are defined for ContainerExport +} + +// ContainerExportResult represents the result of a container export operation. +type ContainerExportResult interface { + io.ReadCloser +} + +// ContainerExport retrieves the raw contents of a container +// and returns them as an [io.ReadCloser]. It's up to the caller +// to close the stream. 
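+//
+// A minimal usage sketch that writes the exported filesystem to a local
+// tarball, assuming an initialized *Client named cli and a valid containerID:
+//
+//	rc, err := cli.ContainerExport(ctx, containerID, ContainerExportOptions{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer rc.Close()
+//	f, err := os.Create("rootfs.tar") // hypothetical destination path
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer f.Close()
+//	if _, err := io.Copy(f, rc); err != nil {
+//		log.Fatal(err)
+//	}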
+//
+// The underlying [io.ReadCloser] is automatically closed if the context is canceled.
+func (cli *Client) ContainerExport(ctx context.Context, containerID string, options ContainerExportOptions) (ContainerExportResult, error) {
+	containerID, err := trimID("container", containerID)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return &containerExportResult{
+		ReadCloser: newCancelReadCloser(ctx, resp.Body),
+	}, nil
+}
+
+type containerExportResult struct {
+	io.ReadCloser
+}
+
+var (
+	_ io.ReadCloser         = (*containerExportResult)(nil)
+	_ ContainerExportResult = (*containerExportResult)(nil)
+)
diff --git a/vendor/github.com/moby/moby/client/container_inspect.go b/vendor/github.com/moby/moby/client/container_inspect.go
new file mode 100644
index 000000000..4f12c4657
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_inspect.go
@@ -0,0 +1,47 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"net/url"
+
+	"github.com/moby/moby/api/types/container"
+)
+
+// ContainerInspectOptions holds options for inspecting a container using
+// the [Client.ContainerInspect] method.
+type ContainerInspectOptions struct {
+	// Size controls whether the container's filesystem size should be calculated.
+	// When set, the [container.InspectResponse.SizeRw] and [container.InspectResponse.SizeRootFs]
+	// fields in [ContainerInspectResult.Container] are populated with the result.
+	//
+	// Calculating the size can be a costly operation, and should not be used
+	// unless needed.
+	Size bool
+}
+
+// ContainerInspectResult holds the result from the [Client.ContainerInspect] method.
+type ContainerInspectResult struct {
+	Container container.InspectResponse
+	Raw       json.RawMessage
+}
+
+// ContainerInspect returns the container information.
+func (cli *Client) ContainerInspect(ctx context.Context, containerID string, options ContainerInspectOptions) (ContainerInspectResult, error) {
+	containerID, err := trimID("container", containerID)
+	if err != nil {
+		return ContainerInspectResult{}, err
+	}
+
+	query := url.Values{}
+	if options.Size {
+		query.Set("size", "1")
+	}
+	resp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
+	if err != nil {
+		return ContainerInspectResult{}, err
+	}
+	var out ContainerInspectResult
+	out.Raw, err = decodeWithRaw(resp, &out.Container)
+	return out, err
+}
diff --git a/vendor/github.com/moby/moby/client/container_kill.go b/vendor/github.com/moby/moby/client/container_kill.go
new file mode 100644
index 000000000..ae7a4ebd8
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_kill.go
@@ -0,0 +1,39 @@
+package client
+
+import (
+	"context"
+	"net/url"
+)
+
+// ContainerKillOptions holds options for [Client.ContainerKill].
+type ContainerKillOptions struct {
+	// Signal (optional) is the signal to send to the container.
+	// If no value is set, the default (SIGKILL) is used.
+	Signal string `json:",omitempty"`
+}
+
+// ContainerKillResult holds the result of [Client.ContainerKill].
+type ContainerKillResult struct {
+	// Add future fields here.
+}
+
+// ContainerKill terminates the container process but does not remove the container from the docker host.
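+//
+// A minimal usage sketch, assuming an initialized *Client named cli and a
+// valid containerID; leaving Signal empty falls back to the SIGKILL default:
+//
+//	if _, err := cli.ContainerKill(ctx, containerID, ContainerKillOptions{
+//		Signal: "SIGTERM",
+//	}); err != nil {
+//		log.Fatal(err)
+//	}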
+func (cli *Client) ContainerKill(ctx context.Context, containerID string, options ContainerKillOptions) (ContainerKillResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerKillResult{}, err + } + + query := url.Values{} + if options.Signal != "" { + query.Set("signal", options.Signal) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerKillResult{}, err + } + return ContainerKillResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_list.go b/vendor/github.com/moby/moby/client/container_list.go new file mode 100644 index 000000000..fcd39c4da --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_list.go @@ -0,0 +1,62 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/moby/moby/api/types/container" +) + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters Filters +} + +type ContainerListResult struct { + Items []container.Summary +} + +// ContainerList returns the list of containers in the docker host. +func (cli *Client) ContainerList(ctx context.Context, options ContainerListOptions) (ContainerListResult, error) { + query := url.Values{} + + if options.All { + query.Set("all", "1") + } + + if options.Limit > 0 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Since != "" { + query.Set("since", options.Since) + } + + if options.Before != "" { + query.Set("before", options.Before) + } + + if options.Size { + query.Set("size", "1") + } + + options.Filters.updateURLValues(query) + + resp, err := cli.get(ctx, "/containers/json", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerListResult{}, err + } + + var containers []container.Summary + err = json.NewDecoder(resp.Body).Decode(&containers) + return ContainerListResult{Items: containers}, err +} diff --git a/vendor/github.com/moby/moby/client/container_logs.go b/vendor/github.com/moby/moby/client/container_logs.go new file mode 100644 index 000000000..636ab2212 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_logs.go @@ -0,0 +1,118 @@ +package client + +import ( + "context" + "fmt" + "io" + "net/url" + "time" + + "github.com/moby/moby/client/internal/timestamp" +) + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerLogsResult is the result of a container logs operation. +type ContainerLogsResult interface { + io.ReadCloser +} + +// ContainerLogs returns the logs generated by a container in an [io.ReadCloser]. +// It's up to the caller to close the stream. +// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled, +// +// The stream format on the response uses one of two formats: +// +// - If the container is using a TTY, there is only a single stream (stdout) +// and data is copied directly from the container output stream, no extra +// multiplexing or headers. +// - If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. 
+// +// The format of the multiplexed stream is defined in the [stdcopy] package, +// and as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for [Stdout] and 2 for [Stderr]. Refer to [stdcopy.StdType] +// for details. SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded +// as big endian, this is the size of OUTPUT. You can use [stdcopy.StdCopy] +// to demultiplex this stream. +// +// [stdcopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy +// [stdcopy.StdCopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdCopy +// [stdcopy.StdType]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdType +// [Stdout]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#Stdout +// [Stderr]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#Stderr +func (cli *Client) ContainerLogs(ctx context.Context, containerID string, options ContainerLogsOptions) (ContainerLogsResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return nil, err + } + + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timestamp.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, fmt.Errorf(`invalid value for "since": %w`, err) + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timestamp.GetTimestamp(options.Until, time.Now()) + if err != nil { + return nil, fmt.Errorf(`invalid value for "until": %w`, err) + } + query.Set("until", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/containers/"+containerID+"/logs", query, nil) + if err != nil { + return nil, err + } + return &containerLogsResult{ + ReadCloser: newCancelReadCloser(ctx, resp.Body), + }, nil +} + +type containerLogsResult struct { + io.ReadCloser +} + +var ( + _ io.ReadCloser = (*containerLogsResult)(nil) + _ ContainerLogsResult = (*containerLogsResult)(nil) +) diff --git a/vendor/github.com/moby/moby/client/container_pause.go b/vendor/github.com/moby/moby/client/container_pause.go new file mode 100644 index 000000000..07669c897 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_pause.go @@ -0,0 +1,28 @@ +package client + +import "context" + +// ContainerPauseOptions holds options for [Client.ContainerPause]. +type ContainerPauseOptions struct { + // Add future optional parameters here. +} + +// ContainerPauseResult holds the result of [Client.ContainerPause], +type ContainerPauseResult struct { + // Add future fields here. +} + +// ContainerPause pauses the main process of a given container without terminating it. 
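+//
+// A minimal usage sketch pairing pause with its counterpart
+// [Client.ContainerUnpause], assuming an initialized *Client named cli and
+// a valid containerID:
+//
+//	if _, err := cli.ContainerPause(ctx, containerID, ContainerPauseOptions{}); err != nil {
+//		log.Fatal(err)
+//	}
+//	// ... inspect the frozen state, take a snapshot, etc. ...
+//	if _, err := cli.ContainerUnpause(ctx, containerID, ContainerUnpauseOptions{}); err != nil {
+//		log.Fatal(err)
+//	}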
+func (cli *Client) ContainerPause(ctx context.Context, containerID string, options ContainerPauseOptions) (ContainerPauseResult, error) {
+	containerID, err := trimID("container", containerID)
+	if err != nil {
+		return ContainerPauseResult{}, err
+	}
+
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return ContainerPauseResult{}, err
+	}
+	return ContainerPauseResult{}, nil
+}
diff --git a/vendor/github.com/moby/moby/client/container_prune.go b/vendor/github.com/moby/moby/client/container_prune.go
new file mode 100644
index 000000000..f826f8b6f
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_prune.go
@@ -0,0 +1,39 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/moby/moby/api/types/container"
+)
+
+// ContainerPruneOptions holds parameters to prune containers.
+type ContainerPruneOptions struct {
+	Filters Filters
+}
+
+// ContainerPruneResult holds the result from the [Client.ContainerPrune] method.
+type ContainerPruneResult struct {
+	Report container.PruneReport
+}
+
+// ContainerPrune requests the daemon to delete unused (stopped) containers.
+func (cli *Client) ContainerPrune(ctx context.Context, opts ContainerPruneOptions) (ContainerPruneResult, error) {
+	query := url.Values{}
+	opts.Filters.updateURLValues(query)
+
+	resp, err := cli.post(ctx, "/containers/prune", query, nil, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return ContainerPruneResult{}, err
+	}
+
+	var report container.PruneReport
+	if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
+		return ContainerPruneResult{}, fmt.Errorf("error retrieving container prune report: %w", err)
+	}
+
+	return ContainerPruneResult{Report: report}, nil
+}
diff --git a/vendor/github.com/moby/moby/client/container_remove.go b/vendor/github.com/moby/moby/client/container_remove.go
new file mode 100644
index 000000000..0fbfa05fa
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_remove.go
@@ -0,0 +1,45 @@
+package client
+
+import (
+	"context"
+	"net/url"
+)
+
+// ContainerRemoveOptions holds parameters to remove containers.
+type ContainerRemoveOptions struct {
+	RemoveVolumes bool
+	RemoveLinks   bool
+	Force         bool
+}
+
+// ContainerRemoveResult holds the result of [Client.ContainerRemove].
+type ContainerRemoveResult struct {
+	// Add future fields here.
+}
+
+// ContainerRemove kills and removes a container from the docker host.
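+//
+// A minimal usage sketch, assuming an initialized *Client named cli and a
+// valid containerID; Force also removes a running container:
+//
+//	if _, err := cli.ContainerRemove(ctx, containerID, ContainerRemoveOptions{
+//		Force:         true,
+//		RemoveVolumes: true, // also delete anonymous volumes
+//	}); err != nil {
+//		log.Fatal(err)
+//	}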
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options ContainerRemoveOptions) (ContainerRemoveResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerRemoveResult{}, err + } + + query := url.Values{} + if options.RemoveVolumes { + query.Set("v", "1") + } + if options.RemoveLinks { + query.Set("link", "1") + } + + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerRemoveResult{}, err + } + return ContainerRemoveResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_rename.go b/vendor/github.com/moby/moby/client/container_rename.go new file mode 100644 index 000000000..7c6d515b3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_rename.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "net/url" + "strings" + + "github.com/containerd/errdefs" +) + +// ContainerRenameOptions represents the options for renaming a container. +type ContainerRenameOptions struct { + NewName string +} + +// ContainerRenameResult represents the result of a container rename operation. +type ContainerRenameResult struct { + // This struct can be expanded in the future if needed +} + +// ContainerRename changes the name of a given container. +func (cli *Client) ContainerRename(ctx context.Context, containerID string, options ContainerRenameOptions) (ContainerRenameResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerRenameResult{}, err + } + options.NewName = strings.TrimSpace(options.NewName) + if options.NewName == "" || strings.TrimPrefix(options.NewName, "/") == "" { + // daemons before v29.0 did not handle the canonical name ("/") well + // let's be nice and validate it here before sending + return ContainerRenameResult{}, errdefs.ErrInvalidArgument.WithMessage("new name cannot be blank") + } + + query := url.Values{} + query.Set("name", options.NewName) + resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) + defer ensureReaderClosed(resp) + return ContainerRenameResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/container_resize.go b/vendor/github.com/moby/moby/client/container_resize.go new file mode 100644 index 000000000..311a9dcf5 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_resize.go @@ -0,0 +1,66 @@ +package client + +import ( + "context" + "net/url" + "strconv" +) + +// ContainerResizeOptions holds parameters to resize a TTY. +// It can be used to resize container TTYs and +// exec process TTYs too. +type ContainerResizeOptions struct { + Height uint + Width uint +} + +// ContainerResizeResult holds the result of [Client.ContainerResize], +type ContainerResizeResult struct { + // Add future fields here. +} + +// ContainerResize changes the size of the pseudo-TTY for a container. +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options ContainerResizeOptions) (ContainerResizeResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerResizeResult{}, err + } + // FIXME(thaJeztah): the API / backend accepts uint32, but container.ResizeOptions uses uint. 
+ query := url.Values{} + query.Set("h", strconv.FormatUint(uint64(options.Height), 10)) + query.Set("w", strconv.FormatUint(uint64(options.Width), 10)) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/resize", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerResizeResult{}, err + } + return ContainerResizeResult{}, nil +} + +// ExecResizeOptions holds options for resizing a container exec TTY. +type ExecResizeOptions ContainerResizeOptions + +// ExecResizeResult holds the result of resizing a container exec TTY. +type ExecResizeResult struct { +} + +// ExecResize changes the size of the tty for an exec process running inside a container. +func (cli *Client) ExecResize(ctx context.Context, execID string, options ExecResizeOptions) (ExecResizeResult, error) { + execID, err := trimID("exec", execID) + if err != nil { + return ExecResizeResult{}, err + } + // FIXME(thaJeztah): the API / backend accepts uint32, but container.ResizeOptions uses uint. + query := url.Values{} + query.Set("h", strconv.FormatUint(uint64(options.Height), 10)) + query.Set("w", strconv.FormatUint(uint64(options.Width), 10)) + + resp, err := cli.post(ctx, "/exec/"+execID+"/resize", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ExecResizeResult{}, err + } + return ExecResizeResult{}, nil + +} diff --git a/vendor/github.com/moby/moby/client/container_restart.go b/vendor/github.com/moby/moby/client/container_restart.go new file mode 100644 index 000000000..e883f7589 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_restart.go @@ -0,0 +1,54 @@ +package client + +import ( + "context" + "net/url" + "strconv" +) + +// ContainerRestartOptions holds options for [Client.ContainerRestart]. +type ContainerRestartOptions struct { + // Signal (optional) is the signal to send to the container to (gracefully) + // stop it before forcibly terminating the container with SIGKILL after the + // timeout expires. If no value is set, the default (SIGTERM) is used. + Signal string `json:",omitempty"` + + // Timeout (optional) is the timeout (in seconds) to wait for the container + // to stop gracefully before forcibly terminating it with SIGKILL. + // + // - Use nil to use the default timeout (10 seconds). + // - Use '-1' to wait indefinitely. + // - Use '0' to not wait for the container to exit gracefully, and + // immediately proceeds to forcibly terminating the container. + // - Other positive values are used as timeout (in seconds). + Timeout *int `json:",omitempty"` +} + +// ContainerRestartResult holds the result of [Client.ContainerRestart], +type ContainerRestartResult struct { + // Add future fields here. +} + +// ContainerRestart stops, and starts a container again. +// It makes the daemon wait for the container to be up again for +// a specific amount of time, given the timeout. 
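+//
+// A minimal usage sketch, assuming an initialized *Client named cli and a
+// valid containerID; Timeout is a pointer so that nil can select the
+// daemon's default:
+//
+//	timeout := 30 // seconds to wait before the daemon kills the container
+//	if _, err := cli.ContainerRestart(ctx, containerID, ContainerRestartOptions{
+//		Signal:  "SIGTERM",
+//		Timeout: &timeout,
+//	}); err != nil {
+//		log.Fatal(err)
+//	}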
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options ContainerRestartOptions) (ContainerRestartResult, error) {
+	containerID, err := trimID("container", containerID)
+	if err != nil {
+		return ContainerRestartResult{}, err
+	}
+
+	query := url.Values{}
+	if options.Timeout != nil {
+		query.Set("t", strconv.Itoa(*options.Timeout))
+	}
+	if options.Signal != "" {
+		query.Set("signal", options.Signal)
+	}
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return ContainerRestartResult{}, err
+	}
+	return ContainerRestartResult{}, nil
+}
diff --git a/vendor/github.com/moby/moby/client/container_start.go b/vendor/github.com/moby/moby/client/container_start.go
new file mode 100644
index 000000000..dfb821d1d
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_start.go
@@ -0,0 +1,40 @@
+package client
+
+import (
+	"context"
+	"net/url"
+)
+
+// ContainerStartOptions holds options for [Client.ContainerStart].
+type ContainerStartOptions struct {
+	CheckpointID  string
+	CheckpointDir string
+}
+
+// ContainerStartResult holds the result of [Client.ContainerStart].
+type ContainerStartResult struct {
+	// Add future fields here.
+}
+
+// ContainerStart sends a request to the docker daemon to start a container.
+func (cli *Client) ContainerStart(ctx context.Context, containerID string, options ContainerStartOptions) (ContainerStartResult, error) {
+	containerID, err := trimID("container", containerID)
+	if err != nil {
+		return ContainerStartResult{}, err
+	}
+
+	query := url.Values{}
+	if options.CheckpointID != "" {
+		query.Set("checkpoint", options.CheckpointID)
+	}
+	if options.CheckpointDir != "" {
+		query.Set("checkpoint-dir", options.CheckpointDir)
+	}
+
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return ContainerStartResult{}, err
+	}
+	return ContainerStartResult{}, nil
+}
diff --git a/vendor/github.com/moby/moby/client/container_stats.go b/vendor/github.com/moby/moby/client/container_stats.go
new file mode 100644
index 000000000..277769dbf
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_stats.go
@@ -0,0 +1,75 @@
+package client
+
+import (
+	"context"
+	"io"
+	"net/url"
+)
+
+// ContainerStatsOptions holds parameters to retrieve container statistics
+// using the [Client.ContainerStats] method.
+type ContainerStatsOptions struct {
+	// Stream enables streaming [container.StatsResponse] results instead
+	// of collecting a single sample. If enabled, the client remains attached
+	// until the [ContainerStatsResult.Body] is closed or the context is
+	// canceled.
+	Stream bool
+
+	// IncludePreviousSample asks the daemon to collect a prior sample to populate the
+	// [container.StatsResponse.PreRead] and [container.StatsResponse.PreCPUStats]
+	// fields.
+	//
+	// If set, the daemon collects two samples at a one-second interval before
+	// returning the result. The first sample populates the PreCPUStats (“previous
+	// CPU”) field, allowing delta calculations for CPU usage. If false, only
+	// a single sample is taken and returned immediately, leaving PreRead and
+	// PreCPUStats empty.
+	//
+	// This option has no effect if Stream is enabled. If Stream is enabled,
+	// [container.StatsResponse.PreCPUStats] is never populated for the first
+	// record.
+ IncludePreviousSample bool +} + +// ContainerStatsResult holds the result from [Client.ContainerStats]. +// +// It wraps an [io.ReadCloser] that provides one or more [container.StatsResponse] +// objects for a container, as produced by the "GET /containers/{id}/stats" endpoint. +// If streaming is disabled, the stream contains a single record. +type ContainerStatsResult struct { + Body io.ReadCloser +} + +// ContainerStats retrieves live resource usage statistics for the specified +// container. The caller must close the [io.ReadCloser] in the returned result +// to release associated resources. +// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled, +func (cli *Client) ContainerStats(ctx context.Context, containerID string, options ContainerStatsOptions) (ContainerStatsResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerStatsResult{}, err + } + + query := url.Values{} + if options.Stream { + query.Set("stream", "true") + } else { + // Note: daemons before v29.0 return an error if both set: "cannot have stream=true and one-shot=true" + // + // TODO(thaJeztah): consider making "stream=false" the default for the API as well, or using Accept Header to switch. + query.Set("stream", "false") + if !options.IncludePreviousSample { + query.Set("one-shot", "true") + } + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return ContainerStatsResult{}, err + } + + return ContainerStatsResult{ + Body: newCancelReadCloser(ctx, resp.Body), + }, nil +} diff --git a/vendor/github.com/moby/moby/client/container_stop.go b/vendor/github.com/moby/moby/client/container_stop.go new file mode 100644 index 000000000..d4d47d8fd --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_stop.go @@ -0,0 +1,58 @@ +package client + +import ( + "context" + "net/url" + "strconv" +) + +// ContainerStopOptions holds the options for [Client.ContainerStop]. +type ContainerStopOptions struct { + // Signal (optional) is the signal to send to the container to (gracefully) + // stop it before forcibly terminating the container with SIGKILL after the + // timeout expires. If no value is set, the default (SIGTERM) is used. + Signal string `json:",omitempty"` + + // Timeout (optional) is the timeout (in seconds) to wait for the container + // to stop gracefully before forcibly terminating it with SIGKILL. + // + // - Use nil to use the default timeout (10 seconds). + // - Use '-1' to wait indefinitely. + // - Use '0' to not wait for the container to exit gracefully, and + // immediately proceeds to forcibly terminating the container. + // - Other positive values are used as timeout (in seconds). + Timeout *int `json:",omitempty"` +} + +// ContainerStopResult holds the result of [Client.ContainerStop], +type ContainerStopResult struct { + // Add future fields here. +} + +// ContainerStop stops a container. In case the container fails to stop +// gracefully within a time frame specified by the timeout argument, +// it is forcefully terminated (killed). +// +// If the timeout is nil, the container's StopTimeout value is used, if set, +// otherwise the engine default. A negative timeout value can be specified, +// meaning no timeout, i.e. no forceful termination is performed. 
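+//
+// A minimal usage sketch, assuming an initialized *Client named cli and a
+// valid containerID; leaving Timeout nil uses the container's StopTimeout
+// or the engine default:
+//
+//	timeout := 10 // seconds
+//	if _, err := cli.ContainerStop(ctx, containerID, ContainerStopOptions{
+//		Timeout: &timeout,
+//	}); err != nil {
+//		log.Fatal(err)
+//	}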
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, options ContainerStopOptions) (ContainerStopResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerStopResult{}, err + } + + query := url.Values{} + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" { + query.Set("signal", options.Signal) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerStopResult{}, err + } + return ContainerStopResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_top.go b/vendor/github.com/moby/moby/client/container_top.go new file mode 100644 index 000000000..dc0af8ae4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_top.go @@ -0,0 +1,44 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + "strings" + + "github.com/moby/moby/api/types/container" +) + +// ContainerTopOptions defines options for container top operations. +type ContainerTopOptions struct { + Arguments []string +} + +// ContainerTopResult represents the result of a ContainerTop operation. +type ContainerTopResult struct { + Processes [][]string + Titles []string +} + +// ContainerTop shows process information from within a container. +func (cli *Client) ContainerTop(ctx context.Context, containerID string, options ContainerTopOptions) (ContainerTopResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerTopResult{}, err + } + + query := url.Values{} + if len(options.Arguments) > 0 { + query.Set("ps_args", strings.Join(options.Arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerTopResult{}, err + } + + var response container.TopResponse + err = json.NewDecoder(resp.Body).Decode(&response) + return ContainerTopResult{Processes: response.Processes, Titles: response.Titles}, err +} diff --git a/vendor/github.com/moby/moby/client/container_unpause.go b/vendor/github.com/moby/moby/client/container_unpause.go new file mode 100644 index 000000000..627d60c96 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_unpause.go @@ -0,0 +1,28 @@ +package client + +import "context" + +// ContainerUnpauseOptions holds options for [Client.ContainerUnpause]. +type ContainerUnpauseOptions struct { + // Add future optional parameters here. +} + +// ContainerUnpauseResult holds the result of [Client.ContainerUnpause], +type ContainerUnpauseResult struct { + // Add future fields here. +} + +// ContainerUnpause resumes the process execution within a container. 
+func (cli *Client) ContainerUnpause(ctx context.Context, containerID string, options ContainerUnpauseOptions) (ContainerUnpauseResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerUnpauseResult{}, err + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerUnpauseResult{}, err + } + return ContainerUnpauseResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_update.go b/vendor/github.com/moby/moby/client/container_update.go new file mode 100644 index 000000000..a1d4d249a --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_update.go @@ -0,0 +1,46 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/container" +) + +// ContainerUpdateOptions holds options for [Client.ContainerUpdate]. +type ContainerUpdateOptions struct { + Resources *container.Resources + RestartPolicy *container.RestartPolicy +} + +// ContainerUpdateResult is the result from updating a container. +type ContainerUpdateResult struct { + // Warnings encountered when updating the container. + Warnings []string +} + +// ContainerUpdate updates the resources of a container. +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, options ContainerUpdateOptions) (ContainerUpdateResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerUpdateResult{}, err + } + + updateConfig := container.UpdateConfig{} + if options.Resources != nil { + updateConfig.Resources = *options.Resources + } + if options.RestartPolicy != nil { + updateConfig.RestartPolicy = *options.RestartPolicy + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerUpdateResult{}, err + } + + var response container.UpdateResponse + err = json.NewDecoder(resp.Body).Decode(&response) + return ContainerUpdateResult{Warnings: response.Warnings}, err +} diff --git a/vendor/github.com/moby/moby/client/container_wait.go b/vendor/github.com/moby/moby/client/container_wait.go new file mode 100644 index 000000000..6f71ed051 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_wait.go @@ -0,0 +1,92 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/url" + + "github.com/moby/moby/api/types/container" +) + +const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ + +// ContainerWaitOptions holds options for [Client.ContainerWait]. +type ContainerWaitOptions struct { + Condition container.WaitCondition +} + +// ContainerWaitResult defines the result from the [Client.ContainerWait] method. +type ContainerWaitResult struct { + Result <-chan container.WaitResponse + Error <-chan error +} + +// ContainerWait waits until the specified container is in a certain state +// indicated by the given condition, either; +// +// - "not-running" ([container.WaitConditionNotRunning]) (default) +// - "next-exit" ([container.WaitConditionNextExit]) +// - "removed" ([container.WaitConditionRemoved]) +// +// ContainerWait blocks until the request has been acknowledged by the server +// (with a response header), then returns two channels on which the caller can +// wait for the exit status of the container or an error if there was a problem +// either beginning the wait request or in getting the response. 
This allows the +// caller to synchronize ContainerWait with other calls, such as specifying a +// "next-exit" condition ([container.WaitConditionNextExit]) before issuing a +// [Client.ContainerStart] request. +func (cli *Client) ContainerWait(ctx context.Context, containerID string, options ContainerWaitOptions) ContainerWaitResult { + resultC := make(chan container.WaitResponse) + errC := make(chan error, 1) + + containerID, err := trimID("container", containerID) + if err != nil { + errC <- err + return ContainerWaitResult{Result: resultC, Error: errC} + } + + query := url.Values{} + if options.Condition != "" { + query.Set("condition", string(options.Condition)) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) + if err != nil { + defer ensureReaderClosed(resp) + errC <- err + return ContainerWaitResult{Result: resultC, Error: errC} + } + + go func() { + defer ensureReaderClosed(resp) + + responseText := bytes.NewBuffer(nil) + stream := io.TeeReader(resp.Body, responseText) + + var res container.WaitResponse + if err := json.NewDecoder(stream).Decode(&res); err != nil { + // NOTE(nicks): The /wait API does not work well with HTTP proxies. + // At any time, the proxy could cut off the response stream. + // + // But because the HTTP status has already been written, the proxy's + // only option is to write a plaintext error message. + // + // If there's a JSON parsing error, read the real error message + // off the body and send it to the client. + if errors.As(err, new(*json.SyntaxError)) { + _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) + errC <- errors.New(responseText.String()) + } else { + errC <- err + } + return + } + + resultC <- res + }() + + return ContainerWaitResult{Result: resultC, Error: errC} +} diff --git a/vendor/github.com/moby/moby/client/distribution_inspect.go b/vendor/github.com/moby/moby/client/distribution_inspect.go new file mode 100644 index 000000000..ffbf869d3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/distribution_inspect.go @@ -0,0 +1,45 @@ +package client + +import ( + "context" + "encoding/json" + "net/http" + "net/url" + + "github.com/moby/moby/api/types/registry" +) + +// DistributionInspectResult holds the result of the DistributionInspect operation. +type DistributionInspectResult struct { + registry.DistributionInspect +} + +// DistributionInspectOptions holds options for the DistributionInspect operation. +type DistributionInspectOptions struct { + EncodedRegistryAuth string +} + +// DistributionInspect returns the image digest with the full manifest. 
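+//
+// A minimal usage sketch, assuming an initialized *Client named cli; the
+// image reference is a hypothetical example, anonymous pulls need no
+// EncodedRegistryAuth, and it assumes the embedded
+// [registry.DistributionInspect] exposes the manifest Descriptor:
+//
+//	res, err := cli.DistributionInspect(ctx, "docker.io/library/alpine:latest", DistributionInspectOptions{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("digest:", res.Descriptor.Digest)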
+func (cli *Client) DistributionInspect(ctx context.Context, imageRef string, options DistributionInspectOptions) (DistributionInspectResult, error) {
+	if imageRef == "" {
+		return DistributionInspectResult{}, objectNotFoundError{object: "distribution", id: imageRef}
+	}
+
+	var headers http.Header
+	if options.EncodedRegistryAuth != "" {
+		headers = http.Header{
+			registry.AuthHeader: {options.EncodedRegistryAuth},
+		}
+	}
+
+	// Contact the registry to retrieve digest and platform information
+	resp, err := cli.get(ctx, "/distribution/"+imageRef+"/json", url.Values{}, headers)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return DistributionInspectResult{}, err
+	}
+
+	var distributionInspect registry.DistributionInspect
+	err = json.NewDecoder(resp.Body).Decode(&distributionInspect)
+	return DistributionInspectResult{DistributionInspect: distributionInspect}, err
+}
diff --git a/vendor/github.com/moby/moby/client/envvars.go b/vendor/github.com/moby/moby/client/envvars.go
new file mode 100644
index 000000000..a02295d16
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/envvars.go
@@ -0,0 +1,95 @@
+package client
+
+const (
+	// EnvOverrideHost is the name of the environment variable that can be used
+	// to override the default host to connect to (DefaultDockerHost).
+	//
+	// This env-var is read by [FromEnv] and [WithHostFromEnv] and, when set to a
+	// non-empty value, takes precedence over the default host (which is platform
+	// specific), or any host already set.
+	EnvOverrideHost = "DOCKER_HOST"
+
+	// EnvOverrideAPIVersion is the name of the environment variable that can
+	// be used to override the API version to use. Value must be
+	// formatted as MAJOR.MINOR, for example, "1.19".
+	//
+	// This env-var is read by [FromEnv] and [WithAPIVersionFromEnv] and, when set to a
+	// non-empty value, takes precedence over API version negotiation.
+	//
+	// This environment variable should be used for debugging purposes only, as
+	// it can set the client to use an incompatible (or invalid) API version.
+	EnvOverrideAPIVersion = "DOCKER_API_VERSION"
+
+	// EnvOverrideCertPath is the name of the environment variable that can be
+	// used to specify the directory from which to load the TLS certificates
+	// (ca.pem, cert.pem, key.pem). These certificates are used to configure
+	// the [Client] for a TCP connection protected by TLS client authentication.
+	//
+	// TLS certificate verification is enabled by default if the Client is configured
+	// to use a TLS connection. Refer to [EnvTLSVerify] below to learn how to
+	// disable verification for testing purposes.
+	//
+	// WARNING: Access to the remote API is equivalent to root access to the
+	// host where the daemon runs. Do not expose the API without protection,
+	// and only if needed. Make sure you are familiar with the ["daemon attack surface"].
+	//
+	// For local access to the API, it is recommended to connect with the daemon
+	// using the default local socket connection (on Linux), or the named pipe
+	// (on Windows).
+	//
+	// If you need to access the API of a remote daemon, consider using an SSH
+	// (ssh://) connection, which is easier to set up, and requires no additional
+	// configuration if the host is accessible using ssh.
+	//
+	// If you cannot use the alternatives above and must expose the API over
+	// a TCP connection, refer to [Protect the Docker daemon socket]
+	// to learn how to configure the daemon and client to use a TCP connection
+	// with TLS client authentication.
Make sure you know the differences between
+	// a regular TLS connection and a TLS connection protected by TLS client
+	// authentication, and verify that the API cannot be accessed by other clients.
+	//
+	// ["daemon attack surface"]: https://docs.docker.com/go/attack-surface/
+	// [Protect the Docker daemon socket]: https://docs.docker.com/engine/security/protect-access/
+	EnvOverrideCertPath = "DOCKER_CERT_PATH"
+
+	// EnvTLSVerify is the name of the environment variable that can be used to
+	// enable or disable TLS certificate verification. When set to a non-empty
+	// value, TLS certificate verification is enabled, and the client is configured
+	// to use a TLS connection, using certificates from the default directories
+	// (within `~/.docker`); refer to EnvOverrideCertPath above for additional
+	// details.
+	//
+	// WARNING: Access to the remote API is equivalent to root access to the
+	// host where the daemon runs. Do not expose the API without protection,
+	// and only if needed. Make sure you are familiar with the ["daemon attack surface"].
+	//
+	// Before setting up your client and daemon to use a TCP connection with TLS
+	// client authentication, consider using one of the alternatives mentioned
+	// in [EnvOverrideCertPath].
+	//
+	// Disabling TLS certificate verification (for testing purposes)
+	//
+	// TLS certificate verification is enabled by default if the Client is configured
+	// to use a TLS connection, and it is highly recommended to keep verification
+	// enabled to prevent machine-in-the-middle attacks. Refer to [Protect the Docker daemon socket]
+	// in the documentation and pages linked from that page to learn how to
+	// configure the daemon and client to use a TCP connection with TLS client
+	// authentication enabled.
+	//
+	// Set the "DOCKER_TLS_VERIFY" environment variable to an empty string ("") to
+	// disable TLS certificate verification. Disabling verification is insecure,
+	// so should only be done for testing purposes.
+	//
+	// From the [crypto/tls.Config] documentation:
+	//
+	//	InsecureSkipVerify controls whether a client verifies the server's
+	//	certificate chain and host name. If InsecureSkipVerify is true, crypto/tls
+	//	accepts any certificate presented by the server and any host name in that
+	//	certificate. In this mode, TLS is susceptible to machine-in-the-middle
+	//	attacks unless custom verification is used. This should be used only for
+	//	testing or in combination with VerifyConnection or VerifyPeerCertificate.
+	//
+	// ["daemon attack surface"]: https://docs.docker.com/go/attack-surface/
+	// [Protect the Docker daemon socket]: https://docs.docker.com/engine/security/protect-access/
+	EnvTLSVerify = "DOCKER_TLS_VERIFY"
+)
diff --git a/vendor/github.com/moby/moby/client/errors.go b/vendor/github.com/moby/moby/client/errors.go
new file mode 100644
index 000000000..9fbfa7666
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/errors.go
@@ -0,0 +1,114 @@
+package client
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+
+	cerrdefs "github.com/containerd/errdefs"
+	"github.com/containerd/errdefs/pkg/errhttp"
+	"github.com/moby/moby/client/pkg/versions"
+)
+
+// errConnectionFailed is the error returned when connecting to the daemon fails.
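For orientation: a client that honors the variables above is normally built with the [FromEnv] option that these comments reference. A minimal sketch follows; the `NewClientWithOpts` constructor name is an assumption carried over from the docker/docker client and does not appear in this diff.

// newEnvClient builds a client from the environment variables documented above.
// NewClientWithOpts is assumed; FromEnv is referenced by the comments in this file.
func newEnvClient() (*client.Client, error) {
	return client.NewClientWithOpts(client.FromEnv)
}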
+type errConnectionFailed struct {
+	error
+}
+
+// Error returns a string representation of an errConnectionFailed
+func (e errConnectionFailed) Error() string {
+	return e.error.Error()
+}
+
+func (e errConnectionFailed) Unwrap() error {
+	return e.error
+}
+
+// IsErrConnectionFailed returns true if the error is caused by a failed connection.
+func IsErrConnectionFailed(err error) bool {
+	return errors.As(err, &errConnectionFailed{})
+}
+
+// connectionFailed returns an error with host in the error message when connection
+// to docker daemon failed.
+func connectionFailed(host string) error {
+	var err error
+	if host == "" {
+		err = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?")
+	} else {
+		err = fmt.Errorf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", host)
+	}
+	return errConnectionFailed{error: err}
+}
+
+type objectNotFoundError struct {
+	object string
+	id     string
+}
+
+func (e objectNotFoundError) NotFound() {}
+
+func (e objectNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such %s: %s", e.object, e.id)
+}
+
+// requiresVersion returns an error if the API version required by a feature is
+// higher than the API version currently supported by the client.
+//
+// It performs API-version negotiation if the Client is configured with this
+// option, otherwise it assumes the latest API version is used.
+func (cli *Client) requiresVersion(ctx context.Context, apiRequired, feature string) error {
+	// Make sure we negotiated (if the client is configured to do so),
+	// as code below contains API-version specific handling of options.
+	//
+	// Normally, version-negotiation (if enabled) would not happen until
+	// the API request is made.
+	if err := cli.checkVersion(ctx); err != nil {
+		return err
+	}
+	if cli.version != "" && versions.LessThan(cli.version, apiRequired) {
+		return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, apiRequired, cli.version)
+	}
+	return nil
+}
+
+type httpError struct {
+	err    error
+	errdef error
+}
+
+func (e *httpError) Error() string {
+	return e.err.Error()
+}
+
+func (e *httpError) Unwrap() error {
+	return e.err
+}
+
+func (e *httpError) Is(target error) bool {
+	return errors.Is(e.errdef, target)
+}
+
+// httpErrorFromStatusCode creates an errdef error, based on the provided HTTP status-code
+func httpErrorFromStatusCode(err error, statusCode int) error {
+	if err == nil {
+		return nil
+	}
+	base := errhttp.ToNative(statusCode)
+	if base != nil {
+		return &httpError{err: err, errdef: base}
+	}
+
+	switch {
+	case statusCode >= http.StatusOK && statusCode < http.StatusBadRequest:
+		// success or redirect status: not an API error, so return the error as-is
+		return err
+	case statusCode >= http.StatusBadRequest && statusCode < http.StatusInternalServerError:
+		// client error
+		return &httpError{err: err, errdef: cerrdefs.ErrInvalidArgument}
+	case statusCode >= http.StatusInternalServerError && statusCode < 600:
+		// server error
+		return &httpError{err: err, errdef: cerrdefs.ErrInternal}
+	default:
+		return &httpError{err: err, errdef: cerrdefs.ErrUnknown}
+	}
+}
diff --git a/vendor/github.com/moby/moby/client/filters.go b/vendor/github.com/moby/moby/client/filters.go
new file mode 100644
index 000000000..347ad5c68
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/filters.go
@@ -0,0 +1,59 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+)
+
+// Filters describes a predicate for an API request.
+//
+// Each entry in the map is a filter term.
+// Each term is evaluated against the set of values.
+// A filter term is satisfied if any one of the values in the set is a match. +// An item matches the filters when all terms are satisfied. +// +// Like all other map types in Go, the zero value is empty and read-only. +type Filters map[string]map[string]bool + +// Add appends values to the value-set of term. +// +// The receiver f is returned for chaining. +// +// f := make(Filters).Add("name", "foo", "bar").Add("status", "exited") +func (f Filters) Add(term string, values ...string) Filters { + if _, ok := f[term]; !ok { + f[term] = make(map[string]bool) + } + for _, v := range values { + f[term][v] = true + } + return f +} + +// Clone returns a deep copy of f. +func (f Filters) Clone() Filters { + out := make(Filters, len(f)) + for term, values := range f { + inner := make(map[string]bool, len(values)) + for v, ok := range values { + inner[v] = ok + } + out[term] = inner + } + return out +} + +// updateURLValues sets the "filters" key in values to the marshalled value of +// f, replacing any existing values. When f is empty, any existing "filters" key +// is removed. +func (f Filters) updateURLValues(values url.Values) { + if len(f) > 0 { + b, err := json.Marshal(f) + if err != nil { + panic(err) // Marshaling builtin types should never fail + } + values.Set("filters", string(b)) + } else { + values.Del("filters") + } +} diff --git a/vendor/github.com/moby/moby/client/hijack.go b/vendor/github.com/moby/moby/client/hijack.go new file mode 100644 index 000000000..31c44e598 --- /dev/null +++ b/vendor/github.com/moby/moby/client/hijack.go @@ -0,0 +1,172 @@ +package client + +import ( + "bufio" + "context" + "fmt" + "net" + "net/http" + "net/url" + "time" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" +) + +// postHijacked sends a POST request and hijacks the connection. +func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body any, headers map[string][]string) (HijackedResponse, error) { + jsonBody, err := jsonEncode(body) + if err != nil { + return HijackedResponse{}, err + } + req, err := cli.buildRequest(ctx, http.MethodPost, cli.getAPIPath(ctx, path, query), jsonBody, headers) + if err != nil { + return HijackedResponse{}, err + } + conn, mediaType, err := setupHijackConn(cli.dialer(), req, "tcp") + if err != nil { + return HijackedResponse{}, err + } + + return NewHijackedResponse(conn, mediaType), nil +} + +// DialHijack returns a hijacked connection with negotiated protocol proto. +func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, http.NoBody) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + conn, _, err := setupHijackConn(cli.Dialer(), req, proto) + return conn, err +} + +func setupHijackConn(dialer func(context.Context) (net.Conn, error), req *http.Request, proto string) (_ net.Conn, _ string, retErr error) { + ctx := req.Context() + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", proto) + + conn, err := dialer(ctx) + if err != nil { + return nil, "", fmt.Errorf("cannot connect to the Docker daemon. 
Is 'docker daemon' running on this host?: %w", err)
+	}
+	defer func() {
+		if retErr != nil {
+			_ = conn.Close()
+		}
+	}()
+
+	// When we set up a TCP connection for hijack, there could be long periods
+	// of inactivity (a long running command with no output) that in certain
+	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+	// state. Setting TCP KeepAlive on the socket connection prevents
+	// ECONNTIMEOUT unless the socket connection is truly broken.
+	if tcpConn, ok := conn.(*net.TCPConn); ok {
+		_ = tcpConn.SetKeepAlive(true)
+		_ = tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
+
+	hc := &hijackedConn{conn, bufio.NewReader(conn)}
+
+	// Server hijacks the connection, error 'connection closed' expected
+	resp, err := otelhttp.NewTransport(hc).RoundTrip(req)
+	if err != nil {
+		return nil, "", err
+	}
+	if resp.StatusCode != http.StatusSwitchingProtocols {
+		_ = resp.Body.Close()
+		return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode)
+	}
+
+	if hc.r.Buffered() > 0 {
+		// If there is buffered content, wrap the connection. We return an
+		// object that implements CloseWrite if the underlying connection
+		// implements it.
+		if _, ok := hc.Conn.(CloseWriter); ok {
+			conn = &hijackedConnCloseWriter{hc}
+		} else {
+			conn = hc
+		}
+	} else {
+		hc.r.Reset(nil)
+	}
+
+	return conn, resp.Header.Get("Content-Type"), nil
+}
+
+// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case
+// that a) there was already buffered data in the http layer when Hijack() was
+// called, and b) the underlying net.Conn does *not* implement CloseWrite().
+// hijackedConn does not implement CloseWrite() either.
+type hijackedConn struct {
+	net.Conn
+	r *bufio.Reader
+}
+
+func (c *hijackedConn) RoundTrip(req *http.Request) (*http.Response, error) {
+	if err := req.Write(c.Conn); err != nil {
+		return nil, err
+	}
+	return http.ReadResponse(c.r, req)
+}
+
+func (c *hijackedConn) Read(b []byte) (int, error) {
+	return c.r.Read(b)
+}
+
+// hijackedConnCloseWriter is a hijackedConn which additionally implements
+// CloseWrite(). It is returned by setupHijackConn in the case that a) there
+// was already buffered data in the http layer when Hijack() was called, and b)
+// the underlying net.Conn *does* implement CloseWrite().
+type hijackedConnCloseWriter struct {
+	*hijackedConn
+}
+
+var _ CloseWriter = &hijackedConnCloseWriter{}
+
+func (c *hijackedConnCloseWriter) CloseWrite() error {
+	conn := c.Conn.(CloseWriter)
+	return conn.CloseWrite()
+}
+
+// NewHijackedResponse initializes a [HijackedResponse] type.
+func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse {
+	return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType}
+}
+
+// HijackedResponse holds connection information for a hijacked request.
+type HijackedResponse struct {
+	mediaType string
+	Conn      net.Conn
+	Reader    *bufio.Reader
+}
+
+// Close closes the hijacked connection and reader.
+func (h *HijackedResponse) Close() {
+	h.Conn.Close()
+}
+
+// MediaType lets the client know whether the HijackedResponse holds a raw or a
+// multiplexed stream. It returns false if the HTTP Content-Type is not
+// relevant, and the container must be inspected instead.
+func (h *HijackedResponse) MediaType() (string, bool) {
+	if h.mediaType == "" {
+		return "", false
+	}
+	return h.mediaType, true
+}
+
+// CloseWriter is an interface implemented by connections that can close
+// the write side of the stream.
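A sketch of how a caller typically drives a hijacked connection may help here. It is illustrative only; the attach-style call that produces the HijackedResponse is assumed, and a multiplexed stream (see MediaType above) would additionally need demultiplexing.

package example

import (
	"io"
	"os"

	client "github.com/moby/moby/client"
)

// interact copies stdin into the hijacked connection, half-closes the write
// side so the daemon sees EOF, then drains the output until the daemon closes
// the stream.
func interact(h client.HijackedResponse) error {
	go func() {
		_, _ = io.Copy(h.Conn, os.Stdin)
		_ = h.CloseWrite()
	}()
	_, err := io.Copy(os.Stdout, h.Reader)
	h.Close()
	return err
}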
+type CloseWriter interface {
+	CloseWrite() error
+}
+
+// CloseWrite closes the write side of the hijacked connection, if the
+// underlying connection supports it.
+func (h *HijackedResponse) CloseWrite() error {
+	if conn, ok := h.Conn.(CloseWriter); ok {
+		return conn.CloseWrite()
+	}
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/client/image_build.go b/vendor/github.com/moby/moby/client/image_build.go
new file mode 100644
index 000000000..5062ec5de
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_build.go
@@ -0,0 +1,179 @@
+package client
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+
+	cerrdefs "github.com/containerd/errdefs"
+	"github.com/moby/moby/api/types/container"
+	"github.com/moby/moby/api/types/network"
+)
+
+// ImageBuild sends a request to the daemon to build images.
+// The Body in the response implements an [io.ReadCloser] and it's up to the caller to
+// close it.
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options ImageBuildOptions) (ImageBuildResult, error) {
+	query, err := cli.imageBuildOptionsToQuery(ctx, options)
+	if err != nil {
+		return ImageBuildResult{}, err
+	}
+
+	buf, err := json.Marshal(options.AuthConfigs)
+	if err != nil {
+		return ImageBuildResult{}, err
+	}
+
+	headers := http.Header{}
+	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
+	headers.Set("Content-Type", "application/x-tar")
+
+	resp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
+	if err != nil {
+		return ImageBuildResult{}, err
+	}
+
+	return ImageBuildResult{
+		Body: resp.Body,
+	}, nil
+}
+
+func (cli *Client) imageBuildOptionsToQuery(_ context.Context, options ImageBuildOptions) (url.Values, error) {
+	query := url.Values{}
+	if len(options.Tags) > 0 {
+		query["t"] = options.Tags
+	}
+	if len(options.SecurityOpt) > 0 {
+		query["securityopt"] = options.SecurityOpt
+	}
+	if len(options.ExtraHosts) > 0 {
+		query["extrahosts"] = options.ExtraHosts
+	}
+	if options.SuppressOutput {
+		query.Set("q", "1")
+	}
+	if options.RemoteContext != "" {
+		query.Set("remote", options.RemoteContext)
+	}
+	if options.NoCache {
+		query.Set("nocache", "1")
+	}
+	if !options.Remove {
+		// only send value when opting out because the daemon's default is
+		// to remove intermediate containers after a successful build.
+		//
+		// TODO(thaJeztah): deprecate "Remove" option, and provide a "NoRemove" or "Keep" option instead.
+		query.Set("rm", "0")
+	}
+
+	if options.ForceRemove {
+		query.Set("forcerm", "1")
+	}
+
+	if options.PullParent {
+		query.Set("pull", "1")
+	}
+
+	if options.Squash {
+		// TODO(thaJeztah): squash is experimental, and deprecated when using BuildKit?
+ query.Set("squash", "1") + } + + if !container.Isolation.IsDefault(options.Isolation) { + query.Set("isolation", string(options.Isolation)) + } + + if options.CPUSetCPUs != "" { + query.Set("cpusetcpus", options.CPUSetCPUs) + } + if options.NetworkMode != "" && options.NetworkMode != network.NetworkDefault { + query.Set("networkmode", options.NetworkMode) + } + if options.CPUSetMems != "" { + query.Set("cpusetmems", options.CPUSetMems) + } + if options.CPUShares != 0 { + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + } + if options.CPUQuota != 0 { + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + } + if options.CPUPeriod != 0 { + query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) + } + if options.Memory != 0 { + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + } + if options.MemorySwap != 0 { + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + } + if options.CgroupParent != "" { + query.Set("cgroupparent", options.CgroupParent) + } + if options.ShmSize != 0 { + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + } + if options.Dockerfile != "" { + query.Set("dockerfile", options.Dockerfile) + } + if options.Target != "" { + query.Set("target", options.Target) + } + if len(options.Ulimits) != 0 { + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + } + if len(options.BuildArgs) != 0 { + buildArgsJSON, err := json.Marshal(options.BuildArgs) + if err != nil { + return query, err + } + query.Set("buildargs", string(buildArgsJSON)) + } + if len(options.Labels) != 0 { + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + } + if len(options.CacheFrom) != 0 { + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) + } + if options.SessionID != "" { + query.Set("session", options.SessionID) + } + if len(options.Platforms) > 0 { + if len(options.Platforms) > 1 { + // TODO(thaJeztah): update API spec and add equivalent check on the daemon. We need this still for older daemons, which would ignore it. + return query, cerrdefs.ErrInvalidArgument.WithMessage("specifying multiple platforms is not yet supported") + } + query.Set("platform", formatPlatform(options.Platforms[0])) + } + if options.BuildID != "" { + query.Set("buildid", options.BuildID) + } + if options.Version != "" { + query.Set("version", string(options.Version)) + } + + if options.Outputs != nil { + outputsJSON, err := json.Marshal(options.Outputs) + if err != nil { + return query, err + } + query.Set("outputs", string(outputsJSON)) + } + return query, nil +} diff --git a/vendor/github.com/moby/moby/client/image_build_opts.go b/vendor/github.com/moby/moby/client/image_build_opts.go new file mode 100644 index 000000000..f65ad0f2b --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_build_opts.go @@ -0,0 +1,79 @@ +package client + +import ( + "io" + + "github.com/moby/moby/api/types/build" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/registry" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageBuildOptions holds the information +// necessary to build images. 
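Before the options struct itself, a sketch of a minimal build call; it is illustrative only, and producing the tar archive of the build context is out of scope here.

// buildImage sends a tar archive of the build context to the daemon and drains
// the JSON progress stream; a real caller would decode the messages instead.
func buildImage(ctx context.Context, cli *client.Client, buildCtx io.Reader) error {
	res, err := cli.ImageBuild(ctx, buildCtx, client.ImageBuildOptions{
		Tags:       []string{"example/app:latest"},
		Dockerfile: "Dockerfile",
		Remove:     true, // the daemon default: drop intermediate containers
	})
	if err != nil {
		return err
	}
	defer res.Body.Close()
	_, err = io.Copy(io.Discard, res.Body)
	return err
}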
+type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*container.Ulimit + // BuildArgs needs to be a *string instead of just a string so that + // we can tell the difference between "" (empty string) and no value + // at all (nil). See the parsing of buildArgs in + // api/server/router/build/build_routes.go for even more info. + BuildArgs map[string]*string + AuthConfigs map[string]registry.AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache. + CacheFrom []string + SecurityOpt []string + ExtraHosts []string // List of extra hosts + Target string + SessionID string + // Platforms selects the platforms to build the image for. Multiple platforms + // can be provided if the daemon supports multi-platform builds. + Platforms []ocispec.Platform + // Version specifies the version of the underlying builder to use + Version build.BuilderVersion + // BuildID is an optional identifier that can be passed together with the + // build request. The same identifier can be used to gracefully cancel the + // build with the cancel request. + BuildID string + // Outputs defines configurations for exporting build results. Only supported + // in BuildKit mode + Outputs []ImageBuildOutput +} + +// ImageBuildOutput defines configuration for exporting a build result +type ImageBuildOutput struct { + Type string + Attrs map[string]string +} + +// ImageBuildResult holds information +// returned by a server after building +// an image. +type ImageBuildResult struct { + Body io.ReadCloser +} diff --git a/vendor/github.com/moby/moby/client/image_history.go b/vendor/github.com/moby/moby/client/image_history.go new file mode 100644 index 000000000..8618f1553 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_history.go @@ -0,0 +1,55 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageHistoryWithPlatform sets the platform for the image history operation. +func ImageHistoryWithPlatform(platform ocispec.Platform) ImageHistoryOption { + return imageHistoryOptionFunc(func(opt *imageHistoryOpts) error { + if opt.apiOptions.Platform != nil { + return fmt.Errorf("platform already set to %s", *opt.apiOptions.Platform) + } + opt.apiOptions.Platform = &platform + return nil + }) +} + +// ImageHistory returns the changes in an image in history format. 
+func (cli *Client) ImageHistory(ctx context.Context, imageID string, historyOpts ...ImageHistoryOption) (ImageHistoryResult, error) {
+	query := url.Values{}
+
+	var opts imageHistoryOpts
+	for _, o := range historyOpts {
+		if err := o.Apply(&opts); err != nil {
+			return ImageHistoryResult{}, err
+		}
+	}
+
+	if opts.apiOptions.Platform != nil {
+		if err := cli.requiresVersion(ctx, "1.48", "platform"); err != nil {
+			return ImageHistoryResult{}, err
+		}
+
+		p, err := encodePlatform(opts.apiOptions.Platform)
+		if err != nil {
+			return ImageHistoryResult{}, err
+		}
+		query.Set("platform", p)
+	}
+
+	resp, err := cli.get(ctx, "/images/"+imageID+"/history", query, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return ImageHistoryResult{}, err
+	}
+
+	var history ImageHistoryResult
+	err = json.NewDecoder(resp.Body).Decode(&history.Items)
+	return history, err
+}
diff --git a/vendor/github.com/moby/moby/client/image_history_opts.go b/vendor/github.com/moby/moby/client/image_history_opts.go
new file mode 100644
index 000000000..7fc57afd1
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_history_opts.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+	"github.com/moby/moby/api/types/image"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageHistoryOption is a type representing functional options for the image history operation.
+type ImageHistoryOption interface {
+	Apply(*imageHistoryOpts) error
+}
+type imageHistoryOptionFunc func(opt *imageHistoryOpts) error
+
+func (f imageHistoryOptionFunc) Apply(o *imageHistoryOpts) error {
+	return f(o)
+}
+
+type imageHistoryOpts struct {
+	apiOptions imageHistoryOptions
+}
+
+type imageHistoryOptions struct {
+	// Platform from the manifest list to use for history.
+	Platform *ocispec.Platform
+}
+
+// ImageHistoryResult holds the history entries returned by [Client.ImageHistory].
+type ImageHistoryResult struct {
+	Items []image.HistoryResponseItem
+}
diff --git a/vendor/github.com/moby/moby/client/image_import.go b/vendor/github.com/moby/moby/client/image_import.go
new file mode 100644
index 000000000..f383f76d4
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_import.go
@@ -0,0 +1,66 @@
+package client
+
+import (
+	"context"
+	"io"
+	"net/url"
+
+	"github.com/distribution/reference"
+)
+
+// ImageImportResult holds the response body returned by the daemon for image import.
+type ImageImportResult interface {
+	io.ReadCloser
+}
+
+// ImageImport creates a new image based on the source options. It returns the
+// JSON content in the [ImageImportResult].
+//
+// The underlying [io.ReadCloser] is automatically closed if the context is canceled.
+func (cli *Client) ImageImport(ctx context.Context, source ImageImportSource, ref string, options ImageImportOptions) (ImageImportResult, error) {
+	if ref != "" {
+		// Check if the given image name can be resolved
+		if _, err := reference.ParseNormalizedNamed(ref); err != nil {
+			return nil, err
+		}
+	}
+
+	query := url.Values{}
+	if source.SourceName != "" {
+		query.Set("fromSrc", source.SourceName)
+	}
+	if ref != "" {
+		query.Set("repo", ref)
+	}
+	if options.Tag != "" {
+		query.Set("tag", options.Tag)
+	}
+	if options.Message != "" {
+		query.Set("message", options.Message)
+	}
+	if p := formatPlatform(options.Platform); p != "unknown" {
+		// TODO(thaJeztah): would we ever support multiple platforms here? (would require multiple rootfs tars as well?)
+		query.Set("platform", p)
+	}
+	for _, change := range options.Changes {
+		query.Add("changes", change)
+	}
+
+	resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil)
+	if err != nil {
+		return nil, err
+	}
+	return &imageImportResult{
+		ReadCloser: newCancelReadCloser(ctx, resp.Body),
+	}, nil
+}
+
+// imageImportResult is the concrete [ImageImportResult] implementation
+// returned by [Client.ImageImport].
+type imageImportResult struct {
+	io.ReadCloser
+}
+
+var (
+	_ io.ReadCloser     = (*imageImportResult)(nil)
+	_ ImageImportResult = (*imageImportResult)(nil)
+)
diff --git a/vendor/github.com/moby/moby/client/image_import_opts.go b/vendor/github.com/moby/moby/client/image_import_opts.go
new file mode 100644
index 000000000..c70473bdd
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_import_opts.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+	"io"
+
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageImportSource holds source information for ImageImport
+type ImageImportSource struct {
+	Source     io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
+	SourceName string    // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
+}
+
+// ImageImportOptions holds information to import images from the client host.
+type ImageImportOptions struct {
+	Tag      string           // Tag is the name to tag this image with. This attribute is deprecated.
+	Message  string           // Message is the message to tag the image with
+	Changes  []string         // Changes are the raw changes to apply to this image
+	Platform ocispec.Platform // Platform is the target platform of the image
+}
diff --git a/vendor/github.com/moby/moby/client/image_inspect.go b/vendor/github.com/moby/moby/client/image_inspect.go
new file mode 100644
index 000000000..635931fd0
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_inspect.go
@@ -0,0 +1,62 @@
+package client
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/url"
+)
+
+// ImageInspect returns the image information.
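A usage sketch for the import API above; illustrative only, with the tarball reader supplied by the caller.

// importTarball imports a root filesystem tarball as a new image. SourceName
// "-" tells the daemon to read the image data from the request body (Source).
func importTarball(ctx context.Context, cli *client.Client, tarball io.Reader) error {
	res, err := cli.ImageImport(ctx,
		client.ImageImportSource{Source: tarball, SourceName: "-"},
		"example/imported:latest",
		client.ImageImportOptions{Message: "imported from tarball"},
	)
	if err != nil {
		return err
	}
	defer res.Close()
	_, err = io.Copy(io.Discard, res) // consume the JSON progress stream
	return err
}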
+func (cli *Client) ImageInspect(ctx context.Context, imageID string, inspectOpts ...ImageInspectOption) (ImageInspectResult, error) { + if imageID == "" { + return ImageInspectResult{}, objectNotFoundError{object: "image", id: imageID} + } + + var opts imageInspectOpts + for _, opt := range inspectOpts { + if err := opt.Apply(&opts); err != nil { + return ImageInspectResult{}, fmt.Errorf("error applying image inspect option: %w", err) + } + } + + query := url.Values{} + if opts.apiOptions.Manifests { + if err := cli.requiresVersion(ctx, "1.48", "manifests"); err != nil { + return ImageInspectResult{}, err + } + query.Set("manifests", "1") + } + + if opts.apiOptions.Platform != nil { + if err := cli.requiresVersion(ctx, "1.49", "platform"); err != nil { + return ImageInspectResult{}, err + } + platform, err := encodePlatform(opts.apiOptions.Platform) + if err != nil { + return ImageInspectResult{}, err + } + query.Set("platform", platform) + } + + resp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ImageInspectResult{}, err + } + + buf := opts.raw + if buf == nil { + buf = &bytes.Buffer{} + } + + if _, err := io.Copy(buf, resp.Body); err != nil { + return ImageInspectResult{}, err + } + + var response ImageInspectResult + err = json.Unmarshal(buf.Bytes(), &response) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/image_inspect_opts.go b/vendor/github.com/moby/moby/client/image_inspect_opts.go new file mode 100644 index 000000000..266c1fe81 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_inspect_opts.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + + "github.com/moby/moby/api/types/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageInspectOption is a type representing functional options for the image inspect operation. +type ImageInspectOption interface { + Apply(*imageInspectOpts) error +} +type imageInspectOptionFunc func(opt *imageInspectOpts) error + +func (f imageInspectOptionFunc) Apply(o *imageInspectOpts) error { + return f(o) +} + +// ImageInspectWithRawResponse instructs the client to additionally store the +// raw inspect response in the provided buffer. +func ImageInspectWithRawResponse(raw *bytes.Buffer) ImageInspectOption { + return imageInspectOptionFunc(func(opts *imageInspectOpts) error { + opts.raw = raw + return nil + }) +} + +// ImageInspectWithManifests sets manifests API option for the image inspect operation. +// This option is only available for API version 1.48 and up. +// With this option set, the image inspect operation response includes +// the [image.InspectResponse.Manifests] field if the server is multi-platform +// capable. +func ImageInspectWithManifests(manifests bool) ImageInspectOption { + return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error { + clientOpts.apiOptions.Manifests = manifests + return nil + }) +} + +// ImageInspectWithPlatform sets platform API option for the image inspect operation. +// This option is only available for API version 1.49 and up. +// With this option set, the image inspect operation returns information for the +// specified platform variant of the multi-platform image. 
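A sketch of how the inspect options combine; illustrative only, and the ID field is assumed via the embedded image.InspectResponse.

// inspectWithManifests inspects an image while requesting the manifest list
// (API 1.48+) and capturing the raw JSON response alongside the decoded struct.
func inspectWithManifests(ctx context.Context, cli *client.Client) error {
	var raw bytes.Buffer
	res, err := cli.ImageInspect(ctx, "alpine:latest",
		client.ImageInspectWithManifests(true),
		client.ImageInspectWithRawResponse(&raw),
	)
	if err != nil {
		return err
	}
	fmt.Println(res.ID, raw.Len()) // ID via the embedded image.InspectResponse
	return nil
}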
+func ImageInspectWithPlatform(platform *ocispec.Platform) ImageInspectOption {
+	return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error {
+		clientOpts.apiOptions.Platform = platform
+		return nil
+	})
+}
+
+type imageInspectOpts struct {
+	raw        *bytes.Buffer
+	apiOptions imageInspectOptions
+}
+
+type imageInspectOptions struct {
+	// Manifests returns the image manifests.
+	Manifests bool
+
+	// Platform selects the specific platform of a multi-platform image to inspect.
+	//
+	// This option is only available for API version 1.49 and up.
+	Platform *ocispec.Platform
+}
+
+// ImageInspectResult holds the response from [Client.ImageInspect].
+type ImageInspectResult struct {
+	image.InspectResponse
+}
diff --git a/vendor/github.com/moby/moby/client/image_list.go b/vendor/github.com/moby/moby/client/image_list.go
new file mode 100644
index 000000000..6df3c66e1
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_list.go
@@ -0,0 +1,53 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"net/url"
+
+	"github.com/moby/moby/api/types/image"
+	"github.com/moby/moby/client/pkg/versions"
+)
+
+// ImageList returns a list of images in the docker host.
+//
+// Experimental: Set the [ImageListOptions.Manifests] option
+// to include [image.Summary.Manifests] with information about image manifests.
+// This is experimental and might change in the future without any backward
+// compatibility.
+func (cli *Client) ImageList(ctx context.Context, options ImageListOptions) (ImageListResult, error) {
+	var images []image.Summary
+
+	query := url.Values{}
+
+	options.Filters.updateURLValues(query)
+	if options.All {
+		query.Set("all", "1")
+	}
+	if options.SharedSize {
+		query.Set("shared-size", "1")
+	}
+	if options.Manifests {
+		// Make sure we negotiated (if the client is configured to do so),
+		// as code below contains API-version specific handling of options.
+		//
+		// Normally, version-negotiation (if enabled) would not happen until
+		// the API request is made.
+		if err := cli.checkVersion(ctx); err != nil {
+			return ImageListResult{}, err
+		}
+
+		if versions.GreaterThanOrEqualTo(cli.version, "1.47") {
+			query.Set("manifests", "1")
+		}
+	}
+
+	resp, err := cli.get(ctx, "/images/json", query, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return ImageListResult{}, err
+	}
+
+	err = json.NewDecoder(resp.Body).Decode(&images)
+	return ImageListResult{Items: images}, err
+}
diff --git a/vendor/github.com/moby/moby/client/image_list_opts.go b/vendor/github.com/moby/moby/client/image_list_opts.go
new file mode 100644
index 000000000..a497d5790
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_list_opts.go
@@ -0,0 +1,24 @@
+package client
+
+import "github.com/moby/moby/api/types/image"
+
+// ImageListOptions holds parameters to list images with.
+type ImageListOptions struct {
+	// All controls whether all images in the graph are listed, or just
+	// the heads.
+	All bool
+
+	// Filters holds the filter terms to apply to the image list.
+	Filters Filters
+
+	// SharedSize indicates whether the shared size of images should be computed.
+	SharedSize bool
+
+	// Manifests indicates whether the image manifests should be returned.
+	Manifests bool
+}
+
+// ImageListResult holds the result from ImageList.
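A sketch tying ImageList to the chainable Filters helper defined earlier in this diff; illustrative only.

// listDangling lists dangling images using a filter term.
func listDangling(ctx context.Context, cli *client.Client) error {
	res, err := cli.ImageList(ctx, client.ImageListOptions{
		Filters: make(client.Filters).Add("dangling", "true"),
	})
	if err != nil {
		return err
	}
	for _, img := range res.Items {
		fmt.Println(img.ID)
	}
	return nil
}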
+type ImageListResult struct {
+	Items []image.Summary
+}
diff --git a/vendor/github.com/moby/moby/client/image_load.go b/vendor/github.com/moby/moby/client/image_load.go
new file mode 100644
index 000000000..ec5fcae6e
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_load.go
@@ -0,0 +1,64 @@
+package client
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/url"
+)
+
+// ImageLoadResult holds the response returned by the daemon for an image load.
+// It implements [io.ReadCloser] and must be closed to avoid a resource leak.
+type ImageLoadResult interface {
+	io.ReadCloser
+}
+
+// ImageLoad loads an image in the docker host from the client host. It's up
+// to the caller to close the [ImageLoadResult] returned by this function.
+//
+// The underlying [io.ReadCloser] is automatically closed if the context is canceled.
+func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, loadOpts ...ImageLoadOption) (ImageLoadResult, error) {
+	var opts imageLoadOpts
+	for _, opt := range loadOpts {
+		if err := opt.Apply(&opts); err != nil {
+			return nil, err
+		}
+	}
+
+	query := url.Values{}
+	query.Set("quiet", "0")
+	if opts.apiOptions.Quiet {
+		query.Set("quiet", "1")
+	}
+	if len(opts.apiOptions.Platforms) > 0 {
+		if err := cli.requiresVersion(ctx, "1.48", "platform"); err != nil {
+			return nil, err
+		}
+
+		p, err := encodePlatforms(opts.apiOptions.Platforms...)
+		if err != nil {
+			return nil, err
+		}
+		query["platform"] = p
+	}
+
+	resp, err := cli.postRaw(ctx, "/images/load", query, input, http.Header{
+		"Content-Type": {"application/x-tar"},
+	})
+	if err != nil {
+		return nil, err
+	}
+	return &imageLoadResult{
+		ReadCloser: newCancelReadCloser(ctx, resp.Body),
+	}, nil
+}
+
+// imageLoadResult is the concrete [ImageLoadResult] implementation.
+type imageLoadResult struct {
+	io.ReadCloser
+}
+
+var (
+	_ io.ReadCloser   = (*imageLoadResult)(nil)
+	_ ImageLoadResult = (*imageLoadResult)(nil)
)
diff --git a/vendor/github.com/moby/moby/client/image_load_opts.go b/vendor/github.com/moby/moby/client/image_load_opts.go
new file mode 100644
index 000000000..aeb4fcf83
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_load_opts.go
@@ -0,0 +1,53 @@
+package client
+
+import (
+	"fmt"
+
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageLoadOption is a type representing functional options for the image load operation.
+type ImageLoadOption interface {
+	Apply(*imageLoadOpts) error
+}
+type imageLoadOptionFunc func(opt *imageLoadOpts) error
+
+func (f imageLoadOptionFunc) Apply(o *imageLoadOpts) error {
+	return f(o)
+}
+
+type imageLoadOpts struct {
+	apiOptions imageLoadOptions
+}
+
+type imageLoadOptions struct {
+	// Quiet suppresses progress output
+	Quiet bool
+
+	// Platforms selects the platforms to load if the image is a
+	// multi-platform image and has multiple variants.
+	Platforms []ocispec.Platform
+}
+
+// ImageLoadWithQuiet sets the quiet option for the image load operation.
+func ImageLoadWithQuiet(quiet bool) ImageLoadOption {
+	return imageLoadOptionFunc(func(opt *imageLoadOpts) error {
+		opt.apiOptions.Quiet = quiet
+		return nil
+	})
+}
+
+// ImageLoadWithPlatforms sets the platforms to be loaded from the image.
+//
+// Platform is an optional parameter that specifies the platform to load from
+// the provided multi-platform image. Passing a platform only has an effect
+// if the input image is a multi-platform image.
+func ImageLoadWithPlatforms(platforms ...ocispec.Platform) ImageLoadOption {
+	return imageLoadOptionFunc(func(opt *imageLoadOpts) error {
+		if opt.apiOptions.Platforms != nil {
+			return fmt.Errorf("platforms already set to %v", opt.apiOptions.Platforms)
+		}
+		opt.apiOptions.Platforms = platforms
+		return nil
+	})
+}
diff --git a/vendor/github.com/moby/moby/client/image_prune.go b/vendor/github.com/moby/moby/client/image_prune.go
new file mode 100644
index 000000000..7f3a25b89
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_prune.go
@@ -0,0 +1,39 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/moby/moby/api/types/image"
+)
+
+// ImagePruneOptions holds parameters to prune images.
+type ImagePruneOptions struct {
+	Filters Filters
+}
+
+// ImagePruneResult holds the result from the [Client.ImagePrune] method.
+type ImagePruneResult struct {
+	Report image.PruneReport
+}
+
+// ImagePrune requests the daemon to delete unused image data.
+func (cli *Client) ImagePrune(ctx context.Context, opts ImagePruneOptions) (ImagePruneResult, error) {
+	query := url.Values{}
+	opts.Filters.updateURLValues(query)
+
+	resp, err := cli.post(ctx, "/images/prune", query, nil, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return ImagePruneResult{}, err
+	}
+
+	var report image.PruneReport
+	if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
+		return ImagePruneResult{}, fmt.Errorf("failed to decode image prune report: %w", err)
+	}
+
+	return ImagePruneResult{Report: report}, nil
+}
diff --git a/vendor/github.com/moby/moby/client/image_pull.go b/vendor/github.com/moby/moby/client/image_pull.go
new file mode 100644
index 000000000..11c0afa41
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_pull.go
@@ -0,0 +1,93 @@
+package client
+
+import (
+	"context"
+	"io"
+	"iter"
+	"net/http"
+	"net/url"
+
+	cerrdefs "github.com/containerd/errdefs"
+	"github.com/distribution/reference"
+	"github.com/moby/moby/api/types/jsonstream"
+	"github.com/moby/moby/api/types/registry"
+	"github.com/moby/moby/client/internal"
+)
+
+// ImagePullResponse is the streaming response returned by [Client.ImagePull].
+type ImagePullResponse interface {
+	io.ReadCloser
+	JSONMessages(ctx context.Context) iter.Seq2[jsonstream.Message, error]
+	Wait(ctx context.Context) error
+}
+
+// ImagePull requests the docker host to pull an image from a remote registry.
+// If the operation is unauthorized, it executes the privilege function and
+// retries the request once.
+// Callers can:
+// - use [ImagePullResponse.Wait] to wait for the pull to complete
+// - use [ImagePullResponse.JSONMessages] to monitor pull progress as a sequence
+// of JSONMessages; in this case [ImagePullResponse.Close] does not need to be called.
+// - use the [io.Reader] interface and call [ImagePullResponse.Close] after processing.
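The consumption modes listed above, sketched for the progress-iterator case; illustrative only, and the Status field on jsonstream.Message is an assumption, since that type is not shown in this diff.

// pullAndReport pulls an image and prints each progress message. To block
// without inspecting progress, resp.Wait(ctx) alone would suffice.
func pullAndReport(ctx context.Context, cli *client.Client, ref string) error {
	resp, err := cli.ImagePull(ctx, ref, client.ImagePullOptions{})
	if err != nil {
		return err
	}
	for msg, err := range resp.JSONMessages(ctx) {
		if err != nil {
			return err
		}
		fmt.Println(msg.Status) // Status field assumed on jsonstream.Message
	}
	return nil
}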
+func (cli *Client) ImagePull(ctx context.Context, refStr string, options ImagePullOptions) (ImagePullResponse, error) {
+	// FIXME(vdemeester): this is currently used in a few ways in docker/docker:
+	// - if not in trusted content, ref is used to pass the whole reference, and tag is empty
+	// - if in trusted content, ref is used to pass the reference name, and tag for the digest
+	//
+	// ref: https://github.com/docker-archive-public/docker.engine-api/pull/162
+
+	ref, err := reference.ParseNormalizedNamed(refStr)
+	if err != nil {
+		return nil, err
+	}
+
+	query := url.Values{}
+	query.Set("fromImage", ref.Name())
+	if !options.All {
+		query.Set("tag", getAPITagFromNamedRef(ref))
+	}
+	if len(options.Platforms) > 0 {
+		if len(options.Platforms) > 1 {
+			// TODO(thaJeztah): update API spec and add equivalent check on the daemon. We need this still for older daemons, which would ignore it.
+			return nil, cerrdefs.ErrInvalidArgument.WithMessage("specifying multiple platforms is not yet supported")
+		}
+		query.Set("platform", formatPlatform(options.Platforms[0]))
+	}
+	resp, err := cli.tryImageCreate(ctx, query, staticAuth(options.RegistryAuth))
+	if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
+		resp, err = cli.tryImageCreate(ctx, query, options.PrivilegeFunc)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return internal.NewJSONMessageStream(resp.Body), nil
+}
+
+// getAPITagFromNamedRef returns a tag from the specified reference.
+// This function is necessary as long as the docker "server" api expects
+// digests to be sent as tags and makes a distinction between the name
+// and tag/digest part of a reference.
+func getAPITagFromNamedRef(ref reference.Named) string {
+	if digested, ok := ref.(reference.Digested); ok {
+		return digested.Digest().String()
+	}
+	ref = reference.TagNameOnly(ref)
+	if tagged, ok := ref.(reference.Tagged); ok {
+		return tagged.Tag()
+	}
+	return ""
+}
+
+func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, resolveAuth registry.RequestAuthConfig) (*http.Response, error) {
+	hdr := http.Header{}
+	if resolveAuth != nil {
+		registryAuth, err := resolveAuth(ctx)
+		if err != nil {
+			return nil, err
+		}
+		if registryAuth != "" {
+			hdr.Set(registry.AuthHeader, registryAuth)
+		}
+	}
+	return cli.post(ctx, "/images/create", query, nil, hdr)
+}
diff --git a/vendor/github.com/moby/moby/client/image_pull_opts.go b/vendor/github.com/moby/moby/client/image_pull_opts.go
new file mode 100644
index 000000000..1b78185dd
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_pull_opts.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+	"context"
+
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImagePullOptions holds information to pull images.
+type ImagePullOptions struct {
+	All          bool
+	RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+
+	// PrivilegeFunc is a function that clients can supply to retry operations
+	// after getting an authorization error. This function returns the registry
+	// authentication header value in base64 encoded format, or an error if the
+	// privilege request fails.
+	//
+	// For details, refer to [github.com/moby/moby/api/types/registry.RequestAuthConfig].
+	PrivilegeFunc func(context.Context) (string, error)
+
+	// Platforms selects the platforms to pull. Multiple platforms can be
+	// specified if the image is a multi-platform image.
+	Platforms []ocispec.Platform
+}
diff --git a/vendor/github.com/moby/moby/client/image_push.go b/vendor/github.com/moby/moby/client/image_push.go
new file mode 100644
index 000000000..5dd8bc140
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_push.go
@@ -0,0 +1,98 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"iter"
+	"net/http"
+	"net/url"
+
+	cerrdefs "github.com/containerd/errdefs"
+	"github.com/distribution/reference"
+	"github.com/moby/moby/api/types/jsonstream"
+	"github.com/moby/moby/api/types/registry"
+	"github.com/moby/moby/client/internal"
+)
+
+// ImagePushResponse is the streaming response returned by [Client.ImagePush].
+type ImagePushResponse interface {
+	io.ReadCloser
+	JSONMessages(ctx context.Context) iter.Seq2[jsonstream.Message, error]
+	Wait(ctx context.Context) error
+}
+
+// ImagePush requests the docker host to push an image to a remote registry.
+// If the operation is unauthorized, it executes the privilege function and
+// retries the request once.
+// Callers can:
+// - use [ImagePushResponse.Wait] to wait for the push to complete
+// - use [ImagePushResponse.JSONMessages] to monitor push progress as a sequence
+// of JSONMessages; in this case [ImagePushResponse.Close] does not need to be called.
+// - use the [io.Reader] interface and call [ImagePushResponse.Close] after processing.
+func (cli *Client) ImagePush(ctx context.Context, image string, options ImagePushOptions) (ImagePushResponse, error) {
+	ref, err := reference.ParseNormalizedNamed(image)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, ok := ref.(reference.Digested); ok {
+		return nil, errors.New("cannot push a digest reference")
+	}
+
+	query := url.Values{}
+	if !options.All {
+		ref = reference.TagNameOnly(ref)
+		if tagged, ok := ref.(reference.Tagged); ok {
+			query.Set("tag", tagged.Tag())
+		}
+	}
+
+	if options.Platform != nil {
+		if err := cli.requiresVersion(ctx, "1.46", "platform"); err != nil {
+			return nil, err
+		}
+
+		p := *options.Platform
+		pJson, err := json.Marshal(p)
+		if err != nil {
+			return nil, fmt.Errorf("invalid platform: %v", err)
+		}
+
+		query.Set("platform", string(pJson))
+	}
+
+	resp, err := cli.tryImagePush(ctx, ref.Name(), query, staticAuth(options.RegistryAuth))
+	if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
+		resp, err = cli.tryImagePush(ctx, ref.Name(), query, options.PrivilegeFunc)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return internal.NewJSONMessageStream(resp.Body), nil
+}
+
+func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, resolveAuth registry.RequestAuthConfig) (*http.Response, error) {
+	hdr := http.Header{}
+	if resolveAuth != nil {
+		registryAuth, err := resolveAuth(ctx)
+		if err != nil {
+			return nil, err
+		}
+		if registryAuth != "" {
+			hdr.Set(registry.AuthHeader, registryAuth)
+		}
+	}
+
+	// Always send a body (which may be an empty JSON document ("{}")) to prevent
+	// EOF errors on older daemons which had faulty fallback code for handling
+	// authentication in the body when no auth-header was set, resulting in:
+	//
+	//	Error response from daemon: bad parameters and missing X-Registry-Auth: invalid X-Registry-Auth header: EOF
+	//
+	// We use [http.NoBody], which gets marshaled to an empty JSON document.
+ // + // see: https://github.com/moby/moby/commit/ea29dffaa541289591aa44fa85d2a596ce860e16 + return cli.post(ctx, "/images/"+imageID+"/push", query, http.NoBody, hdr) +} diff --git a/vendor/github.com/moby/moby/client/image_push_opts.go b/vendor/github.com/moby/moby/client/image_push_opts.go new file mode 100644 index 000000000..591c6b605 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_push_opts.go @@ -0,0 +1,26 @@ +package client + +import ( + "context" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImagePushOptions holds information to push images. +type ImagePushOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // For details, refer to [github.com/moby/moby/api/types/registry.RequestAuthConfig]. + PrivilegeFunc func(context.Context) (string, error) + + // Platform is an optional field that selects a specific platform to push + // when the image is a multi-platform image. + // Using this will only push a single platform-specific manifest. + Platform *ocispec.Platform `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/client/image_remove.go b/vendor/github.com/moby/moby/client/image_remove.go new file mode 100644 index 000000000..095b4f04c --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_remove.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/image" +) + +// ImageRemove removes an image from the docker host. +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options ImageRemoveOptions) (ImageRemoveResult, error) { + query := url.Values{} + + if options.Force { + query.Set("force", "1") + } + if !options.PruneChildren { + query.Set("noprune", "1") + } + + if len(options.Platforms) > 0 { + p, err := encodePlatforms(options.Platforms...) + if err != nil { + return ImageRemoveResult{}, err + } + query["platforms"] = p + } + + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ImageRemoveResult{}, err + } + + var dels []image.DeleteResponse + err = json.NewDecoder(resp.Body).Decode(&dels) + return ImageRemoveResult{Items: dels}, err +} diff --git a/vendor/github.com/moby/moby/client/image_remove_opts.go b/vendor/github.com/moby/moby/client/image_remove_opts.go new file mode 100644 index 000000000..3b5d8a77f --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_remove_opts.go @@ -0,0 +1,18 @@ +package client + +import ( + "github.com/moby/moby/api/types/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Platforms []ocispec.Platform + Force bool + PruneChildren bool +} + +// ImageRemoveResult holds the delete responses returned by the daemon. 
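A sketch of the PrivilegeFunc retry hook on push; illustrative only, and refreshRegistryAuth is a hypothetical helper standing in for whatever re-authentication the caller performs.

// pushWithRetryAuth pushes an image, letting PrivilegeFunc supply refreshed
// credentials if the first attempt is unauthorized.
func pushWithRetryAuth(ctx context.Context, cli *client.Client, encodedAuth string) error {
	resp, err := cli.ImagePush(ctx, "registry.example.com/app:latest", client.ImagePushOptions{
		RegistryAuth: encodedAuth, // base64-encoded registry.AuthConfig
		PrivilegeFunc: func(ctx context.Context) (string, error) {
			return refreshRegistryAuth(ctx) // hypothetical helper
		},
	})
	if err != nil {
		return err
	}
	return resp.Wait(ctx)
}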
+type ImageRemoveResult struct {
+	Items []image.DeleteResponse
+}
diff --git a/vendor/github.com/moby/moby/client/image_save.go b/vendor/github.com/moby/moby/client/image_save.go
new file mode 100644
index 000000000..508f88b7d
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_save.go
@@ -0,0 +1,59 @@
+package client
+
+import (
+	"context"
+	"io"
+	"net/url"
+)
+
+// ImageSaveResult is the tar stream returned by [Client.ImageSave].
+type ImageSaveResult interface {
+	io.ReadCloser
+}
+
+// ImageSave retrieves one or more images from the docker host as an
+// [ImageSaveResult]. Callers should close the reader, but the underlying
+// [io.ReadCloser] is automatically closed if the context is canceled.
+//
+// Platforms is an optional parameter that specifies the platforms to save
+// from the image. Passing a platform only has an effect if the input image
+// is a multi-platform image.
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string, saveOpts ...ImageSaveOption) (ImageSaveResult, error) {
+	var opts imageSaveOpts
+	for _, opt := range saveOpts {
+		if err := opt.Apply(&opts); err != nil {
+			return nil, err
+		}
+	}
+
+	query := url.Values{
+		"names": imageIDs,
+	}
+
+	if len(opts.apiOptions.Platforms) > 0 {
+		if err := cli.requiresVersion(ctx, "1.48", "platform"); err != nil {
+			return nil, err
+		}
+		p, err := encodePlatforms(opts.apiOptions.Platforms...)
+		if err != nil {
+			return nil, err
+		}
+		query["platform"] = p
+	}
+
+	resp, err := cli.get(ctx, "/images/get", query, nil)
+	if err != nil {
+		return nil, err
+	}
+	return &imageSaveResult{
+		ReadCloser: newCancelReadCloser(ctx, resp.Body),
+	}, nil
+}
+
+type imageSaveResult struct {
+	io.ReadCloser
+}
+
+var (
+	_ io.ReadCloser   = (*imageSaveResult)(nil)
+	_ ImageSaveResult = (*imageSaveResult)(nil)
+)
diff --git a/vendor/github.com/moby/moby/client/image_save_opts.go b/vendor/github.com/moby/moby/client/image_save_opts.go
new file mode 100644
index 000000000..9c0b3b74a
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_save_opts.go
@@ -0,0 +1,41 @@
+package client
+
+import (
+	"fmt"
+
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageSaveOption is a type representing functional options for the image save operation.
+type ImageSaveOption interface {
+	Apply(*imageSaveOpts) error
+}
+
+type imageSaveOptionFunc func(opt *imageSaveOpts) error
+
+func (f imageSaveOptionFunc) Apply(o *imageSaveOpts) error {
+	return f(o)
+}
+
+// ImageSaveWithPlatforms sets the platforms to be saved from the image. It
+// produces an error if platforms are already set. This option only has an
+// effect if the input image is a multi-platform image.
+func ImageSaveWithPlatforms(platforms ...ocispec.Platform) ImageSaveOption {
+	// TODO(thaJeztah): verify the GoDoc; do we produce an error for a single-platform image without the given platform?
+	return imageSaveOptionFunc(func(opt *imageSaveOpts) error {
+		if opt.apiOptions.Platforms != nil {
+			return fmt.Errorf("platforms already set to %v", opt.apiOptions.Platforms)
+		}
+		opt.apiOptions.Platforms = platforms
+		return nil
+	})
+}
+
+type imageSaveOpts struct {
+	apiOptions imageSaveOptions
+}
+
+type imageSaveOptions struct {
+	// Platforms selects the platforms to save if the image is a
+	// multi-platform image and has multiple variants.
+ Platforms []ocispec.Platform +} diff --git a/vendor/github.com/moby/moby/client/image_search.go b/vendor/github.com/moby/moby/client/image_search.go new file mode 100644 index 000000000..6e280906a --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_search.go @@ -0,0 +1,47 @@ +package client + +import ( + "context" + "encoding/json" + "net/http" + "net/url" + "strconv" + + cerrdefs "github.com/containerd/errdefs" + "github.com/moby/moby/api/types/registry" +) + +// ImageSearch makes the docker host search by a term in a remote registry. +// The list of results is not sorted in any fashion. +func (cli *Client) ImageSearch(ctx context.Context, term string, options ImageSearchOptions) (ImageSearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + if options.Limit > 0 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + options.Filters.updateURLValues(query) + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + defer ensureReaderClosed(resp) + if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) + if privilegeErr != nil { + return ImageSearchResult{}, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return ImageSearchResult{}, err + } + + err = json.NewDecoder(resp.Body).Decode(&results) + return ImageSearchResult{Items: results}, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) { + return cli.get(ctx, "/images/search", query, http.Header{ + registry.AuthHeader: {registryAuth}, + }) +} diff --git a/vendor/github.com/moby/moby/client/image_search_opts.go b/vendor/github.com/moby/moby/client/image_search_opts.go new file mode 100644 index 000000000..95a7d41fa --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_search_opts.go @@ -0,0 +1,27 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/registry" +) + +// ImageSearchResult wraps results returned by ImageSearch. +type ImageSearchResult struct { + Items []registry.SearchResult +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + RegistryAuth string + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // For details, refer to [github.com/moby/moby/api/types/registry.RequestAuthConfig]. 
+ PrivilegeFunc func(context.Context) (string, error) + Filters Filters + Limit int +} diff --git a/vendor/github.com/moby/moby/client/image_tag.go b/vendor/github.com/moby/moby/client/image_tag.go new file mode 100644 index 000000000..5566f4624 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_tag.go @@ -0,0 +1,48 @@ +package client + +import ( + "context" + "errors" + "fmt" + "net/url" + + "github.com/distribution/reference" +) + +type ImageTagOptions struct { + Source string + Target string +} + +type ImageTagResult struct{} + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, options ImageTagOptions) (ImageTagResult, error) { + source := options.Source + target := options.Target + + if _, err := reference.ParseAnyReference(source); err != nil { + return ImageTagResult{}, fmt.Errorf("error parsing reference: %q is not a valid repository/tag: %w", source, err) + } + + ref, err := reference.ParseNormalizedNamed(target) + if err != nil { + return ImageTagResult{}, fmt.Errorf("error parsing reference: %q is not a valid repository/tag: %w", target, err) + } + + if _, ok := ref.(reference.Digested); ok { + return ImageTagResult{}, errors.New("refusing to create a tag with a digest reference") + } + + ref = reference.TagNameOnly(ref) + + query := url.Values{} + query.Set("repo", ref.Name()) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + + resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) + defer ensureReaderClosed(resp) + return ImageTagResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/internal/json-stream.go b/vendor/github.com/moby/moby/client/internal/json-stream.go new file mode 100644 index 000000000..552978f9a --- /dev/null +++ b/vendor/github.com/moby/moby/client/internal/json-stream.go @@ -0,0 +1,50 @@ +package internal + +import ( + "encoding/json" + "io" + "slices" + + "github.com/moby/moby/api/types" +) + +const rs = 0x1E + +type DecoderFn func(v any) error + +// NewJSONStreamDecoder builds adequate DecoderFn to read json records formatted with specified content-type +func NewJSONStreamDecoder(r io.Reader, contentType string) DecoderFn { + switch contentType { + case types.MediaTypeJSONSequence: + return json.NewDecoder(NewRSFilterReader(r)).Decode + case types.MediaTypeJSON, types.MediaTypeNDJSON: + fallthrough + default: + return json.NewDecoder(r).Decode + } +} + +// RSFilterReader wraps an io.Reader and filters out ASCII RS characters +type RSFilterReader struct { + reader io.Reader + buffer []byte +} + +// NewRSFilterReader creates a new RSFilterReader that filters out RS characters +func NewRSFilterReader(r io.Reader) *RSFilterReader { + return &RSFilterReader{ + reader: r, + buffer: make([]byte, 4096), // Internal buffer for reading chunks + } +} + +// Read implements the io.Reader interface, filtering out RS characters +func (r *RSFilterReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + + n, err = r.reader.Read(p) + filtered := slices.DeleteFunc(p[:n], func(b byte) bool { return b == rs }) + return len(filtered), err +} diff --git a/vendor/github.com/moby/moby/client/internal/jsonmessages.go b/vendor/github.com/moby/moby/client/internal/jsonmessages.go new file mode 100644 index 000000000..ebbb5faa3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/internal/jsonmessages.go @@ -0,0 +1,79 @@ +package internal + +import ( + "context" + "encoding/json" + "errors" + "io" + "iter" + "sync" + + 
"github.com/moby/moby/api/types/jsonstream" +) + +func NewJSONMessageStream(rc io.ReadCloser) stream { + if rc == nil { + panic("nil io.ReadCloser") + } + return stream{ + rc: rc, + close: sync.OnceValue(rc.Close), + } +} + +type stream struct { + rc io.ReadCloser + close func() error +} + +// Read implements io.ReadCloser +func (r stream) Read(p []byte) (n int, err error) { + if r.rc == nil { + return 0, io.EOF + } + return r.rc.Read(p) +} + +// Close implements io.ReadCloser +func (r stream) Close() error { + if r.close == nil { + return nil + } + return r.close() +} + +// JSONMessages decodes the response stream as a sequence of JSONMessages. +// if stream ends or context is cancelled, the underlying [io.Reader] is closed. +func (r stream) JSONMessages(ctx context.Context) iter.Seq2[jsonstream.Message, error] { + context.AfterFunc(ctx, func() { + _ = r.Close() + }) + dec := json.NewDecoder(r) + return func(yield func(jsonstream.Message, error) bool) { + defer r.Close() + for { + var jm jsonstream.Message + err := dec.Decode(&jm) + if errors.Is(err, io.EOF) { + break + } + if ctx.Err() != nil { + yield(jm, ctx.Err()) + return + } + if !yield(jm, err) { + return + } + } + } +} + +// Wait waits for operation to complete and detects errors reported as JSONMessage +func (r stream) Wait(ctx context.Context) error { + for _, err := range r.JSONMessages(ctx) { + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/client/internal/timestamp/timestamp.go b/vendor/github.com/moby/moby/client/internal/timestamp/timestamp.go new file mode 100644 index 000000000..7b175f0c9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/internal/timestamp/timestamp.go @@ -0,0 +1,131 @@ +package timestamp + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// These are additional predefined layouts for use in Time.Format and Time.Parse +// with --since and --until parameters for `docker logs` and `docker events` +const ( + rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone + rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone + dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 + dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 +) + +// GetTimestamp tries to parse given string as golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as string otherwise returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration. 
+func GetTimestamp(value string, reference time.Time) (string, error) { + if d, err := time.ParseDuration(value); value != "0" && err == nil { + return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil + } + + var format string + // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation + parseInLocation := !strings.ContainsAny(value, "zZ+") && strings.Count(value, "-") != 3 + + if strings.Contains(value, ".") { + if parseInLocation { + format = rFC3339NanoLocal + } else { + format = time.RFC3339Nano + } + } else if strings.Contains(value, "T") { + // we want the number of colons in the T portion of the timestamp + tcolons := strings.Count(value, ":") + // if parseInLocation is off and we have a +/- zone offset (not Z) then + // there will be an extra colon in the input for the tz offset subtract that + // colon from the tcolons count + if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { + tcolons-- + } + if parseInLocation { + switch tcolons { + case 0: + format = "2006-01-02T15" + case 1: + format = "2006-01-02T15:04" + default: + format = rFC3339Local + } + } else { + switch tcolons { + case 0: + format = "2006-01-02T15Z07:00" + case 1: + format = "2006-01-02T15:04Z07:00" + default: + format = time.RFC3339 + } + } + } else if parseInLocation { + format = dateLocal + } else { + format = dateWithZone + } + + var t time.Time + var err error + + if parseInLocation { + t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) + } else { + t, err = time.Parse(format, value) + } + + if err != nil { + // if there is a `-` then it's an RFC3339 like timestamp + if strings.Contains(value, "-") { + return "", err // was probably an RFC3339 like timestamp but the parser failed with an error + } + if _, _, err := parseTimestamp(value); err != nil { + return "", fmt.Errorf("failed to parse value as time or duration: %q", value) + } + return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) + } + + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil +} + +// ParseTimestamps returns seconds and nanoseconds from a timestamp that has +// the format ("%d.%09d", time.Unix(), int64(time.Nanosecond())). +// If the incoming nanosecond portion is longer than 9 digits it is truncated. +// The expectation is that the seconds and nanoseconds will be used to create a +// time variable. 
For example: +// +// seconds, nanoseconds, _ := ParseTimestamps("1136073600.000000001",0) +// since := time.Unix(seconds, nanoseconds) +// +// returns seconds as defaultSeconds if value == "" +func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, _ error) { + if value == "" { + return defaultSeconds, 0, nil + } + return parseTimestamp(value) +} + +func parseTimestamp(value string) (seconds int64, nanoseconds int64, _ error) { + s, n, ok := strings.Cut(value, ".") + sec, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return sec, 0, err + } + if !ok { + return sec, 0, nil + } + nsec, err := strconv.ParseInt(n, 10, 64) + if err != nil { + return sec, nsec, err + } + // should already be in nanoseconds but just in case convert n to nanoseconds + nsec = int64(float64(nsec) * math.Pow(float64(10), float64(9-len(n)))) + return sec, nsec, nil +} diff --git a/vendor/github.com/moby/moby/client/login.go b/vendor/github.com/moby/moby/client/login.go new file mode 100644 index 000000000..b295080ab --- /dev/null +++ b/vendor/github.com/moby/moby/client/login.go @@ -0,0 +1,45 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/registry" +) + +type RegistryLoginOptions struct { + Username string + Password string + ServerAddress string + IdentityToken string + RegistryToken string +} + +// RegistryLoginResult holds the result of a RegistryLogin query. +type RegistryLoginResult struct { + Auth registry.AuthResponse +} + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns unauthorizedError when the authentication fails. +func (cli *Client) RegistryLogin(ctx context.Context, options RegistryLoginOptions) (RegistryLoginResult, error) { + auth := registry.AuthConfig{ + Username: options.Username, + Password: options.Password, + ServerAddress: options.ServerAddress, + IdentityToken: options.IdentityToken, + RegistryToken: options.RegistryToken, + } + + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + defer ensureReaderClosed(resp) + + if err != nil { + return RegistryLoginResult{}, err + } + + var response registry.AuthResponse + err = json.NewDecoder(resp.Body).Decode(&response) + return RegistryLoginResult{Auth: response}, err +} diff --git a/vendor/github.com/moby/moby/client/network_connect.go b/vendor/github.com/moby/moby/client/network_connect.go new file mode 100644 index 000000000..40db955a9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_connect.go @@ -0,0 +1,40 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/network" +) + +// NetworkConnectOptions represents the data to be used to connect a container to the +// network. +type NetworkConnectOptions struct { + Container string + EndpointConfig *network.EndpointSettings +} + +// NetworkConnectResult represents the result of a NetworkConnect operation. +type NetworkConnectResult struct { + // Currently empty; placeholder for future fields. +} + +// NetworkConnect connects a container to an existent network in the docker host.
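A minimal usage sketch for the NetworkConnect method defined just below (not part of the vendored source; assumes an already-constructed *client.Client, and placeholder network/container IDs; the Aliases field on network.EndpointSettings is assumed from earlier API versions):

```go
package example

import (
	"context"

	"github.com/moby/moby/api/types/network"
	"github.com/moby/moby/client"
)

// connectContainer attaches a container to a network with an extra alias.
func connectContainer(ctx context.Context, cli *client.Client) error {
	_, err := cli.NetworkConnect(ctx, "app-net", client.NetworkConnectOptions{
		Container: "my-container",
		EndpointConfig: &network.EndpointSettings{
			Aliases: []string{"db"}, // assumed field, carried over from prior API versions
		},
	})
	return err
}
```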
+func (cli *Client) NetworkConnect(ctx context.Context, networkID string, options NetworkConnectOptions) (NetworkConnectResult, error) { + networkID, err := trimID("network", networkID) + if err != nil { + return NetworkConnectResult{}, err + } + + containerID, err := trimID("container", options.Container) + if err != nil { + return NetworkConnectResult{}, err + } + + nc := network.ConnectRequest{ + Container: containerID, + EndpointConfig: options.EndpointConfig, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + defer ensureReaderClosed(resp) + return NetworkConnectResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/network_create.go b/vendor/github.com/moby/moby/client/network_create.go new file mode 100644 index 000000000..25ea32af4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_create.go @@ -0,0 +1,69 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/network" +) + +// NetworkCreateOptions holds options to create a network. +type NetworkCreateOptions struct { + Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`) + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level). + EnableIPv4 *bool // EnableIPv4 represents whether to enable IPv4. + EnableIPv6 *bool // EnableIPv6 represents whether to enable IPv6. + IPAM *network.IPAM // IPAM is the network's IP Address Management. + Internal bool // Internal represents if the network is used internal only. + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigOnly bool // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. + ConfigFrom string // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [CreateOptions.ConfigOnly]. + Options map[string]string // Options specifies the network-specific options to use for when creating the network. + Labels map[string]string // Labels holds metadata specific to the network being created. +} + +// NetworkCreateResult represents the result of a network create operation. +type NetworkCreateResult struct { + ID string + + Warning []string +} + +// NetworkCreate creates a new network in the docker host. 
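A sketch of creating an attachable overlay network with the options above, using the NetworkCreate method defined just below (network name and labels are placeholders; assumes a ready *client.Client):

```go
package example

import (
	"context"
	"log"

	"github.com/moby/moby/client"
)

// createAppNetwork creates an attachable overlay network and returns its ID.
func createAppNetwork(ctx context.Context, cli *client.Client) (string, error) {
	enableIPv6 := false
	res, err := cli.NetworkCreate(ctx, "app-net", client.NetworkCreateOptions{
		Driver:     "overlay",
		Attachable: true,        // allow standalone containers to attach
		EnableIPv6: &enableIPv6, // tri-state pointer: nil leaves the daemon default
		Labels:     map[string]string{"com.example.stack": "demo"},
	})
	if err != nil {
		return "", err
	}
	// The API reports a single warning string; the client wraps it in a slice.
	for _, w := range res.Warning {
		log.Println("network create warning:", w)
	}
	return res.ID, nil
}
```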
+func (cli *Client) NetworkCreate(ctx context.Context, name string, options NetworkCreateOptions) (NetworkCreateResult, error) { + req := network.CreateRequest{ + Name: name, + Driver: options.Driver, + Scope: options.Scope, + EnableIPv4: options.EnableIPv4, + EnableIPv6: options.EnableIPv6, + IPAM: options.IPAM, + Internal: options.Internal, + Attachable: options.Attachable, + Ingress: options.Ingress, + ConfigOnly: options.ConfigOnly, + Options: options.Options, + Labels: options.Labels, + } + + if options.ConfigFrom != "" { + req.ConfigFrom = &network.ConfigReference{Network: options.ConfigFrom} + } + + resp, err := cli.post(ctx, "/networks/create", nil, req, nil) + defer ensureReaderClosed(resp) + if err != nil { + return NetworkCreateResult{}, err + } + + var response network.CreateResponse + err = json.NewDecoder(resp.Body).Decode(&response) + + var warnings []string + if response.Warning != "" { + warnings = []string{response.Warning} + } + + return NetworkCreateResult{ID: response.ID, Warning: warnings}, err +} diff --git a/vendor/github.com/moby/moby/client/network_disconnect.go b/vendor/github.com/moby/moby/client/network_disconnect.go new file mode 100644 index 000000000..64a1796b8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_disconnect.go @@ -0,0 +1,40 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/network" +) + +// NetworkDisconnectOptions represents the data to be used to disconnect a container +// from the network. +type NetworkDisconnectOptions struct { + Container string + Force bool +} + +// NetworkDisconnectResult represents the result of a NetworkDisconnect operation. +type NetworkDisconnectResult struct { + // Currently empty; placeholder for future fields. +} + +// NetworkDisconnect disconnects a container from an existent network in the docker host. +func (cli *Client) NetworkDisconnect(ctx context.Context, networkID string, options NetworkDisconnectOptions) (NetworkDisconnectResult, error) { + networkID, err := trimID("network", networkID) + if err != nil { + return NetworkDisconnectResult{}, err + } + + containerID, err := trimID("container", options.Container) + if err != nil { + return NetworkDisconnectResult{}, err + } + + req := network.DisconnectRequest{ + Container: containerID, + Force: options.Force, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, req, nil) + defer ensureReaderClosed(resp) + return NetworkDisconnectResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/network_inspect.go b/vendor/github.com/moby/moby/client/network_inspect.go new file mode 100644 index 000000000..775780527 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_inspect.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/network" +) + +// NetworkInspectResult contains the result of a network inspection. +type NetworkInspectResult struct { + Network network.Inspect + Raw json.RawMessage +} + +// NetworkInspect returns the information for a specific network configured in the docker host. 
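A sketch of inspecting a network with the NetworkInspect method defined just below, keeping both the typed result and the raw JSON payload (placeholder network ID; assumes a ready *client.Client):

```go
package example

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

// showNetwork prints the decoded inspect result and the size of the raw payload.
func showNetwork(ctx context.Context, cli *client.Client) error {
	res, err := cli.NetworkInspect(ctx, "app-net", client.NetworkInspectOptions{
		Verbose: true, // include service/task details for swarm-scoped networks
	})
	if err != nil {
		return err
	}
	fmt.Printf("network: %+v\n", res.Network)
	fmt.Printf("raw JSON: %d bytes\n", len(res.Raw))
	return nil
}
```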
+func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options NetworkInspectOptions) (NetworkInspectResult, error) { + networkID, err := trimID("network", networkID) + if err != nil { + return NetworkInspectResult{}, err + } + query := url.Values{} + if options.Verbose { + query.Set("verbose", "true") + } + if options.Scope != "" { + query.Set("scope", options.Scope) + } + + resp, err := cli.get(ctx, "/networks/"+networkID, query, nil) + if err != nil { + return NetworkInspectResult{}, err + } + + var out NetworkInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Network) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/network_inspect_opts.go b/vendor/github.com/moby/moby/client/network_inspect_opts.go new file mode 100644 index 000000000..d83f113e1 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_inspect_opts.go @@ -0,0 +1,7 @@ +package client + +// NetworkInspectOptions holds parameters to inspect network. +type NetworkInspectOptions struct { + Scope string + Verbose bool +} diff --git a/vendor/github.com/moby/moby/client/network_list.go b/vendor/github.com/moby/moby/client/network_list.go new file mode 100644 index 000000000..d65f56097 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_list.go @@ -0,0 +1,28 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/network" +) + +// NetworkListResult holds the result from the [Client.NetworkList] method. +type NetworkListResult struct { + Items []network.Summary +} + +// NetworkList returns the list of networks configured in the docker host. +func (cli *Client) NetworkList(ctx context.Context, options NetworkListOptions) (NetworkListResult, error) { + query := url.Values{} + options.Filters.updateURLValues(query) + resp, err := cli.get(ctx, "/networks", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return NetworkListResult{}, err + } + var res NetworkListResult + err = json.NewDecoder(resp.Body).Decode(&res.Items) + return res, err +} diff --git a/vendor/github.com/moby/moby/client/network_list_opts.go b/vendor/github.com/moby/moby/client/network_list_opts.go new file mode 100644 index 000000000..0d21ab313 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_list_opts.go @@ -0,0 +1,6 @@ +package client + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters Filters +} diff --git a/vendor/github.com/moby/moby/client/network_prune.go b/vendor/github.com/moby/moby/client/network_prune.go new file mode 100644 index 000000000..55f7cac02 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_prune.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/moby/moby/api/types/network" +) + +// NetworkPruneOptions holds parameters to prune networks. +type NetworkPruneOptions struct { + Filters Filters +} + +// NetworkPruneResult holds the result from the [Client.NetworkPrune] method. 
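A sketch for the NetworkPrune method defined below. Construction of the Filters type is not shown in this part of the diff, so the zero value is used here (meaning no filtering); the NetworksDeleted field on network.PruneReport is assumed from earlier API versions:

```go
package example

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

// pruneNetworks removes all unused networks and reports what was deleted.
func pruneNetworks(ctx context.Context, cli *client.Client) error {
	res, err := cli.NetworkPrune(ctx, client.NetworkPruneOptions{}) // zero Filters: prune everything unused
	if err != nil {
		return err
	}
	fmt.Println("deleted:", res.Report.NetworksDeleted) // assumed field name, as in prior API versions
	return nil
}
```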
+type NetworkPruneResult struct { + Report network.PruneReport +} + +// NetworkPrune requests the daemon to delete unused networks +func (cli *Client) NetworkPrune(ctx context.Context, opts NetworkPruneOptions) (NetworkPruneResult, error) { + query := url.Values{} + opts.Filters.updateURLValues(query) + + resp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return NetworkPruneResult{}, err + } + + var report network.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return NetworkPruneResult{}, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return NetworkPruneResult{Report: report}, nil +} diff --git a/vendor/github.com/moby/moby/client/network_remove.go b/vendor/github.com/moby/moby/client/network_remove.go new file mode 100644 index 000000000..2bceb0d93 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_remove.go @@ -0,0 +1,26 @@ +package client + +import ( + "context" +) + +// NetworkRemoveOptions specifies options for removing a network. +type NetworkRemoveOptions struct { + // No options currently; placeholder for future use. +} + +// NetworkRemoveResult represents the result of a network removal operation. +type NetworkRemoveResult struct { + // No fields currently; placeholder for future use. +} + +// NetworkRemove removes an existent network from the docker host. +func (cli *Client) NetworkRemove(ctx context.Context, networkID string, options NetworkRemoveOptions) (NetworkRemoveResult, error) { + networkID, err := trimID("network", networkID) + if err != nil { + return NetworkRemoveResult{}, err + } + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + defer ensureReaderClosed(resp) + return NetworkRemoveResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/node_inspect.go b/vendor/github.com/moby/moby/client/node_inspect.go new file mode 100644 index 000000000..cd4ce0119 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_inspect.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/moby/moby/api/types/swarm" +) + +// NodeInspectOptions holds parameters to inspect nodes with. +type NodeInspectOptions struct{} + +type NodeInspectResult struct { + Node swarm.Node + Raw json.RawMessage +} + +// NodeInspect returns the node information. +func (cli *Client) NodeInspect(ctx context.Context, nodeID string, options NodeInspectOptions) (NodeInspectResult, error) { + nodeID, err := trimID("node", nodeID) + if err != nil { + return NodeInspectResult{}, err + } + resp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return NodeInspectResult{}, err + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return NodeInspectResult{}, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return NodeInspectResult{Node: response, Raw: body}, err +} diff --git a/vendor/github.com/moby/moby/client/node_list.go b/vendor/github.com/moby/moby/client/node_list.go new file mode 100644 index 000000000..1a1b57922 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_list.go @@ -0,0 +1,33 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// NodeListOptions holds parameters to list nodes with. 
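A sketch for the NodeList method defined below (zero-valued Filters lists all nodes; the ID and Spec.Role fields on swarm.Node are assumed from earlier API versions):

```go
package example

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

// listNodes prints the ID and role of every swarm node.
func listNodes(ctx context.Context, cli *client.Client) error {
	res, err := cli.NodeList(ctx, client.NodeListOptions{})
	if err != nil {
		return err
	}
	for _, n := range res.Items {
		fmt.Println(n.ID, n.Spec.Role) // assumed fields, carried over from prior API versions
	}
	return nil
}
```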
+type NodeListOptions struct { + Filters Filters +} + +type NodeListResult struct { + Items []swarm.Node +} + +// NodeList returns the list of nodes. +func (cli *Client) NodeList(ctx context.Context, options NodeListOptions) (NodeListResult, error) { + query := url.Values{} + options.Filters.updateURLValues(query) + resp, err := cli.get(ctx, "/nodes", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return NodeListResult{}, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.Body).Decode(&nodes) + return NodeListResult{Items: nodes}, err +} diff --git a/vendor/github.com/moby/moby/client/node_remove.go b/vendor/github.com/moby/moby/client/node_remove.go new file mode 100644 index 000000000..56c39d67a --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_remove.go @@ -0,0 +1,29 @@ +package client + +import ( + "context" + "net/url" +) + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} +type NodeRemoveResult struct{} + +// NodeRemove removes a Node. +func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options NodeRemoveOptions) (NodeRemoveResult, error) { + nodeID, err := trimID("node", nodeID) + if err != nil { + return NodeRemoveResult{}, err + } + + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) + defer ensureReaderClosed(resp) + return NodeRemoveResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/node_update.go b/vendor/github.com/moby/moby/client/node_update.go new file mode 100644 index 000000000..4bc7c3b69 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_update.go @@ -0,0 +1,30 @@ +package client + +import ( + "context" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// NodeUpdateOptions holds parameters to update nodes with. +type NodeUpdateOptions struct { + Version swarm.Version + Spec swarm.NodeSpec +} + +type NodeUpdateResult struct{} + +// NodeUpdate updates a Node. +func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, options NodeUpdateOptions) (NodeUpdateResult, error) { + nodeID, err := trimID("node", nodeID) + if err != nil { + return NodeUpdateResult{}, err + } + + query := url.Values{} + query.Set("version", options.Version.String()) + resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, options.Spec, nil) + defer ensureReaderClosed(resp) + return NodeUpdateResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/ping.go b/vendor/github.com/moby/moby/client/ping.go new file mode 100644 index 000000000..d315e4b98 --- /dev/null +++ b/vendor/github.com/moby/moby/client/ping.go @@ -0,0 +1,166 @@ +package client + +import ( + "context" + "net/http" + "path" + "strings" + + "github.com/moby/moby/api/types/build" + "github.com/moby/moby/api/types/swarm" +) + +// PingOptions holds options for [client.Ping]. +type PingOptions struct { + // NegotiateAPIVersion queries the API and updates the version to match the API + // version. NegotiateAPIVersion downgrades the client's API version to match the + // APIVersion if the ping version is lower than the default version. If the API + // version reported by the server is higher than the maximum version supported + // by the client, it uses the client's maximum version. 
+ // + // If a manual override is in place, either through the "DOCKER_API_VERSION" + // ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized + // with a fixed version ([WithAPIVersion]), no negotiation is performed. + // + // If the API server's ping response does not contain an API version, or if the + // client did not get a successful ping response, it assumes it is connected with + // an old daemon that does not support API version negotiation, in which case it + // downgrades to the lowest supported API version. + NegotiateAPIVersion bool + + // ForceNegotiate forces the client to re-negotiate the API version, even if + // API-version negotiation already happened or if the client is configured + // with a fixed version (using [WithAPIVersion] or [WithAPIVersionFromEnv]). + // + // This option has no effect if NegotiateAPIVersion is not set. + ForceNegotiate bool +} + +// PingResult holds the result of a [Client.Ping] API call. +type PingResult struct { + APIVersion string + OSType string + Experimental bool + BuilderVersion build.BuilderVersion + + // SwarmStatus provides information about the current swarm status of the + // engine, obtained from the "Swarm" header in the API response. + // + // It can be a nil struct if the API version does not provide this header + // in the ping response, or if an error occurred, in which case the client + // should use other ways to get the current swarm status, such as the /swarm + // endpoint. + SwarmStatus *SwarmStatus +} + +// SwarmStatus provides information about the current swarm status and role, +// obtained from the "Swarm" header in the API response. +type SwarmStatus struct { + // NodeState represents the state of the node. + NodeState swarm.LocalNodeState + + // ControlAvailable indicates if the node is a swarm manager. + ControlAvailable bool +} + +// Ping pings the server and returns the value of the "Docker-Experimental", +// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use +// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported +// by the daemon. It ignores internal server errors returned by the API, which +// may be returned if the daemon is in an unhealthy state, but returns errors +// for other non-success status codes, failing to connect to the API, or failing +// to parse the API response. +func (cli *Client) Ping(ctx context.Context, options PingOptions) (PingResult, error) { + if !options.NegotiateAPIVersion { + // No API version negotiation needed; just return ping response. + return cli.ping(ctx) + } + if cli.negotiated.Load() && !options.ForceNegotiate { + // API version was already negotiated or manually set. + return cli.ping(ctx) + } + + // Ensure exclusive write access to version and negotiated fields + cli.negotiateLock.Lock() + defer cli.negotiateLock.Unlock() + + ping, err := cli.ping(ctx) + if err != nil { + return ping, err + } + + if cli.negotiated.Load() && !options.ForceNegotiate { + // API version was already negotiated or manually set. + // + // We check cli.negotiated again under lock, to account for race + // conditions with the check at the start of this function.
+ return ping, nil + } + + if ping.APIVersion == "" { + cli.setAPIVersion(MaxAPIVersion) + return ping, nil + } + + return ping, cli.negotiateAPIVersion(ping.APIVersion) +} + +func (cli *Client) ping(ctx context.Context) (PingResult, error) { + // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() + // because ping requests are used during API version negotiation, so we want + // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping + req, err := cli.buildRequest(ctx, http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return PingResult{}, err + } + resp, err := cli.doRequest(req) + defer ensureReaderClosed(resp) + if err == nil && resp.StatusCode == http.StatusOK { + // Fast-path; successfully connected using a HEAD request and + // we got a "OK" (200) status. For non-200 status-codes, we fall + // back to doing a GET request, as a HEAD request won't have a + // response-body to get error details from. + return newPingResult(resp), nil + } + // close to allow reusing connection. + ensureReaderClosed(resp) + + // HEAD failed or returned a non-OK status; fallback to GET. + req2, err := cli.buildRequest(ctx, http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return PingResult{}, err + } + resp, err = cli.doRequest(req2) + defer ensureReaderClosed(resp) + if err != nil { + // Failed to connect. + return PingResult{}, err + } + + // GET request succeeded but may have returned a non-200 status. + // Return a Ping response, together with any error returned by + // the API server. + return newPingResult(resp), checkResponseErr(resp) +} + +func newPingResult(resp *http.Response) PingResult { + if resp == nil { + return PingResult{} + } + var swarmStatus *SwarmStatus + if si := resp.Header.Get("Swarm"); si != "" { + state, role, _ := strings.Cut(si, "/") + swarmStatus = &SwarmStatus{ + NodeState: swarm.LocalNodeState(state), + ControlAvailable: role == "manager", + } + } + + return PingResult{ + APIVersion: resp.Header.Get("Api-Version"), + OSType: resp.Header.Get("Ostype"), + Experimental: resp.Header.Get("Docker-Experimental") == "true", + BuilderVersion: build.BuilderVersion(resp.Header.Get("Builder-Version")), + SwarmStatus: swarmStatus, + } +} diff --git a/vendor/github.com/docker/docker/pkg/progress/progress.go b/vendor/github.com/moby/moby/client/pkg/progress/progress.go similarity index 92% rename from vendor/github.com/docker/docker/pkg/progress/progress.go rename to vendor/github.com/moby/moby/client/pkg/progress/progress.go index 3f9887c5b..7d2d59606 100644 --- a/vendor/github.com/docker/docker/pkg/progress/progress.go +++ b/vendor/github.com/moby/moby/client/pkg/progress/progress.go @@ -23,7 +23,7 @@ type Progress struct { // Aux contains extra information not presented to the user, such as // digests for push signing. - Aux interface{} + Aux any LastUpdate bool } @@ -71,7 +71,7 @@ func Update(out Output, id, action string) { // Updatef is a convenience function to write a printf-formatted progress update // to the channel. -func Updatef(out Output, id, format string, a ...interface{}) { +func Updatef(out Output, id, format string, a ...any) { Update(out, id, fmt.Sprintf(format, a...)) } @@ -82,12 +82,12 @@ func Message(out Output, id, message string) { // Messagef is a convenience function to write a printf-formatted progress // message to the channel. 
-func Messagef(out Output, id, format string, a ...interface{}) { +func Messagef(out Output, id, format string, a ...any) { Message(out, id, fmt.Sprintf(format, a...)) } // Aux sends auxiliary information over a progress interface, which will not be // formatted for the UI. This is used for things such as push signing. -func Aux(out Output, a interface{}) { +func Aux(out Output, a any) { out.WriteProgress(Progress{Aux: a}) } diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader.go b/vendor/github.com/moby/moby/client/pkg/progress/progressreader.go similarity index 100% rename from vendor/github.com/docker/docker/pkg/progress/progressreader.go rename to vendor/github.com/moby/moby/client/pkg/progress/progressreader.go diff --git a/vendor/github.com/moby/moby/client/pkg/streamformatter/streamformatter.go b/vendor/github.com/moby/moby/client/pkg/streamformatter/streamformatter.go new file mode 100644 index 000000000..df0aebfe2 --- /dev/null +++ b/vendor/github.com/moby/moby/client/pkg/streamformatter/streamformatter.go @@ -0,0 +1,203 @@ +// Package streamformatter provides helper functions to format a stream. +package streamformatter + +import ( + "encoding/json" + "fmt" + "io" + "strings" + "sync" + "time" + + "github.com/docker/go-units" + "github.com/moby/moby/api/types/jsonstream" + "github.com/moby/moby/client/pkg/progress" +) + +const streamNewline = "\r\n" + +type jsonProgressFormatter struct{} + +func appendNewline(source []byte) []byte { + return append(source, []byte(streamNewline)...) +} + +// formatStatus formats the specified objects according to the specified format (and id). +func formatStatus(id, format string, a ...any) []byte { + str := fmt.Sprintf(format, a...) + b, err := json.Marshal(&jsonstream.Message{ID: id, Status: str}) + if err != nil { + return formatError(err) + } + return appendNewline(b) +} + +// formatError formats the error as a JSON object +func formatError(err error) []byte { + jsonError, ok := err.(*jsonstream.Error) + if !ok { + jsonError = &jsonstream.Error{Message: err.Error()} + } + if b, err := json.Marshal(&jsonstream.Message{Error: jsonError}); err == nil { + return appendNewline(b) + } + return []byte(`{"error":"format error"}` + streamNewline) +} + +func (sf *jsonProgressFormatter) formatStatus(id, format string, a ...any) []byte { + return formatStatus(id, format, a...) +} + +// formatProgress formats the progress information for a specified action. +func (sf *jsonProgressFormatter) formatProgress(id, action string, progress *jsonstream.Progress, aux any) []byte { + if progress == nil { + progress = &jsonstream.Progress{} + } + var auxJSON *json.RawMessage + if aux != nil { + auxJSONBytes, err := json.Marshal(aux) + if err != nil { + return nil + } + auxJSON = new(json.RawMessage) + *auxJSON = auxJSONBytes + } + b, err := json.Marshal(&jsonstream.Message{ + Status: action, + Progress: progress, + ID: id, + Aux: auxJSON, + }) + if err != nil { + return nil + } + return appendNewline(b) +} + +type rawProgressFormatter struct{} + +func (sf *rawProgressFormatter) formatStatus(id, format string, a ...any) []byte { + return []byte(fmt.Sprintf(format, a...) 
+ streamNewline) +} + +func rawProgressString(p *jsonstream.Progress) string { + if p == nil || (p.Current <= 0 && p.Total <= 0) { + return "" + } + if p.Total <= 0 { + switch p.Units { + case "": + return fmt.Sprintf("%8v", units.HumanSize(float64(p.Current))) + default: + return fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 + if percentage > 50 { + percentage = 50 + } + + numSpaces := 0 + if 50-percentage > 0 { + numSpaces = 50 - percentage + } + pbBox := fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) + + var numbersBox string + switch { + case p.HideCounts: + case p.Units == "": // no units, use bytes + current := units.HumanSize(float64(p.Current)) + total := units.HumanSize(float64(p.Total)) + + numbersBox = fmt.Sprintf("%8v/%v", current, total) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%8v", current) + } + default: + numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + var timeLeftBox string + if p.Current > 0 && p.Start > 0 && percentage < 50 { + fromStart := time.Since(time.Unix(p.Start, 0)) + perEntry := fromStart / time.Duration(p.Current) + left := time.Duration(p.Total-p.Current) * perEntry + timeLeftBox = " " + left.Round(time.Second).String() + } + return pbBox + numbersBox + timeLeftBox +} + +func (sf *rawProgressFormatter) formatProgress(id, action string, progress *jsonstream.Progress, aux any) []byte { + if progress == nil { + progress = &jsonstream.Progress{} + } + endl := "\r" + out := rawProgressString(progress) + if out == "" { + endl += "\n" + } + return []byte(action + " " + out + endl) +} + +// NewProgressOutput returns a progress.Output object that can be passed to +// progress.NewProgressReader. +func NewProgressOutput(out io.Writer) progress.Output { + return &progressOutput{sf: &rawProgressFormatter{}, out: out, newLines: true} +} + +// NewJSONProgressOutput returns a progress.Output that formats output +// using JSON objects +func NewJSONProgressOutput(out io.Writer, newLines bool) progress.Output { + return &progressOutput{sf: &jsonProgressFormatter{}, out: out, newLines: newLines} +} + +type formatProgress interface { + formatStatus(id, format string, a ...any) []byte + formatProgress(id, action string, progress *jsonstream.Progress, aux any) []byte +} + +type progressOutput struct { + sf formatProgress + out io.Writer + newLines bool + mu sync.Mutex +} + +// WriteProgress formats progress information from a ProgressReader. 
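A short usage sketch for the streamformatter/progress pair in this file (not part of the vendored source; the id and messages are placeholders). NewJSONProgressOutput emits jsonstream messages, one per line, suitable for piping to a consumer; NewProgressOutput would emit the human-readable raw form instead:

```go
package example

import (
	"os"

	"github.com/moby/moby/client/pkg/progress"
	"github.com/moby/moby/client/pkg/streamformatter"
)

// reportProgress writes a couple of updates as JSON stream messages to stdout.
func reportProgress() {
	out := streamformatter.NewJSONProgressOutput(os.Stdout, true)
	progress.Update(out, "abc123", "Pulling fs layer")
	progress.Messagef(out, "abc123", "Downloaded %d/%d layers", 3, 5)
}
```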
+func (out *progressOutput) WriteProgress(prog progress.Progress) error { + var formatted []byte + if prog.Message != "" { + formatted = out.sf.formatStatus(prog.ID, prog.Message) + } else { + jsonProgress := jsonstream.Progress{ + Current: prog.Current, + Total: prog.Total, + HideCounts: prog.HideCounts, + Units: prog.Units, + } + formatted = out.sf.formatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) + } + + out.mu.Lock() + defer out.mu.Unlock() + _, err := out.out.Write(formatted) + if err != nil { + return err + } + + if out.newLines && prog.LastUpdate { + _, err = out.out.Write(out.sf.formatStatus("", "")) + return err + } + + return nil +} diff --git a/vendor/github.com/moby/moby/client/pkg/stringid/stringid.go b/vendor/github.com/moby/moby/client/pkg/stringid/stringid.go new file mode 100644 index 000000000..030e07085 --- /dev/null +++ b/vendor/github.com/moby/moby/client/pkg/stringid/stringid.go @@ -0,0 +1,47 @@ +// Package stringid provides helper functions for dealing with string identifiers. +// +// It is similar to the package used by the daemon, but for presentational +// purposes in the client. +package stringid + +import ( + "crypto/rand" + "encoding/hex" + "strings" +) + +const ( + shortLen = 12 + fullLen = 64 +) + +// TruncateID returns a shorthand version of a string identifier for presentation. +// For convenience, it accepts both digests ("sha256:xxxx") and IDs without an +// algorithm prefix. It truncates the algorithm (if any) before truncating the +// ID. The length of the truncated ID is currently fixed, but users should make +// no assumptions of this to not change; it is merely a prefix of the ID that +// provides enough uniqueness for common scenarios. +// +// Truncated IDs ("ID-prefixes") usually can be used to uniquely identify an +// object (such as a container or network), but collisions may happen, in +// which case an "ambiguous result" error is produced. In case of a collision, +// the caller should try with a longer prefix or the full-length ID. +func TruncateID(id string) string { + if i := strings.IndexRune(id, ':'); i >= 0 { + id = id[i+1:] + } + if len(id) > shortLen { + id = id[:shortLen] + } + return id +} + +// GenerateRandomID returns a unique, 64-character ID consisting of a-z, 0-9. +func GenerateRandomID() string { + b := make([]byte, 32) + if _, err := rand.Read(b); err != nil { + panic(err) // This shouldn't happen + } + id := hex.EncodeToString(b) + return id +} diff --git a/vendor/github.com/moby/moby/client/pkg/versions/compare.go b/vendor/github.com/moby/moby/client/pkg/versions/compare.go new file mode 100644 index 000000000..1a0325c7e --- /dev/null +++ b/vendor/github.com/moby/moby/client/pkg/versions/compare.go @@ -0,0 +1,65 @@ +package versions + +import ( + "strconv" + "strings" +) + +// compare compares two version strings +// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. 
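A quick sketch of the exported comparison helpers defined below. Missing components compare as zero, so "1.44.0" and "1.44" are equal:

```go
package example

import (
	"fmt"

	"github.com/moby/moby/client/pkg/versions"
)

func compareVersions() {
	fmt.Println(versions.LessThan("1.43", "1.44"))             // true
	fmt.Println(versions.GreaterThanOrEqualTo("1.44", "1.44")) // true
	fmt.Println(versions.Equal("1.44.0", "1.44"))              // true: a missing part compares as 0
}
```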
+func compare(v1, v2 string) int { + if v1 == v2 { + return 0 + } + var ( + currTab = strings.Split(v1, ".") + otherTab = strings.Split(v2, ".") + ) + + maxVer := len(currTab) + if len(otherTab) > maxVer { + maxVer = len(otherTab) + } + for i := 0; i < maxVer; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// LessThan checks if a version is less than another +func LessThan(v, other string) bool { + return compare(v, other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func LessThanOrEqualTo(v, other string) bool { + return compare(v, other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func GreaterThan(v, other string) bool { + return compare(v, other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func GreaterThanOrEqualTo(v, other string) bool { + return compare(v, other) >= 0 +} + +// Equal checks if a version is equal to another +func Equal(v, other string) bool { + return compare(v, other) == 0 +} diff --git a/vendor/github.com/moby/moby/client/plugin_create.go b/vendor/github.com/moby/moby/client/plugin_create.go new file mode 100644 index 000000000..c1a2dd5a6 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_create.go @@ -0,0 +1,31 @@ +package client + +import ( + "context" + "io" + "net/http" + "net/url" +) + +// PluginCreateOptions hold all options to plugin create. +type PluginCreateOptions struct { + RepoName string +} + +// PluginCreateResult represents the result of a plugin create operation. +type PluginCreateResult struct { + // Currently empty; can be extended in the future if needed. +} + +// PluginCreate creates a plugin +func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions PluginCreateOptions) (PluginCreateResult, error) { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/x-tar") + + query := url.Values{} + query.Set("name", createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + defer ensureReaderClosed(resp) + return PluginCreateResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_disable.go b/vendor/github.com/moby/moby/client/plugin_disable.go new file mode 100644 index 000000000..65ab0aa00 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_disable.go @@ -0,0 +1,31 @@ +package client + +import ( + "context" + "net/url" +) + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginDisableResult represents the result of a plugin disable operation. +type PluginDisableResult struct { + // Currently empty; can be extended in the future if needed. 
+} + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string, options PluginDisableOptions) (PluginDisableResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginDisableResult{}, err + } + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) + defer ensureReaderClosed(resp) + return PluginDisableResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_enable.go b/vendor/github.com/moby/moby/client/plugin_enable.go new file mode 100644 index 000000000..7c3e26b67 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_enable.go @@ -0,0 +1,31 @@ +package client + +import ( + "context" + "net/url" + "strconv" +) + +// PluginEnableOptions holds parameters to enable plugins. +type PluginEnableOptions struct { + Timeout int +} + +// PluginEnableResult represents the result of a plugin enable operation. +type PluginEnableResult struct { + // Currently empty; can be extended in the future if needed. +} + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options PluginEnableOptions) (PluginEnableResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginEnableResult{}, err + } + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + defer ensureReaderClosed(resp) + return PluginEnableResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_inspect.go b/vendor/github.com/moby/moby/client/plugin_inspect.go new file mode 100644 index 000000000..8caf06a8e --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_inspect.go @@ -0,0 +1,35 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/plugin" +) + +// PluginInspectOptions holds parameters to inspect a plugin. +type PluginInspectOptions struct { + // Add future optional parameters here +} + +// PluginInspectResult holds the result from the [Client.PluginInspect] method. +type PluginInspectResult struct { + Plugin plugin.Plugin + Raw json.RawMessage +} + +// PluginInspect inspects an existing plugin +func (cli *Client) PluginInspect(ctx context.Context, name string, options PluginInspectOptions) (PluginInspectResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginInspectResult{}, err + } + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + if err != nil { + return PluginInspectResult{}, err + } + + var out PluginInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Plugin) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_install.go b/vendor/github.com/moby/moby/client/plugin_install.go new file mode 100644 index 000000000..a589b2e1f --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_install.go @@ -0,0 +1,175 @@ +package client + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + + cerrdefs "github.com/containerd/errdefs" + "github.com/distribution/reference" + "github.com/moby/moby/api/types/plugin" + "github.com/moby/moby/api/types/registry" +) + +// PluginInstallOptions holds parameters to install a plugin. 
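A usage sketch for the PluginInstall method defined below (plugin names and registry are placeholders; assumes a ready *client.Client). The result embeds an io.ReadCloser carrying install progress, and the install only completes once that stream hits EOF, so the caller must drain it:

```go
package example

import (
	"context"
	"io"

	"github.com/moby/moby/client"
)

// installPlugin installs a plugin and drains the progress stream to completion.
func installPlugin(ctx context.Context, cli *client.Client) error {
	res, err := cli.PluginInstall(ctx, "my-plugin:latest", client.PluginInstallOptions{
		RemoteRef:            "registry.example.com/org/my-plugin:latest",
		AcceptAllPermissions: true, // skip the interactive permission check
	})
	if err != nil {
		return err
	}
	defer res.Close()
	_, err = io.Copy(io.Discard, res) // install completes at EOF
	return err
}
```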
+type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // For details, refer to [github.com/moby/moby/api/types/registry.RequestAuthConfig]. + PrivilegeFunc func(context.Context) (string, error) + AcceptPermissionsFunc func(context.Context, plugin.Privileges) (bool, error) + Args []string +} + +// PluginInstallResult holds the result of a plugin install operation. +// It is an io.ReadCloser from which the caller can read installation progress or result. +type PluginInstallResult struct { + io.ReadCloser +} + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options PluginInstallOptions) (_ PluginInstallResult, retErr error) { + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return PluginInstallResult{}, fmt.Errorf("invalid remote reference: %w", err) + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, &options) + if err != nil { + return PluginInstallResult{}, err + } + + // set name for plugin pull, if empty should default to remote reference + query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return PluginInstallResult{}, err + } + + name = resp.Header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := io.Copy(pw, resp.Body) + if err != nil { + _ = pw.CloseWithError(err) + return + } + defer func() { + if retErr != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if _, err := cli.PluginSet(ctx, name, PluginSetOptions{Args: options.Args}); err != nil { + _ = pw.CloseWithError(err) + return + } + } + + if options.Disabled { + _ = pw.Close() + return + } + + _, enableErr := cli.PluginEnable(ctx, name, PluginEnableOptions{Timeout: 0}) + _ = pw.CloseWithError(enableErr) + }() + return PluginInstallResult{pr}, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) { + return cli.get(ctx, "/plugins/privileges", query, http.Header{ + registry.AuthHeader: {registryAuth}, + }) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges plugin.Privileges, registryAuth string) (*http.Response, error) { + return cli.post(ctx, "/plugins/pull", query, privileges, http.Header{ + registry.AuthHeader: {registryAuth}, + }) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options pluginOptions) (plugin.Privileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, options.getRegistryAuth()) + if cerrdefs.IsUnauthorized(err) && options.getPrivilegeFunc() != nil { + // TODO: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.getPrivilegeFunc()(ctx) + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + 
options.setRegistryAuth(newAuthHeader) + resp, err = cli.tryPluginPrivileges(ctx, query, options.getRegistryAuth()) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges plugin.Privileges + if err := json.NewDecoder(resp.Body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, err + } + ensureReaderClosed(resp) + + if !options.getAcceptAllPermissions() && options.getAcceptPermissionsFunc() != nil && len(privileges) > 0 { + accept, err := options.getAcceptPermissionsFunc()(ctx, privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, errors.New("permission denied while installing plugin " + options.getRemoteRef()) + } + } + return privileges, nil +} + +type pluginOptions interface { + getRegistryAuth() string + setRegistryAuth(string) + getPrivilegeFunc() func(context.Context) (string, error) + getAcceptAllPermissions() bool + getAcceptPermissionsFunc() func(context.Context, plugin.Privileges) (bool, error) + getRemoteRef() string +} + +func (o *PluginInstallOptions) getRegistryAuth() string { + return o.RegistryAuth +} + +func (o *PluginInstallOptions) setRegistryAuth(auth string) { + o.RegistryAuth = auth +} + +func (o *PluginInstallOptions) getPrivilegeFunc() func(context.Context) (string, error) { + return o.PrivilegeFunc +} + +func (o *PluginInstallOptions) getAcceptAllPermissions() bool { + return o.AcceptAllPermissions +} + +func (o *PluginInstallOptions) getAcceptPermissionsFunc() func(context.Context, plugin.Privileges) (bool, error) { + return o.AcceptPermissionsFunc +} + +func (o *PluginInstallOptions) getRemoteRef() string { + return o.RemoteRef +} diff --git a/vendor/github.com/moby/moby/client/plugin_list.go b/vendor/github.com/moby/moby/client/plugin_list.go new file mode 100644 index 000000000..cbd90b407 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/plugin" +) + +// PluginListOptions holds parameters to list plugins. +type PluginListOptions struct { + Filters Filters +} + +// PluginListResult represents the result of a plugin list operation. +type PluginListResult struct { + Items []plugin.Plugin +} + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context, options PluginListOptions) (PluginListResult, error) { + query := url.Values{} + + options.Filters.updateURLValues(query) + resp, err := cli.get(ctx, "/plugins", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return PluginListResult{}, err + } + + var plugins plugin.ListResponse + err = json.NewDecoder(resp.Body).Decode(&plugins) + return PluginListResult{Items: plugins}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_push.go b/vendor/github.com/moby/moby/client/plugin_push.go new file mode 100644 index 000000000..4ba25d133 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_push.go @@ -0,0 +1,34 @@ +package client + +import ( + "context" + "io" + "net/http" + + "github.com/moby/moby/api/types/registry" +) + +// PluginPushOptions holds parameters to push a plugin. 
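A usage sketch for the PluginPush method defined below (the plugin name is a placeholder, and encodedAuth stands in for base64-encoded registry credentials produced elsewhere). As with install, the result wraps a progress stream that should be drained:

```go
package example

import (
	"context"
	"io"

	"github.com/moby/moby/client"
)

// pushPlugin pushes a plugin to its registry and drains the progress stream.
func pushPlugin(ctx context.Context, cli *client.Client, encodedAuth string) error {
	res, err := cli.PluginPush(ctx, "my-plugin:latest", client.PluginPushOptions{
		RegistryAuth: encodedAuth, // placeholder: base64-encoded registry.AuthConfig
	})
	if err != nil {
		return err
	}
	defer res.Close()
	_, err = io.Copy(io.Discard, res) // push completes at EOF
	return err
}
```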
+type PluginPushOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry +} + +// PluginPushResult is the result of a plugin push operation +type PluginPushResult struct { + io.ReadCloser +} + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, options PluginPushOptions) (PluginPushResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginPushResult{}, err + } + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, http.Header{ + registry.AuthHeader: {options.RegistryAuth}, + }) + if err != nil { + return PluginPushResult{}, err + } + return PluginPushResult{resp.Body}, nil +} diff --git a/vendor/github.com/moby/moby/client/plugin_remove.go b/vendor/github.com/moby/moby/client/plugin_remove.go new file mode 100644 index 000000000..229f40858 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_remove.go @@ -0,0 +1,33 @@ +package client + +import ( + "context" + "net/url" +) + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginRemoveResult represents the result of a plugin removal. +type PluginRemoveResult struct { + // Currently empty; can be extended in the future if needed. +} + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string, options PluginRemoveOptions) (PluginRemoveResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginRemoveResult{}, err + } + + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) + defer ensureReaderClosed(resp) + return PluginRemoveResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_set.go b/vendor/github.com/moby/moby/client/plugin_set.go new file mode 100644 index 000000000..c1f6bb5fa --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_set.go @@ -0,0 +1,27 @@ +package client + +import ( + "context" +) + +// PluginSetOptions defines options for modifying a plugin's settings. +type PluginSetOptions struct { + Args []string +} + +// PluginSetResult represents the result of a plugin set operation. +type PluginSetResult struct { + // Currently empty; can be extended in the future if needed. +} + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, options PluginSetOptions) (PluginSetResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginSetResult{}, err + } + + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, options.Args, nil) + defer ensureReaderClosed(resp) + return PluginSetResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_upgrade.go b/vendor/github.com/moby/moby/client/plugin_upgrade.go new file mode 100644 index 000000000..f9df6e584 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_upgrade.go @@ -0,0 +1,89 @@ +package client + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/distribution/reference" + "github.com/moby/moby/api/types/plugin" + "github.com/moby/moby/api/types/registry" +) + +// PluginUpgradeOptions holds parameters to upgrade a plugin. 
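A usage sketch for the PluginUpgrade method defined below (placeholder names; assumes a ready *client.Client). PluginUpgradeResult has io.ReadCloser as its underlying type, so it can be read and closed directly:

```go
package example

import (
	"context"
	"io"

	"github.com/moby/moby/client"
)

// upgradePlugin upgrades an installed plugin to a new remote reference.
func upgradePlugin(ctx context.Context, cli *client.Client) error {
	rc, err := cli.PluginUpgrade(ctx, "my-plugin:latest", client.PluginUpgradeOptions{
		RemoteRef: "registry.example.com/org/my-plugin:v2",
	})
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(io.Discard, rc) // upgrade completes at EOF
	return err
}
```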
+type PluginUpgradeOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // For details, refer to [github.com/moby/moby/api/types/registry.RequestAuthConfig]. + PrivilegeFunc func(context.Context) (string, error) + AcceptPermissionsFunc func(context.Context, plugin.Privileges) (bool, error) + Args []string +} + +// PluginUpgradeResult holds the result of a plugin upgrade operation. +type PluginUpgradeResult io.ReadCloser + +// PluginUpgrade upgrades a plugin +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options PluginUpgradeOptions) (PluginUpgradeResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return nil, err + } + + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, fmt.Errorf("invalid remote reference: %w", err) + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, &options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.Body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges plugin.Privileges, name, registryAuth string) (*http.Response, error) { + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, http.Header{ + registry.AuthHeader: {registryAuth}, + }) +} + +func (o *PluginUpgradeOptions) getRegistryAuth() string { + return o.RegistryAuth +} + +func (o *PluginUpgradeOptions) setRegistryAuth(auth string) { + o.RegistryAuth = auth +} + +func (o *PluginUpgradeOptions) getPrivilegeFunc() func(context.Context) (string, error) { + return o.PrivilegeFunc +} + +func (o *PluginUpgradeOptions) getAcceptAllPermissions() bool { + return o.AcceptAllPermissions +} + +func (o *PluginUpgradeOptions) getAcceptPermissionsFunc() func(context.Context, plugin.Privileges) (bool, error) { + return o.AcceptPermissionsFunc +} + +func (o *PluginUpgradeOptions) getRemoteRef() string { + return o.RemoteRef +} diff --git a/vendor/github.com/moby/moby/client/request.go b/vendor/github.com/moby/moby/client/request.go new file mode 100644 index 000000000..50c12eb15 --- /dev/null +++ b/vendor/github.com/moby/moby/client/request.go @@ -0,0 +1,373 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "reflect" + "strings" + + "github.com/moby/moby/api/types/common" +) + +// head sends an http request to the docker API using the method HEAD. +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers http.Header) (*http.Response, error) { + return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) +} + +// get sends an http request to the docker API using the method GET with a specific Go context. 
+func (cli *Client) get(ctx context.Context, path string, query url.Values, headers http.Header) (*http.Response, error) { + return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) +} + +// post sends an http POST request to the API. +func (cli *Client) post(ctx context.Context, path string, query url.Values, body any, headers http.Header) (*http.Response, error) { + jsonBody, headers, err := prepareJSONRequest(body, headers) + if err != nil { + return nil, err + } + return cli.sendRequest(ctx, http.MethodPost, path, query, jsonBody, headers) +} + +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) { + return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) +} + +func (cli *Client) put(ctx context.Context, path string, query url.Values, body any, headers http.Header) (*http.Response, error) { + jsonBody, headers, err := prepareJSONRequest(body, headers) + if err != nil { + return nil, err + } + return cli.putRaw(ctx, path, query, jsonBody, headers) +} + +// putRaw sends an http request to the docker API using the method PUT. +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) { + // PUT requests are expected to always have a body (apparently) + // so explicitly pass an empty body to sendRequest to signal that + // it should set the Content-Type header if not already present. + if body == nil { + body = http.NoBody + } + return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) +} + +// delete sends an http request to the docker API using the method DELETE. +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers http.Header) (*http.Response, error) { + return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) +} + +// prepareJSONRequest encodes the given body to JSON and returns it as an [io.Reader], and sets the Content-Type +// header. If body is nil, or a nil-interface, a "nil" body is returned without +// error. +func prepareJSONRequest(body any, headers http.Header) (io.Reader, http.Header, error) { + jsonBody, err := jsonEncode(body) + if err != nil { + return nil, headers, err + } + if jsonBody == nil || jsonBody == http.NoBody { + // no content-type is set on empty requests. + return jsonBody, headers, nil + } + + hdr := http.Header{} + if headers != nil { + hdr = headers.Clone() + } + + // TODO(thaJeztah): should this return an error if a different Content-Type is already set? + hdr.Set("Content-Type", "application/json") + return jsonBody, hdr, nil +} + +func (cli *Client) buildRequest(ctx context.Context, method, path string, body io.Reader, headers http.Header) (*http.Request, error) { + req, err := http.NewRequestWithContext(ctx, method, path, body) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, headers) + req.URL.Scheme = cli.scheme + req.URL.Host = cli.addr + + if cli.proto == "unix" || cli.proto == "npipe" { + // Override host header for non-tcp connections. + req.Host = DummyHost + } + + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) { + req, err := cli.buildRequest(ctx, method, cli.getAPIPath(ctx, path, query), body, headers) + if err != nil { + return nil, err + } + + resp, err := cli.doRequest(req) + if err != nil { + // Failed to connect or context error. 
+		return resp, err
+	}
+
+	// Successfully made a request; return the response and handle any
+	// API HTTP response errors.
+	return resp, checkResponseErr(resp)
+}
+
+// doRequest sends an HTTP request and returns an HTTP response. It is a
+// wrapper around [http.Client.Do] with extra handling to decorate errors.
+//
+// Otherwise, it behaves identically to [http.Client.Do]: an error is returned
+// when failing to make a connection. On error, any Response can be ignored.
+// A non-2xx status code doesn't cause an error.
+func (cli *Client) doRequest(req *http.Request) (*http.Response, error) {
+	resp, err := cli.client.Do(req)
+	if err == nil {
+		return resp, nil
+	}
+
+	if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") {
+		return nil, errConnectionFailed{fmt.Errorf("%w.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)}
+	}
+
+	const (
+		// Go 1.25 / TLS 1.3 may produce a generic "handshake failure"
+		// whereas TLS 1.2 may produce a "bad certificate" TLS alert.
+		// See https://github.com/golang/go/issues/56371
+		//
+		// > https://tip.golang.org/doc/go1.12#tls_1_3
+		// >
+		// > In TLS 1.3 the client is the last one to speak in the handshake, so if
+		// > it causes an error to occur on the server, it will be returned on the
+		// > client by the first Read, not by Handshake. For example, that will be
+		// > the case if the server rejects the client certificate.
+		//
+		// https://github.com/golang/go/blob/go1.25.1/src/crypto/tls/alert.go#L71-L72
+		alertBadCertificate   = "bad certificate"   // go1.24 / TLS 1.2
+		alertHandshakeFailure = "handshake failure" // go1.25 / TLS 1.3
+	)
+
+	// TODO(thaJeztah): see if we can use errors.As for a [crypto/tls.AlertError] instead of bare string matching.
+	if cli.scheme == "https" && (strings.Contains(err.Error(), alertHandshakeFailure) || strings.Contains(err.Error(), alertBadCertificate)) {
+		return nil, errConnectionFailed{fmt.Errorf("the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings: %w", err)}
+	}
+
+	// Don't decorate context sentinel errors; users may be comparing to
+	// them directly.
+	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+		return nil, err
+	}
+
+	if errors.Is(err, os.ErrPermission) {
+		// Don't include request errors ("Get "http://%2Fvar%2Frun%2Fdocker.sock/v1.51/version"),
+		// which are irrelevant if we weren't able to connect.
+		return nil, errConnectionFailed{fmt.Errorf("permission denied while trying to connect to the docker API at %v", cli.host)}
+	}
+	if errors.Is(err, os.ErrNotExist) {
+		// Unwrap the error to remove request errors ("Get "http://%2Fvar%2Frun%2Fdocker.sock/v1.51/version"),
+		// which are irrelevant if we weren't able to connect.
+		err = errors.Unwrap(err)
+		return nil, errConnectionFailed{fmt.Errorf("failed to connect to the docker API at %v; check if the path is correct and if the daemon is running: %w", cli.host, err)}
+	}
+	var dnsErr *net.DNSError
+	if errors.As(err, &dnsErr) {
+		return nil, errConnectionFailed{fmt.Errorf("failed to connect to the docker API at %v: %w", cli.host, dnsErr)}
+	}
+
+	var nErr net.Error
+	if errors.As(err, &nErr) {
+		// FIXME(thaJeztah): any net.Error should be considered a connection error (but we should include the original error)?
+		if nErr.Timeout() {
+			return nil, connectionFailed(cli.host)
+		}
+		if strings.Contains(nErr.Error(), "connection refused") || strings.Contains(nErr.Error(), "dial unix") {
+			return nil, connectionFailed(cli.host)
+		}
+	}
+
+	// Although there's not a strongly typed error for this in go-winio,
+	// lots of people are using the default configuration for the docker
+	// daemon on Windows where the daemon is listening on a named pipe
+	// `//./pipe/docker_engine`, and the client must be running elevated.
+	// Give users a clue rather than the not-overly useful message
+	// such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info:
+	// open //./pipe/docker_engine: The system cannot find the file specified.`.
+	// Note we can't string compare "The system cannot find the file specified" as
+	// this is localised - for example in French the error would be
+	// `open //./pipe/docker_engine: Le fichier spécifié est introuvable.`
+	if strings.Contains(err.Error(), `open //./pipe/docker_engine`) {
+		// Checks if client is running with elevated privileges
+		if f, elevatedErr := os.Open(`\\.\PHYSICALDRIVE0`); elevatedErr != nil {
+			err = fmt.Errorf("in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect: %w", err)
+		} else {
+			_ = f.Close()
+			err = fmt.Errorf("this error may indicate that the docker daemon is not running: %w", err)
+		}
+	}
+
+	return nil, errConnectionFailed{fmt.Errorf("error during connect: %w", err)}
+}
+
+func checkResponseErr(serverResp *http.Response) (retErr error) {
+	if serverResp == nil {
+		return nil
+	}
+	if serverResp.StatusCode >= http.StatusOK && serverResp.StatusCode < http.StatusBadRequest {
+		return nil
+	}
+	defer func() {
+		retErr = httpErrorFromStatusCode(retErr, serverResp.StatusCode)
+	}()
+
+	var body []byte
+	var err error
+	var reqURL string
+	if serverResp.Request != nil {
+		reqURL = serverResp.Request.URL.String()
+	}
+	statusMsg := serverResp.Status
+	if statusMsg == "" {
+		statusMsg = http.StatusText(serverResp.StatusCode)
+	}
+	var reqMethod string
+	if serverResp.Request != nil {
+		reqMethod = serverResp.Request.Method
+	}
+	if serverResp.Body != nil && reqMethod != http.MethodHead {
+		bodyMax := 1 * 1024 * 1024 // 1 MiB
+		bodyR := &io.LimitedReader{
+			R: serverResp.Body,
+			N: int64(bodyMax),
+		}
+		body, err = io.ReadAll(bodyR)
+		if err != nil {
+			return err
+		}
+		if bodyR.N == 0 {
+			if reqURL != "" {
+				return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", statusMsg, bodyMax, reqURL)
+			}
+			return fmt.Errorf("request returned %s with a message (> %d bytes); check if the server supports the requested API version", statusMsg, bodyMax)
+		}
+	}
+	if len(body) == 0 {
+		if reqURL != "" {
+			return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", statusMsg, reqURL)
+		}
+		return fmt.Errorf("request returned %s; check if the server supports the requested API version", statusMsg)
+	}
+
+	var daemonErr error
+	if serverResp.Header.Get("Content-Type") == "application/json" {
+		var errorResponse common.ErrorResponse
+		if err := json.Unmarshal(body, &errorResponse); err != nil {
+			return fmt.Errorf("error reading JSON: %w", err)
+		}
+		if errorResponse.Message == "" {
+			// Error-message is empty, which means that we successfully parsed the
+			// JSON-response (no error produced), but it didn't contain an error
+			// message. This could either be because the response was empty, or
+			// the response was valid JSON, but not with the expected schema
+			// ([common.ErrorResponse]).
+			//
+			// We cannot use "strict" JSON handling (json.NewDecoder with DisallowUnknownFields)
+			// due to the API using an open schema (we must anticipate fields
+			// being added to [common.ErrorResponse] in the future, and not
+			// reject those responses).
+			//
+			// For these cases, we construct an error with the status-code
+			// returned, but we could consider returning (a truncated version
+			// of) the actual response as-is.
+			//
+			// TODO(thaJeztah): consider adding a log.Debug to allow clients to debug the actual response when enabling debug logging.
+			daemonErr = fmt.Errorf(`API returned a %d (%s) but provided no error-message`,
+				serverResp.StatusCode,
+				http.StatusText(serverResp.StatusCode),
+			)
+		} else {
+			daemonErr = errors.New(strings.TrimSpace(errorResponse.Message))
+		}
+	} else {
+		// Fall back to returning the response as-is for situations where a
+		// plain text error is returned. This branch may also catch
+		// situations where a proxy is involved, returning an HTML response.
+		daemonErr = errors.New(strings.TrimSpace(string(body)))
+	}
+	return fmt.Errorf("Error response from daemon: %w", daemonErr)
+}
+
+func (cli *Client) addHeaders(req *http.Request, headers http.Header) *http.Request {
+	// Add CLI Config's HTTP Headers BEFORE we set the Docker headers
+	// so the user can't change OUR headers
+	for k, v := range cli.customHTTPHeaders {
+		req.Header.Set(k, v)
+	}
+
+	for k, v := range headers {
+		req.Header[http.CanonicalHeaderKey(k)] = v
+	}
+
+	if cli.userAgent != nil {
+		if *cli.userAgent == "" {
+			req.Header.Del("User-Agent")
+		} else {
+			req.Header.Set("User-Agent", *cli.userAgent)
+		}
+	}
+	return req
+}
+
+func jsonEncode(data any) (io.Reader, error) {
+	switch x := data.(type) {
+	case nil:
+		return http.NoBody, nil
+	case io.Reader:
+		// http.NoBody or other readers
+		return x, nil
+	case json.RawMessage:
+		if len(x) == 0 {
+			return http.NoBody, nil
+		}
+		return bytes.NewReader(x), nil
+	}
+
+	// encoding/json encodes a nil pointer as the JSON document `null`,
+	// irrespective of whether the type implements json.Marshaler or encoding.TextMarshaler.
+	// That is almost certainly not what the caller intended as the request body.
+	if v := reflect.ValueOf(data); v.Kind() == reflect.Ptr && v.IsNil() {
+		return http.NoBody, nil
+	}
+
+	b, err := json.Marshal(data)
+	if err != nil {
+		return nil, err
+	}
+	return bytes.NewReader(b), nil
+}
+
+func ensureReaderClosed(response *http.Response) {
+	if response == nil || response.Body == nil {
+		return
+	}
+	if response.ContentLength == 0 || (response.Request != nil && response.Request.Method == http.MethodHead) {
+		// No need to drain head requests or zero-length responses.
+		_ = response.Body.Close()
+		return
+	}
+	// Drain up to 512 bytes and close the body to let the Transport reuse the connection
+	// see https://github.com/google/go-github/pull/317/files#r57536827
+	//
+	// TODO(thaJeztah): see if this optimization is still needed, or already implemented in stdlib,
+	// and check if context-cancellation should handle this as well. If still needed, consider
+	// wrapping response.Body, or returning a "closer()" from [Client.sendRequest] and related
+	// methods.
+	_, _ = io.CopyN(io.Discard, response.Body, 512)
+	_ = response.Body.Close()
+}
diff --git a/vendor/github.com/moby/moby/client/secret_create.go b/vendor/github.com/moby/moby/client/secret_create.go
new file mode 100644
index 000000000..8e59a42ce
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/secret_create.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/moby/moby/api/types/swarm"
+)
+
+// SecretCreateOptions holds options for creating a secret.
+type SecretCreateOptions struct {
+	Spec swarm.SecretSpec
+}
+
+// SecretCreateResult holds the result from the [Client.SecretCreate] method.
+type SecretCreateResult struct {
+	ID string
+}
+
+// SecretCreate creates a new secret.
+func (cli *Client) SecretCreate(ctx context.Context, options SecretCreateOptions) (SecretCreateResult, error) {
+	resp, err := cli.post(ctx, "/secrets/create", nil, options.Spec, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return SecretCreateResult{}, err
+	}
+
+	var out swarm.ConfigCreateResponse
+	err = json.NewDecoder(resp.Body).Decode(&out)
+	if err != nil {
+		return SecretCreateResult{}, err
+	}
+	return SecretCreateResult{ID: out.ID}, nil
+}
diff --git a/vendor/github.com/moby/moby/client/secret_inspect.go b/vendor/github.com/moby/moby/client/secret_inspect.go
new file mode 100644
index 000000000..fefd4cd23
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/secret_inspect.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/moby/moby/api/types/swarm"
+)
+
+// SecretInspectOptions holds options for inspecting a secret.
+type SecretInspectOptions struct {
+	// Add future optional parameters here
+}
+
+// SecretInspectResult holds the result from the [Client.SecretInspect] method.
+type SecretInspectResult struct {
+	Secret swarm.Secret
+	Raw    json.RawMessage
+}
+
+// SecretInspect returns the secret information with raw data.
+func (cli *Client) SecretInspect(ctx context.Context, id string, options SecretInspectOptions) (SecretInspectResult, error) {
+	id, err := trimID("secret", id)
+	if err != nil {
+		return SecretInspectResult{}, err
+	}
+	resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
+	if err != nil {
+		return SecretInspectResult{}, err
+	}
+
+	var out SecretInspectResult
+	out.Raw, err = decodeWithRaw(resp, &out.Secret)
+	return out, err
+}
diff --git a/vendor/github.com/moby/moby/client/secret_list.go b/vendor/github.com/moby/moby/client/secret_list.go
new file mode 100644
index 000000000..be3695575
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/secret_list.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"net/url"
+
+	"github.com/moby/moby/api/types/swarm"
+)
+
+// SecretListOptions holds parameters to list secrets.
+type SecretListOptions struct {
+	Filters Filters
+}
+
+// SecretListResult holds the result from the [Client.SecretList] method.
+type SecretListResult struct {
+	Items []swarm.Secret
+}
+
+// SecretList returns the list of secrets.
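+//
+// An illustrative sketch, not part of the upstream file (assumes an
+// initialized *Client named cli):
+//
+//	res, err := cli.SecretList(ctx, SecretListOptions{})
+//	if err != nil {
+//		return err
+//	}
+//	for _, s := range res.Items {
+//		fmt.Println(s.ID, s.Spec.Name)
+//	}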
+func (cli *Client) SecretList(ctx context.Context, options SecretListOptions) (SecretListResult, error) {
+	query := url.Values{}
+	options.Filters.updateURLValues(query)
+
+	resp, err := cli.get(ctx, "/secrets", query, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return SecretListResult{}, err
+	}
+
+	var out SecretListResult
+	err = json.NewDecoder(resp.Body).Decode(&out.Items)
+	if err != nil {
+		return SecretListResult{}, err
+	}
+	return out, nil
+}
diff --git a/vendor/github.com/moby/moby/client/secret_remove.go b/vendor/github.com/moby/moby/client/secret_remove.go
new file mode 100644
index 000000000..8554f3f21
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/secret_remove.go
@@ -0,0 +1,25 @@
+package client
+
+import "context"
+
+type SecretRemoveOptions struct {
+	// Add future optional parameters here
+}
+
+type SecretRemoveResult struct {
+	// Add future fields here
+}
+
+// SecretRemove removes a secret.
+func (cli *Client) SecretRemove(ctx context.Context, id string, options SecretRemoveOptions) (SecretRemoveResult, error) {
+	id, err := trimID("secret", id)
+	if err != nil {
+		return SecretRemoveResult{}, err
+	}
+	resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return SecretRemoveResult{}, err
+	}
+	return SecretRemoveResult{}, nil
+}
diff --git a/vendor/github.com/moby/moby/client/secret_update.go b/vendor/github.com/moby/moby/client/secret_update.go
new file mode 100644
index 000000000..c88ad1106
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/secret_update.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+	"context"
+	"net/url"
+
+	"github.com/moby/moby/api/types/swarm"
+)
+
+// SecretUpdateOptions holds options for updating a secret.
+type SecretUpdateOptions struct {
+	Version swarm.Version
+	Spec    swarm.SecretSpec
+}
+
+type SecretUpdateResult struct{}
+
+// SecretUpdate attempts to update a secret.
+func (cli *Client) SecretUpdate(ctx context.Context, id string, options SecretUpdateOptions) (SecretUpdateResult, error) {
+	id, err := trimID("secret", id)
+	if err != nil {
+		return SecretUpdateResult{}, err
+	}
+	query := url.Values{}
+	query.Set("version", options.Version.String())
+	resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, options.Spec, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return SecretUpdateResult{}, err
+	}
+	return SecretUpdateResult{}, nil
+}
diff --git a/vendor/github.com/moby/moby/client/service_create.go b/vendor/github.com/moby/moby/client/service_create.go
new file mode 100644
index 000000000..319bca6f4
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/service_create.go
@@ -0,0 +1,206 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/distribution/reference"
+	"github.com/moby/moby/api/types/registry"
+	"github.com/moby/moby/api/types/swarm"
+	"github.com/opencontainers/go-digest"
+)
+
+// ServiceCreateOptions contains the options to use when creating a service.
+type ServiceCreateOptions struct {
+	Spec swarm.ServiceSpec
+
+	// EncodedRegistryAuth is the encoded registry authorization credentials to
+	// use when creating the service.
+	//
+	// This field follows the format of the X-Registry-Auth header.
+	EncodedRegistryAuth string
+
+	// QueryRegistry indicates whether creating the service requires
+	// contacting a registry. A registry may be contacted to retrieve
+	// the image digest and manifest, which in turn can be used to update
+	// platform or other information about the service.
+	QueryRegistry bool
+}
+
+// ServiceCreateResult represents the result of creating a service.
+type ServiceCreateResult struct {
+	// ID is the ID of the created service.
+	ID string
+
+	// Warnings is a list of warnings that occurred during service creation.
+	Warnings []string
+}
+
+// ServiceCreate creates a new service.
+func (cli *Client) ServiceCreate(ctx context.Context, options ServiceCreateOptions) (ServiceCreateResult, error) {
+	// Make sure containerSpec is not nil when no runtime is set or the runtime is set to container
+	if options.Spec.TaskTemplate.ContainerSpec == nil && (options.Spec.TaskTemplate.Runtime == "" || options.Spec.TaskTemplate.Runtime == swarm.RuntimeContainer) {
+		options.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
+	}
+
+	if err := validateServiceSpec(options.Spec); err != nil {
+		return ServiceCreateResult{}, err
+	}
+
+	// ensure that the image is tagged
+	var warnings []string
+	switch {
+	case options.Spec.TaskTemplate.ContainerSpec != nil:
+		if taggedImg := imageWithTagString(options.Spec.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
+			options.Spec.TaskTemplate.ContainerSpec.Image = taggedImg
+		}
+		if options.QueryRegistry {
+			if warning := resolveContainerSpecImage(ctx, cli, &options.Spec.TaskTemplate, options.EncodedRegistryAuth); warning != "" {
+				warnings = append(warnings, warning)
+			}
+		}
+	case options.Spec.TaskTemplate.PluginSpec != nil:
+		if taggedImg := imageWithTagString(options.Spec.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
+			options.Spec.TaskTemplate.PluginSpec.Remote = taggedImg
+		}
+		if options.QueryRegistry {
+			if warning := resolvePluginSpecRemote(ctx, cli, &options.Spec.TaskTemplate, options.EncodedRegistryAuth); warning != "" {
+				warnings = append(warnings, warning)
+			}
+		}
+	}
+
+	headers := http.Header{}
+	if options.EncodedRegistryAuth != "" {
+		headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth}
+	}
+	resp, err := cli.post(ctx, "/services/create", nil, options.Spec, headers)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return ServiceCreateResult{}, err
+	}
+
+	var response swarm.ServiceCreateResponse
+	err = json.NewDecoder(resp.Body).Decode(&response)
+	warnings = append(warnings, response.Warnings...)
+
+	return ServiceCreateResult{
+		ID:       response.ID,
+		Warnings: warnings,
+	}, err
+}
+
+func resolveContainerSpecImage(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string {
+	img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.ContainerSpec.Image, encodedAuth)
+	if err != nil {
+		return digestWarning(taskSpec.ContainerSpec.Image)
+	}
+	taskSpec.ContainerSpec.Image = img
+	if len(imgPlatforms) > 0 {
+		if taskSpec.Placement == nil {
+			taskSpec.Placement = &swarm.Placement{}
+		}
+		taskSpec.Placement.Platforms = imgPlatforms
+	}
+	return ""
+}
+
+func resolvePluginSpecRemote(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string {
+	img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.PluginSpec.Remote, encodedAuth)
+	if err != nil {
+		return digestWarning(taskSpec.PluginSpec.Remote)
+	}
+	taskSpec.PluginSpec.Remote = img
+	if len(imgPlatforms) > 0 {
+		if taskSpec.Placement == nil {
+			taskSpec.Placement = &swarm.Placement{}
+		}
+		taskSpec.Placement.Platforms = imgPlatforms
+	}
+	return ""
+}
+
+func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) {
+	distributionInspect, err := cli.DistributionInspect(ctx, image, DistributionInspectOptions{
+		EncodedRegistryAuth: encodedAuth,
+	})
+	var platforms []swarm.Platform
+	if err != nil {
+		return "", nil, err
+	}
+
+	imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest)
+
+	if len(distributionInspect.Platforms) > 0 {
+		platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms))
+		for _, p := range distributionInspect.Platforms {
+			// clear architecture field for arm. This is a temporary patch to address
+			// https://github.com/docker/swarmkit/issues/2294. The issue is that while
+			// image manifests report "arm" as the architecture, the node reports
+			// something like "armv7l" (includes the variant), which causes arm images
+			// to stop working with swarm mode. This patch removes the architecture
+			// constraint for arm images to ensure tasks get scheduled.
+			arch := p.Architecture
+			if strings.ToLower(arch) == "arm" {
+				arch = ""
+			}
+			platforms = append(platforms, swarm.Platform{
+				Architecture: arch,
+				OS:           p.OS,
+			})
+		}
+	}
+	return imageWithDigest, platforms, err
+}
+
+// imageWithDigestString takes an image string and a digest, and updates
+// the image string if it didn't originally contain a digest. It returns
+// image unmodified in other situations.
+func imageWithDigestString(image string, dgst digest.Digest) string {
+	namedRef, err := reference.ParseNormalizedNamed(image)
+	if err == nil {
+		if _, hasDigest := namedRef.(reference.Digested); !hasDigest {
+			// ensure that image gets a default tag if none is provided
+			img, err := reference.WithDigest(namedRef, dgst)
+			if err == nil {
+				return reference.FamiliarString(img)
+			}
+		}
+	}
+	return image
+}
+
+// imageWithTagString takes an image string, and returns a tagged image
+// string, adding a 'latest' tag if one was not provided. It returns an
+// empty string if a canonical reference was provided.
+func imageWithTagString(image string) string {
+	namedRef, err := reference.ParseNormalizedNamed(image)
+	if err == nil {
+		return reference.FamiliarString(reference.TagNameOnly(namedRef))
+	}
+	return ""
+}
+
+// digestWarning constructs a formatted warning string using the
+// image name that could not be pinned by digest. The formatting
+// is hardcoded, but could be made smarter in the future.
+func digestWarning(image string) string {
+	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
+}
+
+func validateServiceSpec(s swarm.ServiceSpec) error {
+	if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil {
+		return errors.New("must not specify both a container spec and a plugin spec in the task template")
+	}
+	if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin {
+		return errors.New("mismatched runtime with plugin spec")
+	}
+	if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) {
+		return errors.New("mismatched runtime with container spec")
+	}
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/client/service_inspect.go b/vendor/github.com/moby/moby/client/service_inspect.go
new file mode 100644
index 000000000..9bda43f86
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/service_inspect.go
@@ -0,0 +1,40 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/moby/moby/api/types/swarm"
+)
+
+// ServiceInspectOptions holds parameters related to the service inspect operation.
+type ServiceInspectOptions struct {
+	InsertDefaults bool
+}
+
+// ServiceInspectResult represents the result of a service inspect operation.
+type ServiceInspectResult struct {
+	Service swarm.Service
+	Raw     json.RawMessage
+}
+
+// ServiceInspect retrieves detailed information about a specific service by its ID.
+func (cli *Client) ServiceInspect(ctx context.Context, serviceID string, options ServiceInspectOptions) (ServiceInspectResult, error) {
+	serviceID, err := trimID("service", serviceID)
+	if err != nil {
+		return ServiceInspectResult{}, err
+	}
+
+	query := url.Values{}
+	query.Set("insertDefaults", fmt.Sprintf("%v", options.InsertDefaults))
+	resp, err := cli.get(ctx, "/services/"+serviceID, query, nil)
+	if err != nil {
+		return ServiceInspectResult{}, err
+	}
+
+	var out ServiceInspectResult
+	out.Raw, err = decodeWithRaw(resp, &out.Service)
+	return out, err
+}
diff --git a/vendor/github.com/moby/moby/client/service_list.go b/vendor/github.com/moby/moby/client/service_list.go
new file mode 100644
index 000000000..94b5204be
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/service_list.go
@@ -0,0 +1,44 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"net/url"
+
+	"github.com/moby/moby/api/types/swarm"
+)
+
+// ServiceListOptions holds parameters to list services with.
+type ServiceListOptions struct {
+	Filters Filters
+
+	// Status indicates whether the server should include the service task
+	// count of running and desired tasks.
+	Status bool
+}
+
+// ServiceListResult represents the result of a service list operation.
+type ServiceListResult struct {
+	Items []swarm.Service
+}
+
+// ServiceList returns the list of services.
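+//
+// An illustrative sketch, not part of the upstream file (assumes an
+// initialized *Client named cli):
+//
+//	res, err := cli.ServiceList(ctx, ServiceListOptions{Status: true})
+//	if err != nil {
+//		return err
+//	}
+//	for _, svc := range res.Items {
+//		fmt.Println(svc.ID, svc.Spec.Name)
+//	}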
+func (cli *Client) ServiceList(ctx context.Context, options ServiceListOptions) (ServiceListResult, error) {
+	query := url.Values{}
+
+	options.Filters.updateURLValues(query)
+
+	if options.Status {
+		query.Set("status", "true")
+	}
+
+	resp, err := cli.get(ctx, "/services", query, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return ServiceListResult{}, err
+	}
+
+	var services []swarm.Service
+	err = json.NewDecoder(resp.Body).Decode(&services)
+	return ServiceListResult{Items: services}, err
+}
diff --git a/vendor/github.com/moby/moby/client/service_logs.go b/vendor/github.com/moby/moby/client/service_logs.go
new file mode 100644
index 000000000..fd565db5a
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/service_logs.go
@@ -0,0 +1,91 @@
+package client
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/url"
+	"time"
+
+	"github.com/moby/moby/client/internal/timestamp"
+)
+
+// ServiceLogsOptions holds parameters to filter logs with.
+type ServiceLogsOptions struct {
+	ShowStdout bool
+	ShowStderr bool
+	Since      string
+	Until      string
+	Timestamps bool
+	Follow     bool
+	Tail       string
+	Details    bool
+}
+
+// ServiceLogsResult holds the result of a service logs operation.
+// It implements [io.ReadCloser].
+// It's up to the caller to close the stream.
+type ServiceLogsResult interface {
+	io.ReadCloser
+}
+
+// ServiceLogs returns the logs generated by a service in a [ServiceLogsResult],
+// as an [io.ReadCloser]. Callers should close the stream.
+//
+// The underlying [io.ReadCloser] is automatically closed if the context is canceled.
+func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options ServiceLogsOptions) (ServiceLogsResult, error) {
+	// TODO(thaJeztah): this function needs documentation about the format of the stream (similar to that for container logs)
+	// TODO(thaJeztah): migrate CLI utilities to the client where suitable; https://github.com/docker/cli/blob/v29.0.0-rc.1/cli/command/service/logs.go#L73-L348
+
+	serviceID, err := trimID("service", serviceID)
+	if err != nil {
+		return nil, err
+	}
+
+	query := url.Values{}
+	if options.ShowStdout {
+		query.Set("stdout", "1")
+	}
+
+	if options.ShowStderr {
+		query.Set("stderr", "1")
+	}
+
+	if options.Since != "" {
+		ts, err := timestamp.GetTimestamp(options.Since, time.Now())
+		if err != nil {
+			return nil, fmt.Errorf(`invalid value for "since": %w`, err)
+		}
+		query.Set("since", ts)
+	}
+
+	if options.Timestamps {
+		query.Set("timestamps", "1")
+	}
+
+	if options.Details {
+		query.Set("details", "1")
+	}
+
+	if options.Follow {
+		query.Set("follow", "1")
+	}
+	query.Set("tail", options.Tail)
+
+	resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil)
+	if err != nil {
+		return nil, err
+	}
+	return &serviceLogsResult{
+		ReadCloser: newCancelReadCloser(ctx, resp.Body),
+	}, nil
+}
+
+type serviceLogsResult struct {
+	io.ReadCloser
}
+
+var (
+	_ io.ReadCloser     = (*serviceLogsResult)(nil)
+	_ ServiceLogsResult = (*serviceLogsResult)(nil)
+)
diff --git a/vendor/github.com/moby/moby/client/service_remove.go b/vendor/github.com/moby/moby/client/service_remove.go
new file mode 100644
index 000000000..163689b69
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/service_remove.go
@@ -0,0 +1,25 @@
+package client
+
+import "context"
+
+// ServiceRemoveOptions contains options for removing a service.
+type ServiceRemoveOptions struct {
+	// No options currently; placeholder for future use
+}
+
+// ServiceRemoveResult contains the result of removing a service.
+type ServiceRemoveResult struct {
+	// No fields currently; placeholder for future use
+}
+
+// ServiceRemove kills and removes a service.
+func (cli *Client) ServiceRemove(ctx context.Context, serviceID string, options ServiceRemoveOptions) (ServiceRemoveResult, error) {
+	serviceID, err := trimID("service", serviceID)
+	if err != nil {
+		return ServiceRemoveResult{}, err
+	}
+
+	resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
+	defer ensureReaderClosed(resp)
+	return ServiceRemoveResult{}, err
+}
diff --git a/vendor/github.com/moby/moby/client/service_update.go b/vendor/github.com/moby/moby/client/service_update.go
new file mode 100644
index 000000000..2505fe4b8
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/service_update.go
@@ -0,0 +1,114 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"net/http"
+	"net/url"
+
+	"github.com/moby/moby/api/types/registry"
+	"github.com/moby/moby/api/types/swarm"
+)
+
+// ServiceUpdateOptions contains the options to be used for updating services.
+type ServiceUpdateOptions struct {
+	Version swarm.Version
+	Spec    swarm.ServiceSpec
+
+	// EncodedRegistryAuth is the encoded registry authorization credentials to
+	// use when updating the service.
+	//
+	// This field follows the format of the X-Registry-Auth header.
+	EncodedRegistryAuth string
+
+	// TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
+	// into this field. While it does open API users up to racy writes, most
+	// users may not need that level of consistency in practice.
+
+	// RegistryAuthFrom specifies where to find the registry authorization
+	// credentials if they are not given in EncodedRegistryAuth. Valid
+	// values are "spec" and "previous-spec".
+	RegistryAuthFrom swarm.RegistryAuthSource
+
+	// Rollback indicates whether a server-side rollback should be
+	// performed. When this is set, the provided spec will be ignored.
+	// The valid values are "previous" and "none". An empty value is the
+	// same as "none".
+	Rollback string
+
+	// QueryRegistry indicates whether the service update requires
+	// contacting a registry. A registry may be contacted to retrieve
+	// the image digest and manifest, which in turn can be used to update
+	// platform or other information about the service.
+	QueryRegistry bool
+}
+
+// ServiceUpdateResult represents the result of a service update.
+type ServiceUpdateResult struct {
+	// Warnings contains any warnings that occurred during the update.
+	Warnings []string
+}
+
+// ServiceUpdate updates a Service. The version number is required to avoid
+// conflicting writes. It must be the value as set *before* the update.
+// You can find this value in the [swarm.Service.Meta] field, which can
+// be found using [Client.ServiceInspect].
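+//
+// An illustrative sketch of the inspect-then-update flow described above,
+// not part of the upstream file (assumes an initialized *Client named cli
+// and an existing service ID):
+//
+//	ins, err := cli.ServiceInspect(ctx, serviceID, ServiceInspectOptions{})
+//	if err != nil {
+//		return err
+//	}
+//	spec := ins.Service.Spec
+//	spec.TaskTemplate.ForceUpdate++ // hypothetical change to roll the service
+//	_, err = cli.ServiceUpdate(ctx, serviceID, ServiceUpdateOptions{
+//		Version: ins.Service.Version,
+//		Spec:    spec,
+//	})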
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, options ServiceUpdateOptions) (ServiceUpdateResult, error) { + serviceID, err := trimID("service", serviceID) + if err != nil { + return ServiceUpdateResult{}, err + } + + if err := validateServiceSpec(options.Spec); err != nil { + return ServiceUpdateResult{}, err + } + + query := url.Values{} + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", string(options.RegistryAuthFrom)) + } + + if options.Rollback != "" { + query.Set("rollback", options.Rollback) + } + + query.Set("version", options.Version.String()) + + // ensure that the image is tagged + var warnings []string + switch { + case options.Spec.TaskTemplate.ContainerSpec != nil: + if taggedImg := imageWithTagString(options.Spec.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + options.Spec.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + if warning := resolveContainerSpecImage(ctx, cli, &options.Spec.TaskTemplate, options.EncodedRegistryAuth); warning != "" { + warnings = append(warnings, warning) + } + } + case options.Spec.TaskTemplate.PluginSpec != nil: + if taggedImg := imageWithTagString(options.Spec.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + options.Spec.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + if warning := resolvePluginSpecRemote(ctx, cli, &options.Spec.TaskTemplate, options.EncodedRegistryAuth); warning != "" { + warnings = append(warnings, warning) + } + } + } + + headers := http.Header{} + if options.EncodedRegistryAuth != "" { + headers.Set(registry.AuthHeader, options.EncodedRegistryAuth) + } + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, options.Spec, headers) + defer ensureReaderClosed(resp) + if err != nil { + return ServiceUpdateResult{}, err + } + + var response swarm.ServiceUpdateResponse + err = json.NewDecoder(resp.Body).Decode(&response) + warnings = append(warnings, response.Warnings...) + return ServiceUpdateResult{Warnings: warnings}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_get_unlock_key.go b/vendor/github.com/moby/moby/client/swarm_get_unlock_key.go new file mode 100644 index 000000000..03ecce409 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_get_unlock_key.go @@ -0,0 +1,26 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmGetUnlockKeyResult contains the swarm unlock key. +type SwarmGetUnlockKeyResult struct { + Key string +} + +// SwarmGetUnlockKey retrieves the swarm's unlock key. +func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (SwarmGetUnlockKeyResult, error) { + resp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SwarmGetUnlockKeyResult{}, err + } + + var response swarm.UnlockKeyResponse + err = json.NewDecoder(resp.Body).Decode(&response) + return SwarmGetUnlockKeyResult{Key: response.UnlockKey}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_init.go b/vendor/github.com/moby/moby/client/swarm_init.go new file mode 100644 index 000000000..caad56085 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_init.go @@ -0,0 +1,54 @@ +package client + +import ( + "context" + "encoding/json" + "net/netip" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmInitOptions contains options for initializing a new swarm. 
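+//
+// An illustrative sketch, not part of the upstream file (assumes an
+// initialized *Client named cli; the addresses are placeholders):
+//
+//	res, err := cli.SwarmInit(ctx, SwarmInitOptions{
+//		ListenAddr:    "0.0.0.0:2377",
+//		AdvertiseAddr: "192.0.2.10:2377",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("node ID:", res.NodeID)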
+type SwarmInitOptions struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + DataPathPort uint32 + ForceNewCluster bool + Spec swarm.Spec + AutoLockManagers bool + Availability swarm.NodeAvailability + DefaultAddrPool []netip.Prefix + SubnetSize uint32 +} + +// SwarmInitResult contains the result of a SwarmInit operation. +type SwarmInitResult struct { + NodeID string +} + +// SwarmInit initializes the swarm. +func (cli *Client) SwarmInit(ctx context.Context, options SwarmInitOptions) (SwarmInitResult, error) { + req := swarm.InitRequest{ + ListenAddr: options.ListenAddr, + AdvertiseAddr: options.AdvertiseAddr, + DataPathAddr: options.DataPathAddr, + DataPathPort: options.DataPathPort, + ForceNewCluster: options.ForceNewCluster, + Spec: options.Spec, + AutoLockManagers: options.AutoLockManagers, + Availability: options.Availability, + DefaultAddrPool: options.DefaultAddrPool, + SubnetSize: options.SubnetSize, + } + + resp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SwarmInitResult{}, err + } + + var nodeID string + err = json.NewDecoder(resp.Body).Decode(&nodeID) + return SwarmInitResult{NodeID: nodeID}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_inspect.go b/vendor/github.com/moby/moby/client/swarm_inspect.go new file mode 100644 index 000000000..40e1d018a --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_inspect.go @@ -0,0 +1,31 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmInspectOptions holds options for inspecting a swarm. +type SwarmInspectOptions struct { + // Add future optional parameters here +} + +// SwarmInspectResult represents the result of a SwarmInspect operation. +type SwarmInspectResult struct { + Swarm swarm.Swarm +} + +// SwarmInspect inspects the swarm. +func (cli *Client) SwarmInspect(ctx context.Context, options SwarmInspectOptions) (SwarmInspectResult, error) { + resp, err := cli.get(ctx, "/swarm", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SwarmInspectResult{}, err + } + + var s swarm.Swarm + err = json.NewDecoder(resp.Body).Decode(&s) + return SwarmInspectResult{Swarm: s}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_join.go b/vendor/github.com/moby/moby/client/swarm_join.go new file mode 100644 index 000000000..66a754482 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_join.go @@ -0,0 +1,38 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmJoinOptions specifies options for joining a swarm. +type SwarmJoinOptions struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + RemoteAddrs []string + JoinToken string // accept by secret + Availability swarm.NodeAvailability +} + +// SwarmJoinResult contains the result of joining a swarm. +type SwarmJoinResult struct { + // No fields currently; placeholder for future use +} + +// SwarmJoin joins the swarm. 
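+//
+// An illustrative sketch, not part of the upstream file (assumes an
+// initialized *Client named cli; the addresses and join token are
+// placeholders):
+//
+//	_, err := cli.SwarmJoin(ctx, SwarmJoinOptions{
+//		ListenAddr:  "0.0.0.0:2377",
+//		RemoteAddrs: []string{"192.0.2.10:2377"},
+//		JoinToken:   "SWMTKN-...", // placeholder worker or manager token
+//	})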
+func (cli *Client) SwarmJoin(ctx context.Context, options SwarmJoinOptions) (SwarmJoinResult, error) {
+	req := swarm.JoinRequest{
+		ListenAddr:    options.ListenAddr,
+		AdvertiseAddr: options.AdvertiseAddr,
+		DataPathAddr:  options.DataPathAddr,
+		RemoteAddrs:   options.RemoteAddrs,
+		JoinToken:     options.JoinToken,
+		Availability:  options.Availability,
+	}
+
+	resp, err := cli.post(ctx, "/swarm/join", nil, req, nil)
+	defer ensureReaderClosed(resp)
+	return SwarmJoinResult{}, err
+}
diff --git a/vendor/github.com/moby/moby/client/swarm_leave.go b/vendor/github.com/moby/moby/client/swarm_leave.go
new file mode 100644
index 000000000..a65a13de3
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/swarm_leave.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+	"context"
+	"net/url"
+)
+
+// SwarmLeaveOptions contains options for leaving a swarm.
+type SwarmLeaveOptions struct {
+	Force bool
+}
+
+// SwarmLeaveResult represents the result of a SwarmLeave operation.
+type SwarmLeaveResult struct{}
+
+// SwarmLeave leaves the swarm.
+func (cli *Client) SwarmLeave(ctx context.Context, options SwarmLeaveOptions) (SwarmLeaveResult, error) {
+	query := url.Values{}
+	if options.Force {
+		query.Set("force", "1")
+	}
+	resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil)
+	defer ensureReaderClosed(resp)
+	return SwarmLeaveResult{}, err
+}
diff --git a/vendor/github.com/moby/moby/client/swarm_unlock.go b/vendor/github.com/moby/moby/client/swarm_unlock.go
new file mode 100644
index 000000000..92335afb5
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/swarm_unlock.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+	"context"
+
+	"github.com/moby/moby/api/types/swarm"
+)
+
+// SwarmUnlockOptions specifies options for unlocking a swarm.
+type SwarmUnlockOptions struct {
+	Key string
+}
+
+// SwarmUnlockResult represents the result of unlocking a swarm.
+type SwarmUnlockResult struct{}
+
+// SwarmUnlock unlocks a locked swarm.
+func (cli *Client) SwarmUnlock(ctx context.Context, options SwarmUnlockOptions) (SwarmUnlockResult, error) {
+	req := &swarm.UnlockRequest{
+		UnlockKey: options.Key,
+	}
+	resp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil)
+	defer ensureReaderClosed(resp)
+	return SwarmUnlockResult{}, err
+}
diff --git a/vendor/github.com/moby/moby/client/swarm_update.go b/vendor/github.com/moby/moby/client/swarm_update.go
new file mode 100644
index 000000000..81f62b2c0
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/swarm_update.go
@@ -0,0 +1,33 @@
+package client
+
+import (
+	"context"
+	"net/url"
+	"strconv"
+
+	"github.com/moby/moby/api/types/swarm"
+)
+
+// SwarmUpdateOptions contains options for updating a swarm.
+type SwarmUpdateOptions struct {
+	Version                swarm.Version
+	Spec                   swarm.Spec
+	RotateWorkerToken      bool
+	RotateManagerToken     bool
+	RotateManagerUnlockKey bool
+}
+
+// SwarmUpdateResult represents the result of a SwarmUpdate operation.
+type SwarmUpdateResult struct{}
+
+// SwarmUpdate updates the swarm.
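+//
+// An illustrative sketch of rotating the worker join token, not part of the
+// upstream file (assumes an initialized *Client named cli):
+//
+//	ins, err := cli.SwarmInspect(ctx, SwarmInspectOptions{})
+//	if err != nil {
+//		return err
+//	}
+//	_, err = cli.SwarmUpdate(ctx, SwarmUpdateOptions{
+//		Version:           ins.Swarm.Version,
+//		Spec:              ins.Swarm.Spec,
+//		RotateWorkerToken: true,
+//	})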
+func (cli *Client) SwarmUpdate(ctx context.Context, options SwarmUpdateOptions) (SwarmUpdateResult, error) { + query := url.Values{} + query.Set("version", options.Version.String()) + query.Set("rotateWorkerToken", strconv.FormatBool(options.RotateWorkerToken)) + query.Set("rotateManagerToken", strconv.FormatBool(options.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", strconv.FormatBool(options.RotateManagerUnlockKey)) + resp, err := cli.post(ctx, "/swarm/update", query, options.Spec, nil) + defer ensureReaderClosed(resp) + return SwarmUpdateResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/system_disk_usage.go b/vendor/github.com/moby/moby/client/system_disk_usage.go new file mode 100644 index 000000000..64a369df8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/system_disk_usage.go @@ -0,0 +1,332 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + "slices" + + "github.com/moby/moby/api/types/build" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/image" + "github.com/moby/moby/api/types/system" + "github.com/moby/moby/api/types/volume" + "github.com/moby/moby/client/pkg/versions" +) + +// DiskUsageOptions holds parameters for [Client.DiskUsage] operations. +type DiskUsageOptions struct { + // Containers controls whether container disk usage should be computed. + Containers bool + + // Images controls whether image disk usage should be computed. + Images bool + + // BuildCache controls whether build cache disk usage should be computed. + BuildCache bool + + // Volumes controls whether volume disk usage should be computed. + Volumes bool + + // Verbose enables more detailed disk usage information. + Verbose bool +} + +// DiskUsageResult is the result of [Client.DiskUsage] operations. +type DiskUsageResult struct { + // Containers holds container disk usage information. + Containers ContainersDiskUsage + + // Images holds image disk usage information. + Images ImagesDiskUsage + + // BuildCache holds build cache disk usage information. + BuildCache BuildCacheDiskUsage + + // Volumes holds volume disk usage information. + Volumes VolumesDiskUsage +} + +// ContainersDiskUsage contains disk usage information for containers. +type ContainersDiskUsage struct { + // ActiveCount is the number of active containers. + ActiveCount int64 + + // TotalCount is the total number of containers. + TotalCount int64 + + // Reclaimable is the amount of disk space that can be reclaimed. + Reclaimable int64 + + // TotalSize is the total disk space used by all containers. + TotalSize int64 + + // Items holds detailed information about each container. + Items []container.Summary +} + +// ImagesDiskUsage contains disk usage information for images. +type ImagesDiskUsage struct { + // ActiveCount is the number of active images. + ActiveCount int64 + + // TotalCount is the total number of images. + TotalCount int64 + + // Reclaimable is the amount of disk space that can be reclaimed. + Reclaimable int64 + + // TotalSize is the total disk space used by all images. + TotalSize int64 + + // Items holds detailed information about each image. + Items []image.Summary +} + +// VolumesDiskUsage contains disk usage information for volumes. +type VolumesDiskUsage struct { + // ActiveCount is the number of active volumes. + ActiveCount int64 + + // TotalCount is the total number of volumes. + TotalCount int64 + + // Reclaimable is the amount of disk space that can be reclaimed. 
+ Reclaimable int64 + + // TotalSize is the total disk space used by all volumes. + TotalSize int64 + + // Items holds detailed information about each volume. + Items []volume.Volume +} + +// BuildCacheDiskUsage contains disk usage information for build cache. +type BuildCacheDiskUsage struct { + // ActiveCount is the number of active build cache records. + ActiveCount int64 + + // TotalCount is the total number of build cache records. + TotalCount int64 + + // Reclaimable is the amount of disk space that can be reclaimed. + Reclaimable int64 + + // TotalSize is the total disk space used by all build cache records. + TotalSize int64 + + // Items holds detailed information about each build cache record. + Items []build.CacheRecord +} + +// DiskUsage requests the current data usage from the daemon. +func (cli *Client) DiskUsage(ctx context.Context, options DiskUsageOptions) (DiskUsageResult, error) { + query := url.Values{} + + for _, t := range []struct { + flag bool + sysObj system.DiskUsageObject + }{ + {options.Containers, system.ContainerObject}, + {options.Images, system.ImageObject}, + {options.Volumes, system.VolumeObject}, + {options.BuildCache, system.BuildCacheObject}, + } { + if t.flag { + query.Add("type", string(t.sysObj)) + } + } + + if options.Verbose { + query.Set("verbose", "1") + } + + resp, err := cli.get(ctx, "/system/df", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return DiskUsageResult{}, err + } + + if versions.LessThan(cli.version, "1.52") { + // Generate result from a legacy response. + var du legacyDiskUsage + if err := json.NewDecoder(resp.Body).Decode(&du); err != nil { + return DiskUsageResult{}, fmt.Errorf("retrieving disk usage: %v", err) + } + + return diskUsageResultFromLegacyAPI(&du), nil + } + + var du system.DiskUsage + if err := json.NewDecoder(resp.Body).Decode(&du); err != nil { + return DiskUsageResult{}, fmt.Errorf("retrieving disk usage: %v", err) + } + + var r DiskUsageResult + if idu := du.ImageUsage; idu != nil { + r.Images = ImagesDiskUsage{ + ActiveCount: idu.ActiveCount, + Reclaimable: idu.Reclaimable, + TotalCount: idu.TotalCount, + TotalSize: idu.TotalSize, + } + + if options.Verbose { + r.Images.Items = slices.Clone(idu.Items) + } + } + + if cdu := du.ContainerUsage; cdu != nil { + r.Containers = ContainersDiskUsage{ + ActiveCount: cdu.ActiveCount, + Reclaimable: cdu.Reclaimable, + TotalCount: cdu.TotalCount, + TotalSize: cdu.TotalSize, + } + + if options.Verbose { + r.Containers.Items = slices.Clone(cdu.Items) + } + } + + if bdu := du.BuildCacheUsage; bdu != nil { + r.BuildCache = BuildCacheDiskUsage{ + ActiveCount: bdu.ActiveCount, + Reclaimable: bdu.Reclaimable, + TotalCount: bdu.TotalCount, + TotalSize: bdu.TotalSize, + } + + if options.Verbose { + r.BuildCache.Items = slices.Clone(bdu.Items) + } + } + + if vdu := du.VolumeUsage; vdu != nil { + r.Volumes = VolumesDiskUsage{ + ActiveCount: vdu.ActiveCount, + Reclaimable: vdu.Reclaimable, + TotalCount: vdu.TotalCount, + TotalSize: vdu.TotalSize, + } + + if options.Verbose { + r.Volumes.Items = slices.Clone(vdu.Items) + } + } + + return r, nil +} + +// legacyDiskUsage is the response as was used by API < v1.52. 
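+//
+// Callers do not need to decode this legacy shape themselves: [Client.DiskUsage]
+// converts it transparently. An illustrative sketch, not part of the upstream
+// file (assumes an initialized *Client named cli):
+//
+//	res, err := cli.DiskUsage(ctx, DiskUsageOptions{Images: true, Verbose: true})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("reclaimable image bytes:", res.Images.Reclaimable)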
+type legacyDiskUsage struct { + LayersSize int64 `json:"LayersSize,omitempty"` + Images []image.Summary `json:"Images,omitzero"` + Containers []container.Summary `json:"Containers,omitzero"` + Volumes []volume.Volume `json:"Volumes,omitzero"` + BuildCache []build.CacheRecord `json:"BuildCache,omitzero"` +} + +func diskUsageResultFromLegacyAPI(du *legacyDiskUsage) DiskUsageResult { + return DiskUsageResult{ + Images: imageDiskUsageFromLegacyAPI(du), + Containers: containerDiskUsageFromLegacyAPI(du), + BuildCache: buildCacheDiskUsageFromLegacyAPI(du), + Volumes: volumeDiskUsageFromLegacyAPI(du), + } +} + +func imageDiskUsageFromLegacyAPI(du *legacyDiskUsage) ImagesDiskUsage { + idu := ImagesDiskUsage{ + TotalSize: du.LayersSize, + TotalCount: int64(len(du.Images)), + Items: du.Images, + } + + var used int64 + for _, i := range idu.Items { + if i.Containers > 0 { + idu.ActiveCount++ + + if i.Size == -1 || i.SharedSize == -1 { + continue + } + used += (i.Size - i.SharedSize) + } + } + + if idu.TotalCount > 0 { + idu.Reclaimable = idu.TotalSize - used + } + + return idu +} + +func containerDiskUsageFromLegacyAPI(du *legacyDiskUsage) ContainersDiskUsage { + cdu := ContainersDiskUsage{ + TotalCount: int64(len(du.Containers)), + Items: du.Containers, + } + + var used int64 + for _, c := range cdu.Items { + cdu.TotalSize += c.SizeRw + switch c.State { + case container.StateRunning, container.StatePaused, container.StateRestarting: + cdu.ActiveCount++ + used += c.SizeRw + } + } + + cdu.Reclaimable = cdu.TotalSize - used + return cdu +} + +func buildCacheDiskUsageFromLegacyAPI(du *legacyDiskUsage) BuildCacheDiskUsage { + bdu := BuildCacheDiskUsage{ + TotalCount: int64(len(du.BuildCache)), + Items: du.BuildCache, + } + + var used int64 + for _, b := range du.BuildCache { + if !b.Shared { + bdu.TotalSize += b.Size + } + + if b.InUse { + bdu.ActiveCount++ + if !b.Shared { + used += b.Size + } + } + } + + bdu.Reclaimable = bdu.TotalSize - used + return bdu +} + +func volumeDiskUsageFromLegacyAPI(du *legacyDiskUsage) VolumesDiskUsage { + vdu := VolumesDiskUsage{ + TotalCount: int64(len(du.Volumes)), + Items: du.Volumes, + } + + var used int64 + for _, v := range vdu.Items { + // Ignore volumes with no usage data + if v.UsageData != nil { + if v.UsageData.RefCount > 0 { + vdu.ActiveCount++ + used += v.UsageData.Size + } + if v.UsageData.Size > 0 { + vdu.TotalSize += v.UsageData.Size + } + } + } + + vdu.Reclaimable = vdu.TotalSize - used + return vdu +} diff --git a/vendor/github.com/moby/moby/client/system_events.go b/vendor/github.com/moby/moby/client/system_events.go new file mode 100644 index 000000000..748e01208 --- /dev/null +++ b/vendor/github.com/moby/moby/client/system_events.go @@ -0,0 +1,114 @@ +package client + +import ( + "context" + "net/http" + "net/url" + "time" + + "github.com/moby/moby/api/types" + "github.com/moby/moby/api/types/events" + "github.com/moby/moby/client/internal" + "github.com/moby/moby/client/internal/timestamp" +) + +// EventsListOptions holds parameters to filter events with. +type EventsListOptions struct { + Since string + Until string + Filters Filters +} + +// EventsResult holds the result of an Events query. +type EventsResult struct { + Messages <-chan events.Message + Err <-chan error +} + +// Events returns a stream of events in the daemon. It's up to the caller to close the stream +// by cancelling the context. Once the stream has been completely read an [io.EOF] error is +// sent over the error channel. If an error is sent, all processing is stopped. 
It's up +// to the caller to reopen the stream in the event of an error by reinvoking this method. +func (cli *Client) Events(ctx context.Context, options EventsListOptions) EventsResult { + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(options) + if err != nil { + close(started) + errs <- err + return + } + + headers := http.Header{} + headers.Add("Accept", types.MediaTypeJSONSequence) + headers.Add("Accept", types.MediaTypeNDJSON) + resp, err := cli.get(ctx, "/events", query, headers) + if err != nil { + close(started) + errs <- err + return + } + defer resp.Body.Close() + + contentType := resp.Header.Get("Content-Type") + decoder := internal.NewJSONStreamDecoder(resp.Body, contentType) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return EventsResult{ + Messages: messages, + Err: errs, + } +} + +func buildEventsQueryParams(options EventsListOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timestamp.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timestamp.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + options.Filters.updateURLValues(query) + + return query, nil +} diff --git a/vendor/github.com/moby/moby/client/system_info.go b/vendor/github.com/moby/moby/client/system_info.go new file mode 100644 index 000000000..4c0a2238e --- /dev/null +++ b/vendor/github.com/moby/moby/client/system_info.go @@ -0,0 +1,34 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/moby/moby/api/types/system" +) + +type InfoOptions struct { + // No options currently; placeholder for future use +} + +type SystemInfoResult struct { + Info system.Info +} + +// Info returns information about the docker server. +func (cli *Client) Info(ctx context.Context, options InfoOptions) (SystemInfoResult, error) { + resp, err := cli.get(ctx, "/info", url.Values{}, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SystemInfoResult{}, err + } + + var info system.Info + if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { + return SystemInfoResult{}, fmt.Errorf("Error reading remote info: %v", err) + } + + return SystemInfoResult{Info: info}, nil +} diff --git a/vendor/github.com/moby/moby/client/task_inspect.go b/vendor/github.com/moby/moby/client/task_inspect.go new file mode 100644 index 000000000..96edcb09f --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_inspect.go @@ -0,0 +1,36 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// TaskInspectOptions contains options for inspecting a task. +type TaskInspectOptions struct { + // Currently no options are defined. +} + +// TaskInspectResult contains the result of a task inspection. +type TaskInspectResult struct { + Task swarm.Task + Raw json.RawMessage +} + +// TaskInspect returns the task information and its raw representation. 
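+//
+// An illustrative sketch, not part of the upstream file (assumes an
+// initialized *Client named cli and an existing task ID):
+//
+//	res, err := cli.TaskInspect(ctx, taskID, TaskInspectOptions{})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(res.Task.Status.State)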
+func (cli *Client) TaskInspect(ctx context.Context, taskID string, options TaskInspectOptions) (TaskInspectResult, error) {
+	taskID, err := trimID("task", taskID)
+	if err != nil {
+		return TaskInspectResult{}, err
+	}
+
+	resp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
+	if err != nil {
+		return TaskInspectResult{}, err
+	}
+
+	var out TaskInspectResult
+	out.Raw, err = decodeWithRaw(resp, &out.Task)
+	return out, err
+}
diff --git a/vendor/github.com/moby/moby/client/task_list.go b/vendor/github.com/moby/moby/client/task_list.go
new file mode 100644
index 000000000..5f7c41bb9
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/task_list.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"net/url"
+
+	"github.com/moby/moby/api/types/swarm"
+)
+
+// TaskListOptions holds parameters to list tasks with.
+type TaskListOptions struct {
+	Filters Filters
+}
+
+// TaskListResult contains the result of a task list operation.
+type TaskListResult struct {
+	Items []swarm.Task
+}
+
+// TaskList returns the list of tasks.
+func (cli *Client) TaskList(ctx context.Context, options TaskListOptions) (TaskListResult, error) {
+	query := url.Values{}
+
+	options.Filters.updateURLValues(query)
+
+	resp, err := cli.get(ctx, "/tasks", query, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return TaskListResult{}, err
+	}
+
+	var tasks []swarm.Task
+	err = json.NewDecoder(resp.Body).Decode(&tasks)
+	return TaskListResult{Items: tasks}, err
+}
diff --git a/vendor/github.com/moby/moby/client/task_logs.go b/vendor/github.com/moby/moby/client/task_logs.go
new file mode 100644
index 000000000..e4de019f3
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/task_logs.go
@@ -0,0 +1,84 @@
+package client
+
+import (
+	"context"
+	"io"
+	"net/url"
+	"time"
+
+	"github.com/moby/moby/client/internal/timestamp"
+)
+
+// TaskLogsOptions holds parameters to filter logs with.
+type TaskLogsOptions struct {
+	ShowStdout bool
+	ShowStderr bool
+	Since      string
+	Until      string
+	Timestamps bool
+	Follow     bool
+	Tail       string
+	Details    bool
+}
+
+// TaskLogsResult holds the result of a task logs operation.
+// It implements [io.ReadCloser].
+type TaskLogsResult interface {
+	io.ReadCloser
+}
+
+// TaskLogs returns the logs generated by a service in a [TaskLogsResult],
+// as an [io.ReadCloser]. Callers should close the stream.
+//
+// The underlying [io.ReadCloser] is automatically closed if the context is canceled.
+func (cli *Client) TaskLogs(ctx context.Context, taskID string, options TaskLogsOptions) (TaskLogsResult, error) {
+	// TODO(thaJeztah): this function needs documentation about the format of the stream (similar to container logs)
+	// TODO(thaJeztah): migrate CLI utilities to the client where suitable; https://github.com/docker/cli/blob/v29.0.0-rc.1/cli/command/service/logs.go#L73-L348
+
+	query := url.Values{}
+	if options.ShowStdout {
+		query.Set("stdout", "1")
+	}
+
+	if options.ShowStderr {
+		query.Set("stderr", "1")
+	}
+
+	if options.Since != "" {
+		ts, err := timestamp.GetTimestamp(options.Since, time.Now())
+		if err != nil {
+			return nil, err
+		}
+		query.Set("since", ts)
+	}
+
+	if options.Timestamps {
+		query.Set("timestamps", "1")
+	}
+
+	if options.Details {
+		query.Set("details", "1")
+	}
+
+	if options.Follow {
+		query.Set("follow", "1")
+	}
+	query.Set("tail", options.Tail)
+
+	resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil)
+	if err != nil {
+		return nil, err
+	}
+	return &taskLogsResult{
+		ReadCloser: newCancelReadCloser(ctx, resp.Body),
+	}, nil
+}
+
+type taskLogsResult struct {
+	io.ReadCloser
+}
+
+var (
+	_ io.ReadCloser       = (*taskLogsResult)(nil)
+	_ ContainerLogsResult = (*taskLogsResult)(nil)
+)
diff --git a/vendor/github.com/moby/moby/client/utils.go b/vendor/github.com/moby/moby/client/utils.go
new file mode 100644
index 000000000..f2ba4744c
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/utils.go
@@ -0,0 +1,149 @@
+package client
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+
+	cerrdefs "github.com/containerd/errdefs"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type emptyIDError string
+
+func (e emptyIDError) InvalidParameter() {}
+
+func (e emptyIDError) Error() string {
+	return "invalid " + string(e) + " name or ID: value is empty"
+}
+
+// trimID trims the given object-ID / name, returning an error if it's empty.
+func trimID(objType, id string) (string, error) {
+	id = strings.TrimSpace(id)
+	if id == "" {
+		return "", emptyIDError(objType)
+	}
+	return id, nil
+}
+
+// parseAPIVersion checks v to be a well-formed ("<major>.<minor>")
+// API version. It returns an error if the value is empty or does not
+// have the correct format, but does not validate if the API version is
+// within the supported range ([MinAPIVersion] <= v <= [MaxAPIVersion]).
+//
+// It returns version after normalizing, or an error if validation failed.
+func parseAPIVersion(version string) (string, error) {
+	if strings.TrimPrefix(strings.TrimSpace(version), "v") == "" {
+		return "", cerrdefs.ErrInvalidArgument.WithMessage("value is empty")
+	}
+	major, minor, err := parseMajorMinor(version)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%d.%d", major, minor), nil
+}
+
+// parseMajorMinor is a helper for parseAPIVersion.
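Taken together with the parseMajorMinor helper that follows, the accepted and rejected inputs look like this (an illustrative, in-package sketch; both functions are unexported):

// parseAPIVersion("1.51")   // -> "1.51", nil
// parseAPIVersion("01.02")  // -> "1.2", nil (normalized via %d.%d)
// parseAPIVersion("v1.51")  // -> error: leading "v" is rejected by parseMajorMinor
// parseAPIVersion("1")      // -> error: no "." separator
// parseAPIVersion("")       // -> error: value is empty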
+func parseMajorMinor(v string) (major, minor int, _ error) {
+	if strings.HasPrefix(v, "v") {
+		return 0, 0, cerrdefs.ErrInvalidArgument.WithMessage("must be formatted <major>.<minor>")
+	}
+	if strings.TrimSpace(v) == "" {
+		return 0, 0, cerrdefs.ErrInvalidArgument.WithMessage("value is empty")
+	}
+
+	majVer, minVer, ok := strings.Cut(v, ".")
+	if !ok {
+		return 0, 0, cerrdefs.ErrInvalidArgument.WithMessage("must be formatted <major>.<minor>")
+	}
+	major, err := strconv.Atoi(majVer)
+	if err != nil {
+		return 0, 0, cerrdefs.ErrInvalidArgument.WithMessage("invalid major version: must be formatted <major>.<minor>")
+	}
+	minor, err = strconv.Atoi(minVer)
+	if err != nil {
+		return 0, 0, cerrdefs.ErrInvalidArgument.WithMessage("invalid minor version: must be formatted <major>.<minor>")
+	}
+	return major, minor, nil
+}
+
+// encodePlatforms marshals the given platform(s) to JSON format, to
+// be used for query-parameters for filtering / selecting platforms.
+func encodePlatforms(platform ...ocispec.Platform) ([]string, error) {
+	if len(platform) == 0 {
+		return []string{}, nil
+	}
+	if len(platform) == 1 {
+		p, err := encodePlatform(&platform[0])
+		if err != nil {
+			return nil, err
+		}
+		return []string{p}, nil
+	}
+
+	seen := make(map[string]struct{}, len(platform))
+	out := make([]string, 0, len(platform))
+	for i := range platform {
+		p, err := encodePlatform(&platform[i])
+		if err != nil {
+			return nil, err
+		}
+		if _, ok := seen[p]; !ok {
+			out = append(out, p)
+			seen[p] = struct{}{}
+		}
+	}
+	return out, nil
+}
+
+// encodePlatform marshals the given platform to JSON format, to
+// be used for query-parameters for filtering / selecting platforms. It
+// is used as a helper for encodePlatforms.
+func encodePlatform(platform *ocispec.Platform) (string, error) {
+	p, err := json.Marshal(platform)
+	if err != nil {
+		return "", fmt.Errorf("%w: invalid platform: %v", cerrdefs.ErrInvalidArgument, err)
+	}
+	return string(p), nil
+}
+
+// decodeWithRaw decodes the JSON response body into out while also
+// capturing the raw, undecoded body.
+func decodeWithRaw[T any](resp *http.Response, out *T) (raw json.RawMessage, _ error) {
+	if resp == nil || resp.Body == nil {
+		return nil, errors.New("empty response")
+	}
+	defer ensureReaderClosed(resp)
+
+	var buf bytes.Buffer
+	tr := io.TeeReader(resp.Body, &buf)
+	err := json.NewDecoder(tr).Decode(out)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// newCancelReadCloser wraps rc so it's automatically closed when ctx is canceled.
+// Close is idempotent and returns the first error from rc.Close.
+func newCancelReadCloser(ctx context.Context, rc io.ReadCloser) io.ReadCloser {
+	crc := &cancelReadCloser{
+		rc:    rc,
+		close: sync.OnceValue(rc.Close),
+	}
+	context.AfterFunc(ctx, func() { _ = crc.Close() })
+	return crc
+}
+
+type cancelReadCloser struct {
+	rc    io.ReadCloser
+	close func() error
+}
+
+func (c *cancelReadCloser) Read(p []byte) (int, error) { return c.rc.Read(p) }
+func (c *cancelReadCloser) Close() error               { return c.close() }
diff --git a/vendor/github.com/moby/moby/client/version.go b/vendor/github.com/moby/moby/client/version.go
new file mode 100644
index 000000000..7fa5a3fa0
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/version.go
@@ -0,0 +1,81 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/moby/moby/api/types/system"
+)
+
+// ServerVersionOptions specifies options for the server version request.
+type ServerVersionOptions struct {
+	// Currently no options are supported.
+}
+
+// ServerVersionResult contains information about the Docker server host.
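decodeWithRaw above is what lets the *Inspect methods in this diff return both a typed struct and the verbatim response body in one pass: the body is teed into a buffer while the JSON decoder consumes it. A hedged usage sketch against the VolumeInspect method shown later in this diff (the package name, helper and volume name are hypothetical):

package example

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

func inspectVolume(ctx context.Context, cli *client.Client) error {
	res, err := cli.VolumeInspect(ctx, "app-data", client.VolumeInspectOptions{})
	if err != nil {
		return err
	}
	fmt.Println(res.Volume.Driver) // typed view, decoded via the TeeReader
	fmt.Println(string(res.Raw))   // verbatim server JSON, captured in the buffer
	return nil
}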
+type ServerVersionResult struct { + // Platform is the platform (product name) the server is running on. + Platform PlatformInfo + + // Version is the version of the daemon. + Version string + + // APIVersion is the highest API version supported by the server. + APIVersion string + + // MinAPIVersion is the minimum API version the server supports. + MinAPIVersion string + + // Os is the operating system the server runs on. + Os string + + // Arch is the hardware architecture the server runs on. + Arch string + + // Experimental indicates that the daemon runs with experimental + // features enabled. + // + // Deprecated: this field will be removed in the next version. + Experimental bool + + // Components contains version information for the components making + // up the server. Information in this field is for informational + // purposes, and not part of the API contract. + Components []system.ComponentVersion +} + +// PlatformInfo holds information about the platform (product name) the +// server is running on. +type PlatformInfo struct { + // Name is the name of the platform (for example, "Docker Engine - Community", + // or "Docker Desktop 4.49.0 (208003)") + Name string +} + +// ServerVersion returns information of the Docker server host. +func (cli *Client) ServerVersion(ctx context.Context, _ ServerVersionOptions) (ServerVersionResult, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ServerVersionResult{}, err + } + + var v system.VersionResponse + err = json.NewDecoder(resp.Body).Decode(&v) + if err != nil { + return ServerVersionResult{}, err + } + + return ServerVersionResult{ + Platform: PlatformInfo{ + Name: v.Platform.Name, + }, + Version: v.Version, + APIVersion: v.APIVersion, + MinAPIVersion: v.MinAPIVersion, + Os: v.Os, + Arch: v.Arch, + Experimental: v.Experimental, //nolint:staticcheck // ignore deprecated field. + Components: v.Components, + }, nil +} diff --git a/vendor/github.com/moby/moby/client/volume_create.go b/vendor/github.com/moby/moby/client/volume_create.go new file mode 100644 index 000000000..674e06335 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_create.go @@ -0,0 +1,42 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/volume" +) + +// VolumeCreateOptions specifies the options to create a volume. +type VolumeCreateOptions struct { + Name string + Driver string + DriverOpts map[string]string + Labels map[string]string + ClusterVolumeSpec *volume.ClusterVolumeSpec +} + +// VolumeCreateResult is the result of a volume creation. +type VolumeCreateResult struct { + Volume volume.Volume +} + +// VolumeCreate creates a volume in the docker host. 
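A short usage sketch of the ServerVersion method above, assuming a constructed *client.Client (the package name and printVersion helper are illustrative):

package example

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

func printVersion(ctx context.Context, cli *client.Client) error {
	// The options struct is currently empty, so the zero value is passed.
	v, err := cli.ServerVersion(ctx, client.ServerVersionOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("%s %s (API %s, min %s) on %s/%s\n",
		v.Platform.Name, v.Version, v.APIVersion, v.MinAPIVersion, v.Os, v.Arch)
	return nil
}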
+func (cli *Client) VolumeCreate(ctx context.Context, options VolumeCreateOptions) (VolumeCreateResult, error) { + createRequest := volume.CreateRequest{ + Name: options.Name, + Driver: options.Driver, + DriverOpts: options.DriverOpts, + Labels: options.Labels, + ClusterVolumeSpec: options.ClusterVolumeSpec, + } + resp, err := cli.post(ctx, "/volumes/create", nil, createRequest, nil) + defer ensureReaderClosed(resp) + if err != nil { + return VolumeCreateResult{}, err + } + + var v volume.Volume + err = json.NewDecoder(resp.Body).Decode(&v) + return VolumeCreateResult{Volume: v}, err +} diff --git a/vendor/github.com/moby/moby/client/volume_inspect.go b/vendor/github.com/moby/moby/client/volume_inspect.go new file mode 100644 index 000000000..cf00236a2 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_inspect.go @@ -0,0 +1,36 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/volume" +) + +// VolumeInspectOptions holds options for inspecting a volume. +type VolumeInspectOptions struct { + // Add future optional parameters here +} + +// VolumeInspectResult holds the result from the [Client.VolumeInspect] method. +type VolumeInspectResult struct { + Volume volume.Volume + Raw json.RawMessage +} + +// VolumeInspect returns the information about a specific volume in the docker host. +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string, options VolumeInspectOptions) (VolumeInspectResult, error) { + volumeID, err := trimID("volume", volumeID) + if err != nil { + return VolumeInspectResult{}, err + } + + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + if err != nil { + return VolumeInspectResult{}, err + } + + var out VolumeInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Volume) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/volume_list.go b/vendor/github.com/moby/moby/client/volume_list.go new file mode 100644 index 000000000..989a0292e --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_list.go @@ -0,0 +1,46 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/volume" +) + +// VolumeListOptions holds parameters to list volumes. +type VolumeListOptions struct { + Filters Filters +} + +// VolumeListResult holds the result from the [Client.VolumeList] method. +type VolumeListResult struct { + // List of volumes. + Items []volume.Volume + + // Warnings that occurred when fetching the list of volumes. + Warnings []string +} + +// VolumeList returns the volumes configured in the docker host. 
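A hedged sketch of the VolumeCreate call above (the package name, helper, volume name, driver and label values are hypothetical):

package example

import (
	"context"

	"github.com/moby/moby/client"
)

func createVolume(ctx context.Context, cli *client.Client) (string, error) {
	res, err := cli.VolumeCreate(ctx, client.VolumeCreateOptions{
		Name:   "app-data", // hypothetical
		Driver: "local",
		Labels: map[string]string{"example": "true"}, // hypothetical
	})
	if err != nil {
		return "", err
	}
	// The daemon fills in defaults (mountpoint, scope, ...) in the returned volume.
	return res.Volume.Name, nil
}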
+func (cli *Client) VolumeList(ctx context.Context, options VolumeListOptions) (VolumeListResult, error) {
+	query := url.Values{}
+
+	options.Filters.updateURLValues(query)
+	resp, err := cli.get(ctx, "/volumes", query, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return VolumeListResult{}, err
+	}
+
+	var apiResp volume.ListResponse
+	err = json.NewDecoder(resp.Body).Decode(&apiResp)
+	if err != nil {
+		return VolumeListResult{}, err
+	}
+
+	return VolumeListResult{
+		Items:    apiResp.Volumes,
+		Warnings: apiResp.Warnings,
+	}, nil
+}
diff --git a/vendor/github.com/moby/moby/client/volume_prune.go b/vendor/github.com/moby/moby/client/volume_prune.go
new file mode 100644
index 000000000..561e328d7
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/volume_prune.go
@@ -0,0 +1,55 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/containerd/errdefs"
+	"github.com/moby/moby/api/types/volume"
+)
+
+// VolumePruneOptions holds parameters to prune volumes.
+type VolumePruneOptions struct {
+	// All controls whether named volumes should also be pruned. By
+	// default, only anonymous volumes are pruned.
+	All bool
+
+	// Filters to apply when pruning.
+	Filters Filters
+}
+
+// VolumePruneResult holds the result from the [Client.VolumePrune] method.
+type VolumePruneResult struct {
+	Report volume.PruneReport
+}
+
+// VolumePrune requests the daemon to delete unused data.
+func (cli *Client) VolumePrune(ctx context.Context, options VolumePruneOptions) (VolumePruneResult, error) {
+	if options.All {
+		if _, ok := options.Filters["all"]; ok {
+			return VolumePruneResult{}, errdefs.ErrInvalidArgument.WithMessage(`conflicting options: cannot specify both "all" and "all" filter`)
+		}
+		if options.Filters == nil {
+			options.Filters = Filters{}
+		}
+		options.Filters.Add("all", "true")
+	}
+
+	query := url.Values{}
+	options.Filters.updateURLValues(query)
+
+	resp, err := cli.post(ctx, "/volumes/prune", query, nil, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return VolumePruneResult{}, err
+	}
+
+	var report volume.PruneReport
+	if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
+		return VolumePruneResult{}, fmt.Errorf("error retrieving volume prune report: %v", err)
+	}
+
+	return VolumePruneResult{Report: report}, nil
+}
diff --git a/vendor/github.com/moby/moby/client/volume_remove.go b/vendor/github.com/moby/moby/client/volume_remove.go
new file mode 100644
index 000000000..0449e08d4
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/volume_remove.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+	"context"
+	"net/url"
+)
+
+// VolumeRemoveOptions holds options for [Client.VolumeRemove].
+type VolumeRemoveOptions struct {
+	// Force the removal of the volume.
+	Force bool
+}
+
+// VolumeRemoveResult holds the result of [Client.VolumeRemove].
+type VolumeRemoveResult struct {
+	// Add future fields here.
+}
+
+// VolumeRemove removes a volume from the docker host.
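The VolumePrune options above encode the "prune named volumes too" choice as a struct field rather than a raw filter. A usage sketch (package name and helper are illustrative):

package example

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

func pruneAllVolumes(ctx context.Context, cli *client.Client) error {
	// Setting All injects the all=true filter; supplying both All and a
	// manual "all" filter is rejected with an invalid-argument error,
	// as guarded in VolumePrune above.
	res, err := cli.VolumePrune(ctx, client.VolumePruneOptions{All: true})
	if err != nil {
		return err
	}
	fmt.Printf("reclaimed %d bytes\n", res.Report.SpaceReclaimed)
	return nil
}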
+func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, options VolumeRemoveOptions) (VolumeRemoveResult, error) {
+	volumeID, err := trimID("volume", volumeID)
+	if err != nil {
+		return VolumeRemoveResult{}, err
+	}
+
+	query := url.Values{}
+	if options.Force {
+		query.Set("force", "1")
+	}
+	resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return VolumeRemoveResult{}, err
+	}
+	return VolumeRemoveResult{}, nil
+}
diff --git a/vendor/github.com/moby/moby/client/volume_update.go b/vendor/github.com/moby/moby/client/volume_update.go
new file mode 100644
index 000000000..5aa2a0aa1
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/volume_update.go
@@ -0,0 +1,40 @@
+package client
+
+import (
+	"context"
+	"net/url"
+
+	"github.com/moby/moby/api/types/swarm"
+	"github.com/moby/moby/api/types/volume"
+)
+
+// VolumeUpdateOptions holds options for [Client.VolumeUpdate].
+type VolumeUpdateOptions struct {
+	Version swarm.Version
+	// Spec is the ClusterVolumeSpec to update the volume to.
+	Spec *volume.ClusterVolumeSpec `json:"Spec,omitempty"`
+}
+
+// VolumeUpdateResult holds the result of [Client.VolumeUpdate].
+type VolumeUpdateResult struct {
+	// Add future fields here.
+}
+
+// VolumeUpdate updates a volume. This only works for Cluster Volumes, and
+// only some fields can be updated.
+func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, options VolumeUpdateOptions) (VolumeUpdateResult, error) {
+	volumeID, err := trimID("volume", volumeID)
+	if err != nil {
+		return VolumeUpdateResult{}, err
+	}
+
+	query := url.Values{}
+	query.Set("version", options.Version.String())
+
+	resp, err := cli.put(ctx, "/volumes/"+volumeID, query, options, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return VolumeUpdateResult{}, err
+	}
+	return VolumeUpdateResult{}, nil
+}
diff --git a/vendor/github.com/morikuni/aec/aec.go b/vendor/github.com/morikuni/aec/aec.go
index 566be6eb1..3b1652a6f 100644
--- a/vendor/github.com/morikuni/aec/aec.go
+++ b/vendor/github.com/morikuni/aec/aec.go
@@ -129,8 +129,10 @@ func init() {
 		All: 2,
 	}
 
-	Save = newAnsi(esc + "s")
-	Restore = newAnsi(esc + "u")
+	// Save uses both SCO (ESC[s) and DEC (ESC7) sequences, as those were never standardised as part of ANSI.
+	Save = newAnsi(esc + "s" + "\x1b7")
+	// Restore uses both SCO (ESC[u) and DEC (ESC8) sequences, as those were never standardised as part of ANSI.
+	Restore = newAnsi(esc + "u" + "\x1b8")
 	Hide = newAnsi(esc + "?25l")
 	Show = newAnsi(esc + "?25h")
 	Report = newAnsi(esc + "6n")
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 7b762370e..8f8dc65d3 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -220,7 +220,7 @@ func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error)
 		return extractSummary(o, f), nil
 	case dto.MetricType_UNTYPED:
 		return extractUntyped(o, f), nil
-	case dto.MetricType_HISTOGRAM:
+	case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
 		return extractHistogram(o, f), nil
 	}
 	return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
@@ -403,9 +403,13 @@ func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
 				infSeen = true
 			}
 
+			v := q.GetCumulativeCountFloat()
+			if v <= 0 {
+				v = float64(q.GetCumulativeCount())
+			}
 			samples = append(samples,
&model.Sample{ Metric: model.Metric(lset), - Value: model.SampleValue(q.GetCumulativeCount()), + Value: model.SampleValue(v), Timestamp: timestamp, }) } @@ -428,9 +432,13 @@ func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { } lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + v := m.Histogram.GetSampleCountFloat() + if v <= 0 { + v = float64(m.Histogram.GetSampleCount()) + } count := &model.Sample{ Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleCount()), + Value: model.SampleValue(v), Timestamp: timestamp, } samples = append(samples, count) diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index c34c7de43..4e4c13e72 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -45,6 +45,7 @@ const ( // The Content-Type values for the different wire protocols. Do not do direct // comparisons to these constants, instead use the comparison functions. + // // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. FmtUnknown Format = `` // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index 0290f6abc..872c0c15b 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -13,7 +13,6 @@ // Build only when actually fuzzing //go:build gofuzz -// +build gofuzz package expfmt diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 8dbf6d04e..21b93bca3 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -160,38 +160,38 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E n, err = w.WriteString("# HELP ") written += n if err != nil { - return + return written, err } n, err = writeName(w, compliantName) written += n if err != nil { - return + return written, err } err = w.WriteByte(' ') written++ if err != nil { - return + return written, err } n, err = writeEscapedString(w, *in.Help, true) written += n if err != nil { - return + return written, err } err = w.WriteByte('\n') written++ if err != nil { - return + return written, err } } n, err = w.WriteString("# TYPE ") written += n if err != nil { - return + return written, err } n, err = writeName(w, compliantName) written += n if err != nil { - return + return written, err } switch metricType { case dto.MetricType_COUNTER: @@ -208,39 +208,41 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E n, err = w.WriteString(" unknown\n") case dto.MetricType_HISTOGRAM: n, err = w.WriteString(" histogram\n") + case dto.MetricType_GAUGE_HISTOGRAM: + n, err = w.WriteString(" gaugehistogram\n") default: return written, fmt.Errorf("unknown metric type %s", metricType.String()) } written += n if err != nil { - return + return written, err } if toOM.withUnit && in.Unit != nil { n, err = w.WriteString("# UNIT ") written += n if err != nil { - return + return written, err } n, err = writeName(w, compliantName) written += n if err != nil { - return + return written, err } err = w.WriteByte(' ') written++ if err != nil { - return + return written, err } n, err = writeEscapedString(w, *in.Unit, true) written 
+= n if err != nil { - return + return written, err } err = w.WriteByte('\n') written++ if err != nil { - return + return written, err } } @@ -304,7 +306,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E ) written += n if err != nil { - return + return written, err } } n, err = writeOpenMetricsSample( @@ -314,7 +316,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E ) written += n if err != nil { - return + return written, err } n, err = writeOpenMetricsSample( w, compliantName, "_count", metric, "", 0, @@ -325,7 +327,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp()) n += createdTsBytesWritten } - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: if metric.Histogram == nil { return written, fmt.Errorf( "expected histogram in metric %s %s", compliantName, metric, @@ -333,6 +335,12 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E } infSeen := false for _, b := range metric.Histogram.Bucket { + if b.GetCumulativeCountFloat() > 0 { + return written, fmt.Errorf( + "OpenMetrics v1.0 does not support float histogram %s %s", + compliantName, metric, + ) + } n, err = writeOpenMetricsSample( w, compliantName, "_bucket", metric, model.BucketLabel, b.GetUpperBound(), @@ -341,7 +349,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E ) written += n if err != nil { - return + return written, err } if math.IsInf(b.GetUpperBound(), +1) { infSeen = true @@ -354,9 +362,12 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E 0, metric.Histogram.GetSampleCount(), true, nil, ) + // We do not check for a float sample count here + // because we will check for it below (and error + // out if needed). written += n if err != nil { - return + return written, err } } n, err = writeOpenMetricsSample( @@ -366,7 +377,13 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E ) written += n if err != nil { - return + return written, err + } + if metric.Histogram.GetSampleCountFloat() > 0 { + return written, fmt.Errorf( + "OpenMetrics v1.0 does not support float histogram %s %s", + compliantName, metric, + ) } n, err = writeOpenMetricsSample( w, compliantName, "_count", metric, "", 0, @@ -384,10 +401,10 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E } written += n if err != nil { - return + return written, err } } - return + return written, err } // FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. 
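The repeated `return` to `return written, err` change in this file (and in text_create.go below) replaces naked returns of the named results, which are easy to misread in long functions. A standalone illustration of the pattern, not code from this diff:

package example

import "io"

// writeBoth mirrors the named-result style used by MetricFamilyToOpenMetrics:
// a bare `return` silently yields the current values of `written` and `err`;
// spelling them out keeps every exit site explicit.
func writeBoth(w io.Writer, a, b string) (written int, err error) {
	n, err := io.WriteString(w, a)
	written += n
	if err != nil {
		return written, err // previously just `return`
	}
	n, err = io.WriteString(w, b)
	written += n
	return written, err
}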
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index c4e9c1bbc..6b8978145 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -108,38 +108,38 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e n, err = w.WriteString("# HELP ") written += n if err != nil { - return + return written, err } n, err = writeName(w, name) written += n if err != nil { - return + return written, err } err = w.WriteByte(' ') written++ if err != nil { - return + return written, err } n, err = writeEscapedString(w, *in.Help, false) written += n if err != nil { - return + return written, err } err = w.WriteByte('\n') written++ if err != nil { - return + return written, err } } n, err = w.WriteString("# TYPE ") written += n if err != nil { - return + return written, err } n, err = writeName(w, name) written += n if err != nil { - return + return written, err } metricType := in.GetType() switch metricType { @@ -151,14 +151,17 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e n, err = w.WriteString(" summary\n") case dto.MetricType_UNTYPED: n, err = w.WriteString(" untyped\n") - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: + // The classic Prometheus text format has no notion of a gauge + // histogram. We render a gauge histogram in the same way as a + // regular histogram. n, err = w.WriteString(" histogram\n") default: return written, fmt.Errorf("unknown metric type %s", metricType.String()) } written += n if err != nil { - return + return written, err } // Finally the samples, one line for each. 
@@ -208,7 +211,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e ) written += n if err != nil { - return + return written, err } } n, err = writeSample( @@ -217,13 +220,13 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e ) written += n if err != nil { - return + return written, err } n, err = writeSample( w, name, "_count", metric, "", 0, float64(metric.Summary.GetSampleCount()), ) - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: if metric.Histogram == nil { return written, fmt.Errorf( "expected histogram in metric %s %s", name, metric, @@ -231,28 +234,36 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e } infSeen := false for _, b := range metric.Histogram.Bucket { + v := b.GetCumulativeCountFloat() + if v == 0 { + v = float64(b.GetCumulativeCount()) + } n, err = writeSample( w, name, "_bucket", metric, model.BucketLabel, b.GetUpperBound(), - float64(b.GetCumulativeCount()), + v, ) written += n if err != nil { - return + return written, err } if math.IsInf(b.GetUpperBound(), +1) { infSeen = true } } if !infSeen { + v := metric.Histogram.GetSampleCountFloat() + if v == 0 { + v = float64(metric.Histogram.GetSampleCount()) + } n, err = writeSample( w, name, "_bucket", metric, model.BucketLabel, math.Inf(+1), - float64(metric.Histogram.GetSampleCount()), + v, ) written += n if err != nil { - return + return written, err } } n, err = writeSample( @@ -261,12 +272,13 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e ) written += n if err != nil { - return + return written, err } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Histogram.GetSampleCount()), - ) + v := metric.Histogram.GetSampleCountFloat() + if v == 0 { + v = float64(metric.Histogram.GetSampleCount()) + } + n, err = writeSample(w, name, "_count", metric, "", 0, v) default: return written, fmt.Errorf( "unexpected type in metric %s %s", name, metric, @@ -274,10 +286,10 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e } written += n if err != nil { - return + return written, err } } - return + return written, err } // writeSample writes a single sample in text format to w, given the metric diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 8f2edde32..00c8841a1 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -48,8 +48,10 @@ func (e ParseError) Error() string { return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) } -// TextParser is used to parse the simple and flat text-based exchange format. Its -// zero value is ready to use. +// TextParser is used to parse the simple and flat text-based exchange format. +// +// TextParser instances must be created with NewTextParser, the zero value of +// TextParser is invalid. type TextParser struct { metricFamiliesByName map[string]*dto.MetricFamily buf *bufio.Reader // Where the parsed input is read through. 
@@ -129,9 +131,44 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF if p.err != nil && errors.Is(p.err, io.EOF) { p.parseError("unexpected end of input stream") } + for _, histogramMetric := range p.histograms { + normalizeHistogram(histogramMetric.GetHistogram()) + } return p.metricFamiliesByName, p.err } +// normalizeHistogram makes sure that all the buckets and the count in each +// histogram is either completely float or completely integer. +func normalizeHistogram(histogram *dto.Histogram) { + if histogram == nil { + return + } + anyFloats := false + if histogram.GetSampleCountFloat() != 0 { + anyFloats = true + } else { + for _, b := range histogram.GetBucket() { + if b.GetCumulativeCountFloat() != 0 { + anyFloats = true + break + } + } + } + if !anyFloats { + return + } + if histogram.GetSampleCountFloat() == 0 { + histogram.SampleCountFloat = proto.Float64(float64(histogram.GetSampleCount())) + histogram.SampleCount = nil + } + for _, b := range histogram.GetBucket() { + if b.GetCumulativeCountFloat() == 0 { + b.CumulativeCountFloat = proto.Float64(float64(b.GetCumulativeCount())) + b.CumulativeCount = nil + } + } +} + func (p *TextParser) reset(in io.Reader) { p.metricFamiliesByName = map[string]*dto.MetricFamily{} p.currentLabelPairs = nil @@ -281,7 +318,9 @@ func (p *TextParser) readingLabels() stateFn { // Summaries/histograms are special. We have to reset the // currentLabels map, currentQuantile and currentBucket before starting to // read labels. - if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentMF.GetType() == dto.MetricType_SUMMARY || + p.currentMF.GetType() == dto.MetricType_HISTOGRAM || + p.currentMF.GetType() == dto.MetricType_GAUGE_HISTOGRAM { p.currentLabels = map[string]string{} p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() p.currentQuantile = math.NaN() @@ -374,7 +413,9 @@ func (p *TextParser) startLabelName() stateFn { // Special summary/histogram treatment. Don't add 'quantile' and 'le' // labels to 'real' labels. if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) && - (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) { + ((p.currentMF.GetType() != dto.MetricType_HISTOGRAM && + p.currentMF.GetType() != dto.MetricType_GAUGE_HISTOGRAM) || + p.currentLabelPair.GetName() != model.BucketLabel) { p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. @@ -425,7 +466,7 @@ func (p *TextParser) startLabelValue() stateFn { } } // Similar special treatment of histograms. - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM || p.currentMF.GetType() == dto.MetricType_GAUGE_HISTOGRAM { if p.currentLabelPair.GetName() == model.BucketLabel { if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. 
@@ -476,7 +517,7 @@ func (p *TextParser) readingValue() stateFn { p.summaries[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: signature := model.LabelsToSignature(p.currentLabels) if histogram := p.histograms[signature]; histogram != nil { p.currentMetric = histogram @@ -522,24 +563,38 @@ func (p *TextParser) readingValue() stateFn { }, ) } - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: // *sigh* if p.currentMetric.Histogram == nil { p.currentMetric.Histogram = &dto.Histogram{} } switch { case p.currentIsHistogramCount: - p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + if uintValue := uint64(value); value == float64(uintValue) { + p.currentMetric.Histogram.SampleCount = proto.Uint64(uintValue) + } else { + if value < 0 { + p.parseError(fmt.Sprintf("negative count for histogram %q", p.currentMF.GetName())) + return nil + } + p.currentMetric.Histogram.SampleCountFloat = proto.Float64(value) + } case p.currentIsHistogramSum: p.currentMetric.Histogram.SampleSum = proto.Float64(value) case !math.IsNaN(p.currentBucket): - p.currentMetric.Histogram.Bucket = append( - p.currentMetric.Histogram.Bucket, - &dto.Bucket{ - UpperBound: proto.Float64(p.currentBucket), - CumulativeCount: proto.Uint64(uint64(value)), - }, - ) + b := &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + } + if uintValue := uint64(value); value == float64(uintValue) { + b.CumulativeCount = proto.Uint64(uintValue) + } else { + if value < 0 { + p.parseError(fmt.Sprintf("negative bucket population for histogram %q", p.currentMF.GetName())) + return nil + } + b.CumulativeCountFloat = proto.Float64(value) + } + p.currentMetric.Histogram.Bucket = append(p.currentMetric.Histogram.Bucket, b) } default: p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) @@ -602,10 +657,18 @@ func (p *TextParser) readingType() stateFn { if p.readTokenUntilNewline(false); p.err != nil { return nil // Unexpected end of input. } - metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] + typ := strings.ToUpper(p.currentToken.String()) // Tolerate any combination of upper and lower case. + metricType, ok := dto.MetricType_value[typ] // Tolerate "gauge_histogram" (not originally part of the text format). if !ok { - p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) - return nil + // We also want to tolerate "gaugehistogram" to mark a gauge + // histogram, because that string is used in OpenMetrics. Note, + // however, that gauge histograms do not officially exist in the + // classic text format. 
+ if typ != "GAUGEHISTOGRAM" { + p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) + return nil + } + metricType = int32(dto.MetricType_GAUGE_HISTOGRAM) } p.currentMF.Type = dto.MetricType(metricType).Enum() return p.startOfLine @@ -855,7 +918,8 @@ func (p *TextParser) setOrCreateCurrentMF() { } histogramName := histogramMetricName(name) if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM || + p.currentMF.GetType() == dto.MetricType_GAUGE_HISTOGRAM { if isCount(name) { p.currentIsHistogramCount = true } diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index 3c3bf910f..23ecd4505 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,7 +1,9 @@ version: "2" linters: enable: + - errorlint - forbidigo + - gocritic - godot - misspell - revive @@ -11,6 +13,20 @@ linters: forbid: - pattern: ^fmt\.Print.*$ msg: Do not commit print statements. + gocritic: + enable-all: true + disabled-checks: + - commentFormatting + - commentedOutCode + - deferInLoop + - filepathJoin + - hugeParam + - importShadow + - paramTypeCombine + - rangeValCopy + - tooManyResultsChecker + - unnamedResult + - whyNoLint godot: exclude: # Ignore "See: URL". @@ -19,16 +35,12 @@ linters: misspell: locale: US exclusions: - generated: lax presets: - comments - common-false-positives - legacy - std-error-handling - paths: - - third_party$ - - builtin$ - - examples$ + warn-unused: true formatters: enable: - gofmt @@ -37,9 +49,3 @@ formatters: goimports: local-prefixes: - github.com/prometheus/procfs - exclusions: - generated: lax - paths: - - third_party$ - - builtin$ - - examples$ diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index 7edfe4d09..bce50a19c 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -1,4 +1,4 @@ -# Copyright 2018 The Prometheus Authors +# Copyright The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 4de21512f..6f61bec48 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -139,7 +139,7 @@ common-deps: update-go-deps: @echo ">> updating Go dependencies" @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get -d $$m; \ + $(GO) get $$m; \ done $(GO) mod tidy diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 2e5334415..716bdef10 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at @@ -73,15 +73,16 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { columns := strings.Fields(line) width := len(columns) - if width == expectedHeaderWidth || width == 0 { + switch width { + case expectedHeaderWidth, 0: continue - } else if width == expectedDataWidth { + case expectedDataWidth: entry, err := parseARPEntry(columns) if err != nil { return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) } entries = append(entries, entry) - } else { + default: return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) } diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index 838075009..53243e687 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -1,4 +1,4 @@ -// Copyright 2017 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -64,14 +64,12 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { if bucketCount == -1 { bucketCount = arraySize - } else { - if bucketCount != arraySize { - return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize) - } + } else if bucketCount != arraySize { + return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize) } sizes := make([]float64, arraySize) - for i := 0; i < arraySize; i++ { + for i := range arraySize { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) diff --git a/vendor/github.com/prometheus/procfs/cmdline.go b/vendor/github.com/prometheus/procfs/cmdline.go index bf4f3b48c..4f1cac1f0 100644 --- a/vendor/github.com/prometheus/procfs/cmdline.go +++ b/vendor/github.com/prometheus/procfs/cmdline.go @@ -1,4 +1,4 @@ -// Copyright 2021 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index f0950bb49..5fe6cecd3 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go index 64cfd534c..8f155551e 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go index d88442f0e..e81a5db94 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go index c11207f3a..4be2b1cc5 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go index a6b2b3127..e713bae8d 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go index 003bc2ad4..0825aa1a8 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go index 1c9b7313b..496770b05 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go index fa3686bc0..b3228ce3d 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go index a0ef55562..575eb022e 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go index 5f2a37a78..e4a5876ea 100644 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go index f9d961e44..26bfea071 100644 --- a/vendor/github.com/prometheus/procfs/doc.go +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -1,4 +1,4 @@ -// Copyright 2014 Prometheus Team +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 9bdaccc7c..8f27912a1 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 1b5bdbdf8..3c53023c5 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go index 80df79c31..80fce4847 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index 7db863307..9dde85707 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -388,20 +388,21 @@ func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { } } case "CacheOp:": - if strings.Split(fields[1], "=")[0] == "alo" { + switch strings.Split(fields[1], "=")[0] { + case "alo": err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress, &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress) if err != nil { return &m, err } - } else if strings.Split(fields[1], "=")[0] == "inv" { + case "inv": err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress, &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress, &m.CacheopSyncCacheInProgress) if err != nil { return &m, err } - } else { + default: err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress, &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress, &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress) diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 3a43e8391..e7ccad66b 100644 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 5a7d2df06..30c587201 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go index 71b7a70eb..0e41f71af 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go index d5404a6d7..8318d8dfd 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go index 1d86f5e63..15bb096ee 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go index fe2355d3c..e0ed671ea 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go +++ b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index bc3a20c93..5374da9fa 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/kernel_hung.go b/vendor/github.com/prometheus/procfs/kernel_hung.go new file mode 100644 index 000000000..539c11151 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/kernel_hung.go @@ -0,0 +1,45 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +package procfs + +import ( + "os" + "strconv" + "strings" +) + +// KernelHung contains information about to the kernel's hung_task_detect_count number. +type KernelHung struct { + // Indicates the total number of tasks that have been detected as hung since the system boot. 
+ // This file shows up if `CONFIG_DETECT_HUNG_TASK` is enabled. + HungTaskDetectCount *uint64 +} + +// KernelHung returns values from /proc/sys/kernel/hung_task_detect_count. +func (fs FS) KernelHung() (KernelHung, error) { + data, err := os.ReadFile(fs.proc.Path("sys", "kernel", "hung_task_detect_count")) + if err != nil { + return KernelHung{}, err + } + val, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) + if err != nil { + return KernelHung{}, err + } + return KernelHung{ + HungTaskDetectCount: &val, + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go index db88566bd..b66565a10 100644 --- a/vendor/github.com/prometheus/procfs/kernel_random.go +++ b/vendor/github.com/prometheus/procfs/kernel_random.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go index 332e76c17..c8c78a65e 100644 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 1fd4381b2..d66eeda82 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -27,13 +27,34 @@ var ( recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) - componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) + componentDeviceRE = regexp.MustCompile(`(.*)\[(\d+)\](\([SF]+\))?`) + personalitiesPrefix = "Personalities : " ) +type MDStatComponent struct { + // Name of the component device. + Name string + // DescriptorIndex number of component device, e.g. the order in the superblock. + DescriptorIndex int32 + // Flags per Linux drivers/md/md.[ch] as of v6.12-rc1 + // Subset that are exposed in mdstat + WriteMostly bool + Journal bool + Faulty bool // "Faulty" is what kernel source uses for "(F)" + Spare bool + Replacement bool + // Some additional flags that are NOT exposed in procfs today; they may + // be available via sysfs. + // In_sync, Bitmap_sync, Blocked, WriteErrorSeen, FaultRecorded, + // BlockedBadBlocks, WantReplacement, Candidate, ... +} + // MDStat holds info parsed from /proc/mdstat. type MDStat struct { // Name of the device. Name string + // raid type of the device. + Type string // activity-state of the device. ActivityState string // Number of active disks. 
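For orientation, here is a minimal sketch of how the new `FS.KernelHung` accessor added above might be consumed. This is not part of the diff; it assumes the default `/proc` mount, uses procfs's existing `NewDefaultFS` constructor, and the error handling is illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS() // wraps the default /proc mount point
	if err != nil {
		log.Fatal(err)
	}
	// Fails with a not-exist error on kernels built without CONFIG_DETECT_HUNG_TASK.
	kh, err := fs.KernelHung()
	if err != nil {
		log.Fatal(err)
	}
	if kh.HungTaskDetectCount != nil {
		fmt.Println("hung tasks detected since boot:", *kh.HungTaskDetectCount)
	}
}
```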
@@ -58,8 +79,8 @@ type MDStat struct { BlocksSyncedFinishTime float64 // current sync speed (in Kilobytes/sec) BlocksSyncedSpeed float64 - // Name of md component devices - Devices []string + // component devices + Devices []MDStatComponent } // MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of @@ -80,28 +101,52 @@ func (fs FS) MDStat() ([]MDStat, error) { // parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of // structs containing the relevant info. func parseMDStat(mdStatData []byte) ([]MDStat, error) { + // TODO: + // - parse global hotspares from the "unused devices" line. mdStats := []MDStat{} lines := strings.Split(string(mdStatData), "\n") + knownRaidTypes := make(map[string]bool) for i, line := range lines { if strings.TrimSpace(line) == "" || line[0] == ' ' || - strings.HasPrefix(line, "Personalities") || strings.HasPrefix(line, "unused") { continue } + // Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] + if len(knownRaidTypes) == 0 && strings.HasPrefix(line, personalitiesPrefix) { + personalities := strings.Fields(line[len(personalitiesPrefix):]) + for _, word := range personalities { + word := word[1 : len(word)-1] + knownRaidTypes[word] = true + } + continue + } deviceFields := strings.Fields(line) if len(deviceFields) < 3 { return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line) } mdName := deviceFields[0] // mdx - state := deviceFields[2] // active or inactive + state := deviceFields[2] // active, inactive, broken + + mdType := "unknown" // raid1, raid5, etc. + var deviceStartIndex int + if len(deviceFields) > 3 { // mdType may be in the 3rd or 4th field + if isRaidType(deviceFields[3], knownRaidTypes) { + mdType = deviceFields[3] + deviceStartIndex = 4 + } else if len(deviceFields) > 4 && isRaidType(deviceFields[4], knownRaidTypes) { + // if the 3rd field is (...), the 4th field is the mdType + mdType = deviceFields[4] + deviceStartIndex = 5 + } + } if len(lines) <= i+3 { return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName) } - // Failed disks have the suffix (F) & Spare disks have the suffix (S). + // Failed (Faulty) disks have the suffix (F) & Spare disks have the suffix (S). fail := int64(strings.Count(line, "(F)")) spare := int64(strings.Count(line, "(S)")) active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) @@ -129,13 +174,14 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // Append recovery and resyncing state info. 
if recovering || resyncing || checking || reshaping { - if recovering { + switch { + case recovering: state = "recovering" - } else if reshaping { + case reshaping: state = "reshaping" - } else if checking { + case checking: state = "checking" - } else { + default: state = "resyncing" } @@ -151,8 +197,14 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { } } + devices, err := evalComponentDevices(deviceFields[deviceStartIndex:]) + if err != nil { + return nil, fmt.Errorf("error parsing components in md device %q: %w", mdName, err) + } + mdStats = append(mdStats, MDStat{ Name: mdName, + Type: mdType, ActivityState: state, DisksActive: active, DisksFailed: fail, @@ -165,14 +217,24 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { BlocksSyncedPct: pct, BlocksSyncedFinishTime: finish, BlocksSyncedSpeed: speed, - Devices: evalComponentDevices(deviceFields), + Devices: devices, }) } return mdStats, nil } +// isRaidType checks whether a string's format is like an mdType. +// Rule 1: mdType should not be like (...) +// Rule 2: mdType should not be like sda[0] +func isRaidType(mdType string, knownRaidTypes map[string]bool) bool { + _, ok := knownRaidTypes[mdType] + return !strings.ContainsAny(mdType, "([") && ok +} + func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { + // e.g. 523968 blocks super 1.2 [4/4] [UUUU] statusFields := strings.Fields(statusLine) if len(statusFields) < 1 { return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } @@ -263,17 +325,29 @@ func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced return blocksSynced, blocksToBeSynced, pct, finish, speed, nil } -func evalComponentDevices(deviceFields []string) []string { - mdComponentDevices := make([]string, 0) - if len(deviceFields) > 3 { - for _, field := range deviceFields[4:] { - match := componentDeviceRE.FindStringSubmatch(field) - if match == nil { - continue - } - mdComponentDevices = append(mdComponentDevices, match[1]) +func evalComponentDevices(deviceFields []string) ([]MDStatComponent, error) { + mdComponentDevices := make([]MDStatComponent, 0) + for _, field := range deviceFields { + match := componentDeviceRE.FindStringSubmatch(field) + if match == nil { + continue } + descriptorIndex, err := strconv.ParseInt(match[2], 10, 32) + if err != nil { + return mdComponentDevices, fmt.Errorf("error parsing int from device %q: %w", match[2], err) + } + mdComponentDevices = append(mdComponentDevices, MDStatComponent{ + Name: match[1], + DescriptorIndex: int32(descriptorIndex), + // match may contain one or more of these + // https://github.com/torvalds/linux/blob/7ec462100ef9142344ddbf86f2c3008b97acddbe/drivers/md/md.c#L8376-L8392 + Faulty: strings.Contains(match[3], "(F)"), + Spare: strings.Contains(match[3], "(S)"), + Journal: strings.Contains(match[3], "(J)"), + Replacement: strings.Contains(match[3], "(R)"), + WriteMostly: strings.Contains(match[3], "(W)"), + }) } - return mdComponentDevices + return mdComponentDevices, nil } diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go index 937e1f960..342038318 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
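Stepping back to the mdstat change above: `Devices` changes type from `[]string` to `[]MDStatComponent`, which is a breaking change for downstream callers. A sketch of consuming the richer shape (illustrative, not from the diff):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	mdstats, err := fs.MDStat() // parses /proc/mdstat
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range mdstats {
		// Type is new; it falls back to "unknown" when no personality matches.
		fmt.Printf("%s type=%s state=%s\n", md.Name, md.Type, md.ActivityState)
		for _, dev := range md.Devices {
			fmt.Printf("  %s[%d] faulty=%v spare=%v journal=%v\n",
				dev.Name, dev.DescriptorIndex, dev.Faulty, dev.Spare, dev.Journal)
		}
	}
}
```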
// You may obtain a copy of the License at @@ -307,7 +307,7 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { m.ZswapBytes = &valBytes case "Zswapped:": m.Zswapped = &val - m.ZswapBytes = &valBytes + m.ZswappedBytes = &valBytes case "Dirty:": m.Dirty = &val m.DirtyBytes = &valBytes diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go index a704c5e73..9414a12f4 100644 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -147,8 +147,7 @@ func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { // mountOptionsParser parses the mount options, superblock options. func mountOptionsParser(mountOptions string) map[string]string { opts := make(map[string]string) - options := strings.Split(mountOptions, ",") - for _, opt := range options { + for opt := range strings.SplitSeq(mountOptions, ",") { splitOption := strings.Split(opt, "=") if len(splitOption) < 2 { key := splitOption[0] @@ -178,3 +177,21 @@ func GetProcMounts(pid int) ([]*MountInfo, error) { } return parseMountInfo(data) } + +// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`. +func (fs FS) GetMounts() ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("self/mountinfo")) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} + +// GetProcMounts retrieves mountinfo information from a process's `/proc/<pid>/mountinfo`. +func (fs FS) GetProcMounts(pid int) ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%d/mountinfo", pid))) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 50caa7327..e503cb3a6 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -383,7 +383,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e if stats.Opts == nil { stats.Opts = map[string]string{} } - for _, opt := range strings.Split(ss[1], ",") { + for opt := range strings.SplitSeq(ss[1], ",") { split := strings.Split(opt, "=") if len(split) == 2 { stats.Opts[split[0]] = split[1] diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index 316df5fbb..e9ca35707 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
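The two new `FS`-scoped methods above mirror the package-level `GetMounts`/`GetProcMounts`, which matters when `/proc` is mounted somewhere non-standard. A hedged usage sketch (not from the diff; the mount point string is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewFS lets tests or containers point at an alternate proc tree;
	// "/proc" here matches the default mount point.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := fs.GetMounts() // parses /proc/self/mountinfo
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		fmt.Println(m.MountPoint, m.FSType, m.Source)
	}
}
```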
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go index e66208aa0..7b3e1d61c 100644 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go index f50b38e35..2a0f60f29 100644 --- a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go +++ b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -18,6 +18,7 @@ import ( "errors" "io" "os" + "path/filepath" "strconv" "strings" ) @@ -56,7 +57,9 @@ func newNetDevSNMP6(dir string) (NetDevSNMP6, error) { } for _, iFaceFile := range ifaceFiles { - f, err := os.Open(dir + "/" + iFaceFile.Name()) + filePath := filepath.Join(dir, iFaceFile.Name()) + + f, err := os.Open(filePath) if err != nil { return netDevSNMP6, err } diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index 19e3378f7..9291f8cd4 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go index 8d4b1ac05..eaa996cbc 100644 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -169,7 +169,7 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro &pc.EnterMemoryPressure, } - for i := 0; i < len(capabilities); i++ { + for i := range capabilities { switch capabilities[i] { case "y": *capabilityFields[i] = true diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go index deb7029fe..fa3812d9d 100644 --- a/vendor/github.com/prometheus/procfs/net_route.go +++ b/vendor/github.com/prometheus/procfs/net_route.go @@ -1,4 +1,4 @@ -// Copyright 2023 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go index fae62b13d..8b221ebff 100644 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -139,9 +139,6 @@ func parseSockstatKVs(kvs []string) (map[string]int, error) { func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol { var nsp NetSockstatProtocol for k, v := range kvs { - // Capture the range variable to ensure we get unique pointers for - // each of the optional fields. - v := v switch k { case "inuse": nsp.InUse = v diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index 71c8059f4..4a2dfa18f 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go index 0396d7201..610ea78e5 100644 --- a/vendor/github.com/prometheus/procfs/net_tcp.go +++ b/vendor/github.com/prometheus/procfs/net_tcp.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go index 13994c178..b1b3f6a6a 100644 --- a/vendor/github.com/prometheus/procfs/net_tls_stat.go +++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go @@ -1,4 +1,4 @@ -// Copyright 2023 Prometheus Team +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go index 9ac3daf2d..8a3277910 100644 --- a/vendor/github.com/prometheus/procfs/net_udp.go +++ b/vendor/github.com/prometheus/procfs/net_udp.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index d7e0cacb4..e4d635923 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go index 7c597bc87..69d079445 100644 --- a/vendor/github.com/prometheus/procfs/net_wireless.go +++ b/vendor/github.com/prometheus/procfs/net_wireless.go @@ -1,4 +1,4 @@ -// Copyright 2023 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go index 932ef2046..5a9f497d1 100644 --- a/vendor/github.com/prometheus/procfs/net_xfrm.go +++ b/vendor/github.com/prometheus/procfs/net_xfrm.go @@ -1,4 +1,4 @@ -// Copyright 2017 Prometheus Team +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index 742dff453..dbdae4739 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/nfnetlink_queue.go b/vendor/github.com/prometheus/procfs/nfnetlink_queue.go new file mode 100644 index 000000000..b0a73b11e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfnetlink_queue.go @@ -0,0 +1,85 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + + "github.com/prometheus/procfs/internal/util" +) + +const nfNetLinkQueueFormat = "%d %d %d %d %d %d %d %d %d" + +// NFNetLinkQueue contains general information about netfilter queues found in /proc/net/netfilter/nfnetlink_queue. +type NFNetLinkQueue struct { + // id of the queue + QueueID uint + // pid of the process handling the queue + PeerPID uint + // number of packets waiting for a decision + QueueTotal uint + // indicates how userspace receives packets + CopyMode uint + // size of the copy + CopyRange uint + // number of items dropped by the kernel because too many packets were waiting for a decision. + // If queue_total exceeds queue_max_len (1024 by default), the packets are dropped. + QueueDropped uint + // number of packets dropped by userspace (due to kernel send failure on the netlink socket) + QueueUserDropped uint + // sequence number of packets queued. It gives a close approximation of the number of queued packets.
+ SequenceID uint + // internal value (number of entity using the queue) + Use uint +} + +// NFNetLinkQueue returns information about current state of netfilter queues. +func (fs FS) NFNetLinkQueue() ([]NFNetLinkQueue, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("net/netfilter/nfnetlink_queue")) + if err != nil { + return nil, err + } + + queue := []NFNetLinkQueue{} + if len(data) == 0 { + return queue, nil + } + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + line := scanner.Text() + nFNetLinkQueue, err := parseNFNetLinkQueueLine(line) + if err != nil { + return nil, err + } + queue = append(queue, *nFNetLinkQueue) + } + return queue, nil +} + +// parseNFNetLinkQueueLine parses each line of the /proc/net/netfilter/nfnetlink_queue file. +func parseNFNetLinkQueueLine(line string) (*NFNetLinkQueue, error) { + nFNetLinkQueue := NFNetLinkQueue{} + _, err := fmt.Sscanf( + line, nfNetLinkQueueFormat, + &nFNetLinkQueue.QueueID, &nFNetLinkQueue.PeerPID, &nFNetLinkQueue.QueueTotal, &nFNetLinkQueue.CopyMode, + &nFNetLinkQueue.CopyRange, &nFNetLinkQueue.QueueDropped, &nFNetLinkQueue.QueueUserDropped, &nFNetLinkQueue.SequenceID, &nFNetLinkQueue.Use, + ) + if err != nil { + return nil, err + } + return &nFNetLinkQueue, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 368187fa8..39c14aa55 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -49,7 +49,7 @@ func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } // Self returns a process for the current process read via /proc/self. func Self() (Proc, error) { fs, err := NewFS(DefaultMountPoint) - if err != nil || errors.Unwrap(err) == ErrMountPoint { + if err != nil || errors.Is(err, ErrMountPoint) { return Proc{}, err } return fs.Self() diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index 4a64347c0..535c08d6f 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go index 5dd493899..0b275c3b1 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroups.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go @@ -1,4 +1,4 @@ -// Copyright 2021 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -40,13 +40,13 @@ type CgroupSummary struct { // parseCgroupSummary parses each line of the /proc/cgroup file // Line format is `subsys_name hierarchy num_cgroups enabled`. 
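Returning to the `NFNetLinkQueue` reader added above: a sketch of how it might be consumed (illustrative, not from the diff; the file is empty when no userspace program binds an nfqueue, which the code handles by returning an empty slice):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	queues, err := fs.NFNetLinkQueue() // reads /proc/net/netfilter/nfnetlink_queue
	if err != nil {
		log.Fatal(err)
	}
	for _, q := range queues {
		fmt.Printf("queue=%d pid=%d waiting=%d kernel-dropped=%d user-dropped=%d\n",
			q.QueueID, q.PeerPID, q.QueueTotal, q.QueueDropped, q.QueueUserDropped)
	}
}
```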
-func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { +func parseCgroupSummaryString(cgroupSummaryStr string) (*CgroupSummary, error) { var err error - fields := strings.Fields(CgroupSummaryStr) + fields := strings.Fields(cgroupSummaryStr) // require at least 4 fields if len(fields) < 4 { - return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr) + return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), cgroupSummaryStr) } CgroupSummary := &CgroupSummary{ diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go index 57a89895d..5b941de04 100644 --- a/vendor/github.com/prometheus/procfs/proc_environ.go +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index fa761b352..fa57761db 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -60,15 +60,16 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { text = scanner.Text() - if rPos.MatchString(text) { + switch { + case rPos.MatchString(text): pos = rPos.FindStringSubmatch(text)[1] - } else if rFlags.MatchString(text) { + case rFlags.MatchString(text): flags = rFlags.FindStringSubmatch(text)[1] - } else if rMntID.MatchString(text) { + case rMntID.MatchString(text): mntid = rMntID.FindStringSubmatch(text)[1] - } else if rIno.MatchString(text) { + case rIno.MatchString(text): ino = rIno.FindStringSubmatch(text)[1] - } else if rInotify.MatchString(text) { + case rInotify.MatchString(text): newInotify, err := parseInotifyInfo(text) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go index 86b4b4524..b942c5072 100644 --- a/vendor/github.com/prometheus/procfs/proc_interrupts.go +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go index d15b66ddb..dd8086ba2 100644 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index 9530b14bc..4b7d33784 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -19,6 +19,7 @@ import ( "os" "regexp" "strconv" + "strings" ) // ProcLimits represents the soft limits for each of the process's resource @@ -74,7 +75,7 @@ const ( ) var ( - limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`) + limitsMatch = regexp.MustCompile(`(Max \w+\s??\w*\s?\w*)\s{2,}(\w+)\s+(\w+)`) ) // NewLimits returns the current soft limits of the process. @@ -106,7 +107,7 @@ func (p Proc) Limits() (ProcLimits, error) { return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text()) } - switch fields[1] { + switch strings.TrimSpace(fields[1]) { case "Max cpu time": l.CPUTime, err = parseUint(fields[2]) case "Max file size": diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index 7e75c286b..cc519f92f 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 4248c1716..7f94cc891 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index 0f8f847f9..5fc0eb9e2 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index ccd35f153..cc2c5de87 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index 9a297afcf..3e48afd1d 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index 4bdc90b07..8d9a9bcd6 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go index fb7fd3995..841fef464 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 3328556bd..02e3f9e31 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_statm.go b/vendor/github.com/prometheus/procfs/proc_statm.go index ed5798424..b0a936016 100644 --- a/vendor/github.com/prometheus/procfs/proc_statm.go +++ b/vendor/github.com/prometheus/procfs/proc_statm.go @@ -1,4 +1,4 @@ -// Copyright 2025 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -80,7 +80,7 @@ func (p Proc) Statm() (ProcStatm, error) { func parseStatm(data []byte) ([]uint64, error) { var statmSlice []uint64 statmItems := strings.Fields(string(data)) - for i := 0; i < len(statmItems); i++ { + for i := range statmItems { statmItem, err := strconv.ParseUint(statmItems[i], 10, 64) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index dd8aa5688..1ed2bced4 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at @@ -16,7 +16,7 @@ package procfs import ( "bytes" "math/bits" - "sort" + "slices" "strconv" "strings" @@ -94,8 +94,7 @@ func (p Proc) NewStatus() (ProcStatus, error) { s := ProcStatus{PID: p.PID} - lines := strings.Split(string(data), "\n") - for _, line := range lines { + for line := range strings.SplitSeq(string(data), "\n") { if !bytes.Contains([]byte(line), []byte(":")) { continue } @@ -222,7 +221,7 @@ func calcCpusAllowedList(cpuString string) []uint64 { } - sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) + slices.Sort(g) return g } diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index 3810d1ac9..52658a4d5 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go index 5f7f32dc8..fafd8dff7 100644 --- a/vendor/github.com/prometheus/procfs/schedstat.go +++ b/vendor/github.com/prometheus/procfs/schedstat.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go index 8611c9017..32a04678a 100644 --- a/vendor/github.com/prometheus/procfs/slab.go +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index 403e6ae70..47b73a729 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index e36b41c18..593ad0f62 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
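Several hunks in this vendor bump (mountinfo, mountstats, proc_status, zoneinfo) replace `strings.Split` loops with Go 1.24's iterator-returning `strings.SplitSeq`, and `sort.Slice` with `slices.Sort`. A standalone illustration of both patterns, not library code:

```go
package main

import (
	"fmt"
	"slices"
	"strconv"
	"strings"
)

func main() {
	// strings.SplitSeq yields substrings lazily, so no intermediate
	// []string is allocated the way strings.Split would.
	var cpus []uint64
	for field := range strings.SplitSeq("3,1,0,2", ",") {
		v, err := strconv.ParseUint(field, 10, 64)
		if err != nil {
			continue
		}
		cpus = append(cpus, v)
	}
	// slices.Sort replaces sort.Slice plus a hand-written less func.
	slices.Sort(cpus)
	fmt.Println(cpus) // [0 1 2 3]
}
```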
// You may obtain a copy of the License at @@ -16,6 +16,7 @@ package procfs import ( "bufio" "bytes" + "errors" "fmt" "io" "strconv" @@ -92,7 +93,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, &cpuStat.Guest, &cpuStat.GuestNice) - if err != nil && err != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err) } if count == 0 { diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go index 65fec834b..ee17bf488 100644 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go index 80e0e947b..0cfbb5418 100644 --- a/vendor/github.com/prometheus/procfs/thread.go +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index 51c49d89e..2a8d76390 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index e54d94b09..806e17114 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
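The `errors.Unwrap(err) == ErrMountPoint` comparison in proc.go and the `err != io.EOF` check in stat.go are both replaced with `errors.Is`, which matches anywhere in a wrap chain rather than only one level down. A small demonstration of the difference:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	// Two layers of wrapping, as produced by nested fmt.Errorf("%w", ...).
	err := fmt.Errorf("outer: %w", fmt.Errorf("inner: %w", io.EOF))

	fmt.Println(errors.Unwrap(err) == io.EOF) // false: Unwrap peels one layer only
	fmt.Println(errors.Is(err, io.EOF))       // true: Is walks the whole chain
}
```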
// You may obtain a copy of the License at @@ -88,11 +88,9 @@ func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { zoneinfo := []Zoneinfo{} - zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode")) - for _, block := range zoneinfoBlocks { + for block := range bytes.SplitSeq(zoneinfoData, []byte("\nNode")) { var zoneinfoElement Zoneinfo - lines := strings.Split(string(block), "\n") - for _, line := range lines { + for line := range strings.SplitSeq(string(block), "\n") { if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil { zoneinfoElement.Node = nodeZone[1] diff --git a/vendor/github.com/schollz/progressbar/v3/progressbar.go b/vendor/github.com/schollz/progressbar/v3/progressbar.go index 0ccec7d37..99222a3c7 100644 --- a/vendor/github.com/schollz/progressbar/v3/progressbar.go +++ b/vendor/github.com/schollz/progressbar/v3/progressbar.go @@ -6,11 +6,11 @@ import ( "errors" "fmt" "io" - "log" "math" "net/http" "os" "regexp" + "runtime" "strings" "sync" "time" @@ -79,6 +79,8 @@ type config struct { // whether the output is expected to contain color codes colorCodes bool + // custom colors to use for colorCodes + customColors map[string]string // show rate of change in kB/sec or MB/sec showBytes bool @@ -280,6 +282,14 @@ func OptionEnableColorCodes(colorCodes bool) Option { } } +// OptionSetCustomColorCodes overrides DefaultColors for color codes +// using mitchellh/colorstring.Colorize in func customColorstringColor +func OptionSetCustomColorCodes(customColors map[string]string) Option { + return func(p *ProgressBar) { + p.config.customColors = customColors + } +} + // OptionSetElapsedTime will enable elapsed time. Always enabled if OptionSetPredictTime is true. func OptionSetElapsedTime(elapsedTime bool) Option { return func(p *ProgressBar) { @@ -449,17 +459,15 @@ func NewOptions64(max int64, options ...Option) *ProgressBar { go func() { ticker := time.NewTicker(b.config.spinnerChangeInterval) defer ticker.Stop() - for { - select { - case <-ticker.C: - if b.IsFinished() { - return - } - if b.IsStarted() { - b.lock.Lock() - b.render() - b.lock.Unlock() - } + + for range ticker.C { + if b.IsFinished() { + return + } + if b.IsStarted() { + b.lock.Lock() + b.render() + b.lock.Unlock() } } }() @@ -806,6 +814,13 @@ func (p *ProgressBar) Describe(description string) { if p.config.invisible { return } + // if already finished, re-render with the new description + if p.state.finished && !p.config.clearOnFinish && !p.config.useANSICodes { + clearProgressBar(p.config, p.state) + io.Copy(p.config.writer, &p.config.stdBuffer) + renderProgressBar(p.config, &p.state) + return + } p.render() } @@ -939,7 +954,13 @@ func (p *ProgressBar) render() error { if p.config.maxDetailRow > 0 { p.renderDetails() // put the cursor back to the last line of the details - writeString(p.config, fmt.Sprintf("\u001B[%dB\r\u001B[%dC", p.config.maxDetailRow, len(p.state.details[len(p.state.details)-1]))) + var lastDetailLength int + if len(p.state.details) == 0 { + lastDetailLength = 0 + } else { + lastDetailLength = len(p.state.details[len(p.state.details)-1]) + } + writeString(p.config, fmt.Sprintf("\u001B[%dB\r\u001B[%dC", p.config.maxDetailRow, lastDetailLength)) } if p.config.onCompletion != nil { p.config.onCompletion() @@ -976,14 +997,12 @@ func (p *ProgressBar) render() error { func (p *ProgressBar) lengthUnknown() { p.config.ignoreLength = true p.config.max = int64(p.config.width) - p.config.predictTime = false } // lengthKnown sets the progress bar to do not ignore the length func (p 
*ProgressBar) lengthKnown(max int64) { p.config.ignoreLength = false p.config.max = max - p.config.predictTime = true } // State returns the current state @@ -1014,36 +1033,68 @@ func (p *ProgressBar) State() State { // StartHTTPServer starts an HTTP server dedicated to serving progress bar updates. This allows you to // display the status in various UI elements, such as an OS status bar with an `xbar` extension. -// It is recommended to run this function in a separate goroutine to avoid blocking the main thread. +// When the progress bar is finished, call `server.Shutdown()` or `server.Close()` to shut it down manually. // // hostPort specifies the address and port to bind the server to, for example, "0.0.0.0:19999". -func (p *ProgressBar) StartHTTPServer(hostPort string) { - // for advanced users, we can return the data as json - http.HandleFunc("/state", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/json") - // since the state is a simple struct, we can just ignore the error +func (p *ProgressBar) StartHTTPServer(hostPort string) *http.Server { + mux := http.NewServeMux() + + // register routes + mux.HandleFunc("/state", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") bs, _ := json.Marshal(p.State()) w.Write(bs) }) - // for others, we just return the description in a plain text format - http.HandleFunc("/desc", func(w http.ResponseWriter, r *http.Request) { + + mux.HandleFunc("/desc", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") + state := p.State() fmt.Fprintf(w, "%d/%d, %.2f%%, %s left", - p.State().CurrentNum, p.State().Max, p.State().CurrentPercent*100, - (time.Second * time.Duration(p.State().SecondsLeft)).String(), + state.CurrentNum, state.Max, state.CurrentPercent*100, + (time.Second * time.Duration(state.SecondsLeft)).String(), ) }) - log.Fatal(http.ListenAndServe(hostPort, nil)) + + // create the server instance + server := &http.Server{ + Addr: hostPort, + Handler: mux, + } + + // start the server in a goroutine and ignore errors + go func() { + defer func() { + if err := recover(); err != nil { + fmt.Println("encounter panic: ", err) + } + }() + + _ = server.ListenAndServe() + }() + + // return the server instance for use by the caller + return server +} + +// use colorstring.Colorize to customize the color palette +func customColorstringColor(col map[string]string, str string) string { + cs := colorstring.Colorize{Colors: col, Reset: true} + return cs.Color(str) } // regex matching ansi escape codes var ansiRegex = regexp.MustCompile(`\x1b\[[0-9;]*[a-zA-Z]`) -func getStringWidth(c config, str string, colorize bool) int { +func getStringWidth(c config, str string) int { if c.colorCodes { // convert any color codes in the progress bar into the respective ANSI codes - str = colorstring.Color(str) + // if customColors have been option set - create cs with Colorize + if len(c.customColors) > 0 { + str = customColorstringColor(c.customColors, str) + } else { + str = colorstring.Color(str) + } } // the width of the string, if printed to the console @@ -1168,7 +1219,7 @@ func renderProgressBar(c config, s *state) (int, error) { } if c.fullWidth && !c.ignoreLength { - width, err := termWidth() + width, err := termWidth(c.writer) if err != nil { width = 80 } @@ -1186,7 +1237,7 @@ func renderProgressBar(c config, s *state) (int, error) { amend += 1 // another space } - c.width = width - getStringWidth(c, c.description, true) - 10 - amend - sb.Len() - 
len(leftBrac) - len(rightBrac) + c.width = width - getStringWidth(c, c.description) - 10 - amend - sb.Len() - len(leftBrac) - len(rightBrac) s.currentSaucerSize = int(float64(s.currentPercent) / 100.0 * float64(c.width)) } if (s.currentSaucerSize > 0 || s.currentPercent > 0) && c.theme.BarStartFilled != "" { @@ -1330,12 +1381,21 @@ func renderProgressBar(c config, s *state) (int, error) { if c.colorCodes { // convert any color codes in the progress bar into the respective ANSI codes - str = colorstring.Color(str) + // if customColors have been option set - create cs with Colorize + if len(c.customColors) > 0 { + str = customColorstringColor(c.customColors, str) + } else { + str = colorstring.Color(str) + } + } + + if c.useANSICodes { + str = str + "\033[0K" } s.rendered = str - return getStringWidth(c, str, false), writeString(c, str) + return getStringWidth(c, str), writeString(c, str) } func clearProgressBar(c config, s state) error { @@ -1349,6 +1409,9 @@ func clearProgressBar(c config, s state) error { // fill the empty content // to overwrite the progress bar and jump // back to the beginning of the line + if runtime.GOOS == "windows" { + return writeString(c, "\r") + } str := fmt.Sprintf("\r%s\r", strings.Repeat(" ", s.maxLineWidth)) return writeString(c, str) // the following does not show correctly if the previous line is longer than subsequent line @@ -1394,7 +1457,7 @@ func (r *Reader) Read(p []byte) (n int, err error) { // Close the reader when it implements io.Closer func (r *Reader) Close() (err error) { if closer, ok := r.Reader.(io.Closer); ok { - return closer.Close() + err = closer.Close() } r.bar.Finish() return @@ -1456,12 +1519,15 @@ func logn(n, b float64) float64 { // termWidth function returns the visible width of the current terminal // and can be redefined for testing -var termWidth = func() (width int, err error) { - width, _, err = term.GetSize(int(os.Stdout.Fd())) - if err == nil { - return width, nil +var termWidth = func(w io.Writer) (width int, err error) { + if f, ok := w.(*os.File); ok { + width, _, err = term.GetSize(int(f.Fd())) + if err == nil { + return width, nil + } + } else { + err = errors.New("output is not a *os.File") } - return 0, err } diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml index 65dc28503..792db3618 100644 --- a/vendor/github.com/sirupsen/logrus/.golangci.yml +++ b/vendor/github.com/sirupsen/logrus/.golangci.yml @@ -1,40 +1,67 @@ +version: "2" run: - # do not run on test files yet tests: false - -# all available settings of specific linters -linters-settings: - errcheck: - # report about not checking of errors in type assetions: `a := b.(MyStruct)`; - # default is false: such cases aren't reported by default. - check-type-assertions: false - - # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; - # default is false: such cases aren't reported by default. 
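Putting the two behavioural changes in this file together, a hedged usage sketch: the color name in the map is hypothetical (names map to ANSI SGR codes, following mitchellh/colorstring's convention), and `StartHTTPServer` now returns the `*http.Server` so the caller owns shutdown instead of the old blocking `log.Fatal(http.ListenAndServe(...))`.

```go
package main

import (
	"time"

	"github.com/schollz/progressbar/v3"
)

func main() {
	bar := progressbar.NewOptions64(100,
		progressbar.OptionEnableColorCodes(true),
		// Hypothetical palette: keys are color names, values are ANSI SGR codes.
		// This map replaces colorstring's DefaultColors entirely, so "reset"
		// must be provided too if the description uses it.
		progressbar.OptionSetCustomColorCodes(map[string]string{
			"orange": "38;5;208",
			"reset":  "0",
		}),
		progressbar.OptionSetDescription("[orange]syncing[reset]"),
	)

	// The returned server can be shut down once the bar is done.
	srv := bar.StartHTTPServer("127.0.0.1:19999")
	defer srv.Close()

	for i := 0; i < 100; i++ {
		_ = bar.Add(1)
		time.Sleep(10 * time.Millisecond)
	}
}
```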
- check-blank: false - - lll: - line-length: 100 - tab-width: 4 - - prealloc: - simple: false - range-loops: false - for-loops: false - - whitespace: - multi-if: false # Enforces newlines (or comments) after every multi-line if statement - multi-func: false # Enforces newlines (or comments) after every multi-line function signature - linters: enable: - - megacheck - - govet + - asasalint + - asciicheck + - bidichk + - bodyclose + - contextcheck + - durationcheck + - errchkjson + - errorlint + - exhaustive + - gocheckcompilerdirectives + - gochecksumtype + - gosec + - gosmopolitan + - loggercheck + - makezero + - musttag + - nilerr + - nilnesserr + - noctx + - protogetter + - reassign + - recvcheck + - rowserrcheck + - spancheck + - sqlclosecheck + - testifylint + - unparam + - zerologlint disable: - - maligned - prealloc - disable-all: false - presets: - - bugs - - unused - fast: false + settings: + errcheck: + check-type-assertions: false + check-blank: false + lll: + line-length: 100 + tab-width: 4 + prealloc: + simple: false + range-loops: false + for-loops: false + whitespace: + multi-if: false + multi-func: false + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md index 7567f6128..098608ff4 100644 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -37,7 +37,7 @@ Features: # 1.6.0 Fixes: * end of line cleanup - * revert the entry concurrency bug fix whic leads to deadlock under some circumstances + * revert the entry concurrency bug fix which leads to deadlock under some circumstances * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 Features: @@ -129,7 +129,7 @@ This new release introduces: which is mostly useful for logger wrapper * a fix reverting the immutability of the entry given as parameter to the hooks a new configuration field of the json formatter in order to put all the fields - in a nested dictionnary + in a nested dictionary * a new SetOutput method in the Logger * a new configuration of the textformatter to configure the name of the default keys * a new configuration of the text formatter to disable the level truncation diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index d1d4a85fd..cc5dab7eb 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -1,4 +1,4 @@ -# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) +# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. 
@@ -40,7 +40,7 @@ plain text): ![Colored](http://i.imgur.com/PY7qMwd.png) -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +With `logrus.SetFormatter(&logrus.JSONFormatter{})`, for easy parsing by logstash or Splunk: ```text @@ -60,9 +60,9 @@ ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} "time":"2014-03-10 19:57:38.562543128 -0400 EDT"} ``` -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +With the default `logrus.SetFormatter(&logrus.TextFormatter{})` when a TTY is not attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: +[logfmt](https://pkg.go.dev/github.com/kr/logfmt) format: ```text time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 @@ -75,17 +75,18 @@ time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x20822 To ensure this behaviour even if a TTY is attached, set your formatter as follows: ```go - log.SetFormatter(&log.TextFormatter{ - DisableColors: true, - FullTimestamp: true, - }) +logrus.SetFormatter(&logrus.TextFormatter{ + DisableColors: true, + FullTimestamp: true, +}) ``` #### Logging Method Name If you wish to add the calling method as a field, instruct the logger via: + ```go -log.SetReportCaller(true) +logrus.SetReportCaller(true) ``` This adds the caller as 'method' like so: @@ -100,11 +101,11 @@ time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcr Note that this does add measurable overhead - the cost will depend on the version of Go, but is between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your environment via benchmarks: -``` + +```bash go test -bench=.*CallerTracing ``` - #### Case-sensitivity The organization's name was changed to lower-case--and this will not be changed @@ -118,12 +119,10 @@ The simplest way to use Logrus is simply the package-level exported logger: ```go package main -import ( - log "github.com/sirupsen/logrus" -) +import "github.com/sirupsen/logrus" func main() { - log.WithFields(log.Fields{ + logrus.WithFields(logrus.Fields{ "animal": "walrus", }).Info("A walrus appears") } @@ -139,6 +138,7 @@ package main import ( "os" + log "github.com/sirupsen/logrus" ) @@ -190,26 +190,27 @@ package main import ( "os" + "github.com/sirupsen/logrus" ) // Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() +var logger = logrus.New() func main() { // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stdout + // exported logger. See Godoc. + logger.Out = os.Stdout // You could set this to any `io.Writer` such as a file // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) // if err == nil { - // log.Out = file + // logger.Out = file // } else { - // log.Info("Failed to log to file, using default stderr") + // logger.Info("Failed to log to file, using default stderr") // } - log.WithFields(logrus.Fields{ + logger.WithFields(logrus.Fields{ "animal": "walrus", "size": 10, }).Info("A group of walrus emerges from the ocean") @@ -219,12 +220,12 @@ func main() { #### Fields Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +long, unparseable error messages. 
For example, instead of: `logrus.Fatalf("Failed to send event %s to topic %s with key %d")`, you should log the much more discoverable: ```go -log.WithFields(log.Fields{ +logrus.WithFields(logrus.Fields{ "event": event, "topic": topic, "key": key, @@ -245,12 +246,12 @@ seen as a hint you should add a field, however, you can still use the Often it's helpful to have fields _always_ attached to log statements in an application or parts of one. For example, you may want to always log the `request_id` and `user_ip` in the context of a request. Instead of writing -`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +`logger.WithFields(logrus.Fields{"request_id": request_id, "user_ip": user_ip})` on every line, you can create a `logrus.Entry` to pass around instead: ```go -requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) -requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger := logger.WithFields(logrus.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") // will log request_id and user_ip requestLogger.Warn("something not great happened") ``` @@ -264,28 +265,31 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in `init`: ```go +package main + import ( - log "github.com/sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" - logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" "log/syslog" + + "github.com/sirupsen/logrus" + airbrake "gopkg.in/gemnasium/logrus-airbrake-hook.v2" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" ) func init() { // Use the Airbrake hook to report errors that have Error severity or above to // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) + logrus.AddHook(airbrake.NewHook(123, "xyz", "production")) hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") if err != nil { - log.Error("Unable to connect to local syslog daemon") + logrus.Error("Unable to connect to local syslog daemon") } else { - log.AddHook(hook) + logrus.AddHook(hook) } } ``` -Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). +Note: Syslog hooks also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) @@ -295,15 +299,15 @@ A list of currently known service hooks can be found in this wiki [page](https:/ Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. 
```go -log.Trace("Something very low level.") -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") +logrus.Trace("Something very low level.") +logrus.Debug("Useful debugging information.") +logrus.Info("Something noteworthy happened!") +logrus.Warn("You should probably take a look at this.") +logrus.Error("Something failed but I'm not quitting.") // Calls os.Exit(1) after logging -log.Fatal("Bye.") +logrus.Fatal("Bye.") // Calls panic() after logging -log.Panic("I'm bailing.") +logrus.Panic("I'm bailing.") ``` You can set the logging level on a `Logger`, then it will only log entries with @@ -311,13 +315,13 @@ that severity or anything above it: ```go // Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) +logrus.SetLevel(logrus.InfoLevel) ``` -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +It may be useful to set `logrus.Level = logrus.DebugLevel` in a debug or verbose environment if your application has that. -Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). +Note: If you want different log levels for global (`logrus.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). #### Entries @@ -340,17 +344,17 @@ could do: ```go import ( - log "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus" ) func init() { // do something here to set environment depending on an environment variable // or command-line flag if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) + logrus.SetFormatter(&logrus.JSONFormatter{}) } else { // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) + logrus.SetFormatter(&logrus.TextFormatter{}) } } ``` @@ -372,11 +376,11 @@ The built-in logging formatters are: * When colors are enabled, levels are truncated to 4 characters by default. To disable truncation set the `DisableLevelTruncation` field to `true`. * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). + * All options are listed in the [generated docs](https://pkg.go.dev/github.com/sirupsen/logrus#TextFormatter). * `logrus.JSONFormatter`. Logs fields as JSON. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + * All options are listed in the [generated docs](https://pkg.go.dev/github.com/sirupsen/logrus#JSONFormatter). -Third party logging formatters: +Third-party logging formatters: * [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. * [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). @@ -384,7 +388,7 @@ Third party logging formatters: * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). 
Displays log entry source along with alternative layout. * [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. * [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. -* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files. +* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Save log to files. * [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added. You can define your formatter by implementing the `Formatter` interface, @@ -393,10 +397,9 @@ requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a default ones (see Entries section above): ```go -type MyJSONFormatter struct { -} +type MyJSONFormatter struct{} -log.SetFormatter(new(MyJSONFormatter)) +logrus.SetFormatter(new(MyJSONFormatter)) func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { // Note this doesn't include Time, Level and Message which are available on @@ -455,17 +458,18 @@ entries. It should not be a feature of the application-level logger. #### Testing -Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: +Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: * decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook * a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): ```go import( + "testing" + "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" - "testing" ) func TestSomething(t*testing.T){ @@ -486,15 +490,15 @@ func TestSomething(t*testing.T){ Logrus can register one or more functions that will be called when any `fatal` level message is logged. The registered handlers will be executed before logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need -to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. +to gracefully shut down. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. -``` -... +```go +// ... handler := func() { - // gracefully shutdown something... + // gracefully shut down something... } logrus.RegisterExitHandler(handler) -... +// ... ``` #### Thread safety @@ -502,7 +506,7 @@ logrus.RegisterExitHandler(handler) By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. -Situation when locking is not needed includes: +Situations when locking is not needed include: * You have no hooks registered, or hooks calling is already thread-safe. 
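The README's `TestSomething` example is truncated by the hunk above; for reference, a minimal self-contained sketch of the same null-logger pattern, using only the documented `test` hook API (`NewNullLogger`, `Entries`, `LastEntry`, `Reset`) with plain `testing` assertions instead of testify:

```go
package example

import (
	"testing"

	"github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus/hooks/test"
)

func TestSomething(t *testing.T) {
	// NewNullLogger returns a logger that records entries without writing
	// them anywhere, plus a hook for inspecting what was logged.
	logger, hook := test.NewNullLogger()

	logger.Error("Hello error")

	if len(hook.Entries) != 1 {
		t.Fatalf("expected 1 entry, got %d", len(hook.Entries))
	}
	if hook.LastEntry().Level != logrus.ErrorLevel {
		t.Errorf("expected ErrorLevel, got %v", hook.LastEntry().Level)
	}
	if hook.LastEntry().Message != "Hello error" {
		t.Errorf("unexpected message: %q", hook.LastEntry().Message)
	}

	// Reset clears the recorded entries between test phases.
	hook.Reset()
	if hook.LastEntry() != nil {
		t.Error("expected no entries after Reset")
	}
}
```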
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml index df9d65c3a..e90f09ea6 100644 --- a/vendor/github.com/sirupsen/logrus/appveyor.yml +++ b/vendor/github.com/sirupsen/logrus/appveyor.yml @@ -1,14 +1,12 @@ -version: "{build}" +# Minimal stub to satisfy AppVeyor CI +version: 1.0.{build} platform: x64 -clone_folder: c:\gopath\src\github.com\sirupsen\logrus -environment: - GOPATH: c:\gopath +shallow_clone: true + branches: only: - master -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version + - main + build_script: - - go get -t - - go test + - echo "No-op build to satisfy AppVeyor CI" diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index 71cdbbc35..71d796d0b 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -34,13 +34,15 @@ func init() { minimumCallerDepth = 1 } -// Defines the key when adding errors using WithError. +// ErrorKey defines the key when adding errors using [WithError], [Logger.WithError]. var ErrorKey = "error" -// An entry is the final or intermediate Logrus logging entry. It contains all +// Entry is the final or intermediate Logrus logging entry. It contains all // the fields passed with WithField{,s}. It's finally logged when Trace, Debug, // Info, Warn, Error, Fatal or Panic is called on it. These objects can be // reused and passed around as much as you wish to avoid field duplication. +// +//nolint:recvcheck // the methods of "Entry" use pointer receiver and non-pointer receiver. type Entry struct { Logger *Logger @@ -86,12 +88,12 @@ func (entry *Entry) Dup() *Entry { return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} } -// Returns the bytes representation of this entry from the formatter. +// Bytes returns the bytes representation of this entry from the formatter. func (entry *Entry) Bytes() ([]byte, error) { return entry.Logger.Formatter.Format(entry) } -// Returns the string representation from the reader and ultimately the +// String returns the string representation from the reader and ultimately the // formatter. func (entry *Entry) String() (string, error) { serialized, err := entry.Bytes() @@ -102,12 +104,13 @@ func (entry *Entry) String() (string, error) { return str, nil } -// Add an error as single field (using the key defined in ErrorKey) to the Entry. +// WithError adds an error as single field (using the key defined in [ErrorKey]) +// to the Entry. func (entry *Entry) WithError(err error) *Entry { return entry.WithField(ErrorKey, err) } -// Add a context to the Entry. +// WithContext adds a context to the Entry. func (entry *Entry) WithContext(ctx context.Context) *Entry { dataCopy := make(Fields, len(entry.Data)) for k, v := range entry.Data { @@ -116,12 +119,12 @@ func (entry *Entry) WithContext(ctx context.Context) *Entry { return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} } -// Add a single field to the Entry. +// WithField adds a single field to the Entry. func (entry *Entry) WithField(key string, value interface{}) *Entry { return entry.WithFields(Fields{key: value}) } -// Add a map of fields to the Entry. +// WithFields adds a map of fields to the Entry. 
func (entry *Entry) WithFields(fields Fields) *Entry {
 	data := make(Fields, len(entry.Data)+len(fields))
 	for k, v := range entry.Data {
@@ -150,7 +153,7 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
 	return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
 }
 
-// Overrides the time of the Entry.
+// WithTime overrides the time of the Entry.
 func (entry *Entry) WithTime(t time.Time) *Entry {
 	dataCopy := make(Fields, len(entry.Data))
 	for k, v := range entry.Data {
@@ -204,7 +207,7 @@ func getCaller() *runtime.Frame {
 
 		// If the caller isn't part of this package, we're done
 		if pkg != logrusPackage {
-			return &f //nolint:scopelint
+			return &f
 		}
 	}
 
@@ -432,7 +435,7 @@ func (entry *Entry) Panicln(args ...interface{}) {
 	entry.Logln(PanicLevel, args...)
 }
 
-// Sprintlnn => Sprint no newline. This is to get the behavior of how
+// sprintlnn => Sprint no newline. This is to get the behavior of how
 // fmt.Sprintln where spaces are always added between operands, regardless of
 // their type. Instead of vendoring the Sprintln implementation to spare a
 // string allocation, we do the simplest thing.
diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
index 3f151cdc3..9ab978a45 100644
--- a/vendor/github.com/sirupsen/logrus/hooks.go
+++ b/vendor/github.com/sirupsen/logrus/hooks.go
@@ -1,16 +1,16 @@
 package logrus
 
-// A hook to be fired when logging on the logging levels returned from
-// `Levels()` on your implementation of the interface. Note that this is not
+// Hook describes hooks to be fired when logging on the logging levels returned from
+// [Hook.Levels] on your implementation of the interface. Note that this is not
 // fired in a goroutine or a channel with workers, you should handle such
-// functionality yourself if your call is non-blocking and you don't wish for
+// functionality yourself if your call is non-blocking, and you don't wish for
 // the logging calls for levels returned from `Levels()` to block.
 type Hook interface {
 	Levels() []Level
 	Fire(*Entry) error
 }
 
-// Internal type for storing the hooks on a logger instance.
+// LevelHooks is an internal type for storing the hooks on a logger instance.
 type LevelHooks map[Level][]Hook
 
 // Add a hook to an instance of logger. This is called with
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
index 5ff0aef6d..f5b8c439e 100644
--- a/vendor/github.com/sirupsen/logrus/logger.go
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -72,16 +72,16 @@ func (mw *MutexWrap) Disable() {
 	mw.disabled = true
 }
 
-// Creates a new logger. Configuration should be set by changing `Formatter`,
-// `Out` and `Hooks` directly on the default logger instance. You can also just
+// New creates a new logger. Configuration should be set by changing [Formatter],
+// Out and Hooks directly on the default Logger instance. You can also just
 // instantiate your own:
 //
-//	var log = &logrus.Logger{
-//		Out: os.Stderr,
-//		Formatter: new(logrus.TextFormatter),
-//		Hooks: make(logrus.LevelHooks),
-//		Level: logrus.DebugLevel,
-//	}
+//	var log = &logrus.Logger{
+//		Out: os.Stderr,
+//		Formatter: new(logrus.TextFormatter),
+//		Hooks: make(logrus.LevelHooks),
+//		Level: logrus.DebugLevel,
+//	}
 //
 // It's recommended to make this a global instance called `log`.
func New() *Logger {
@@ -118,30 +118,30 @@ func (logger *Logger) WithField(key string, value interface{}) *Entry {
 	return entry.WithField(key, value)
 }
 
-// Adds a struct of fields to the log entry. All it does is call `WithField` for
-// each `Field`.
+// WithFields adds a struct of fields to the log entry. It calls [Entry.WithField]
+// for each Field.
 func (logger *Logger) WithFields(fields Fields) *Entry {
 	entry := logger.newEntry()
 	defer logger.releaseEntry(entry)
 	return entry.WithFields(fields)
 }
 
-// Add an error as single field to the log entry. All it does is call
-// `WithError` for the given `error`.
+// WithError adds an error as single field to the log entry. It calls
+// [Entry.WithError] for the given error.
 func (logger *Logger) WithError(err error) *Entry {
 	entry := logger.newEntry()
 	defer logger.releaseEntry(entry)
 	return entry.WithError(err)
 }
 
-// Add a context to the log entry.
+// WithContext adds a context to the log entry.
 func (logger *Logger) WithContext(ctx context.Context) *Entry {
 	entry := logger.newEntry()
 	defer logger.releaseEntry(entry)
 	return entry.WithContext(ctx)
 }
 
-// Overrides the time of the log entry.
+// WithTime overrides the time of the log entry.
 func (logger *Logger) WithTime(t time.Time) *Entry {
 	entry := logger.newEntry()
 	defer logger.releaseEntry(entry)
@@ -347,9 +347,9 @@ func (logger *Logger) Exit(code int) {
 	logger.ExitFunc(code)
 }
 
-//When file is opened with appending mode, it's safe to
-//write concurrently to a file (within 4k message on Linux).
-//In these cases user can choose to disable the lock.
+// SetNoLock disables the lock for situations where a file is opened in
+// appending mode and it is safe to write concurrently to it (within 4k
+// messages on Linux). In these cases the user can choose to disable the lock.
 func (logger *Logger) SetNoLock() {
 	logger.mu.Disable()
 }
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
index 2f16224cb..37fc4fef8 100644
--- a/vendor/github.com/sirupsen/logrus/logrus.go
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -6,13 +6,15 @@ import (
 	"strings"
 )
 
-// Fields type, used to pass to `WithFields`.
+// Fields type, used to pass to [WithFields].
 type Fields map[string]interface{}
 
 // Level type
+//
+//nolint:recvcheck // the methods of "Level" use pointer receiver and non-pointer receiver.
 type Level uint32
 
-// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+// Convert the Level to a string. E.g. [PanicLevel] becomes "panic".
 func (level Level) String() string {
 	if b, err := level.MarshalText(); err == nil {
 		return string(b)
@@ -77,7 +79,7 @@ func (level Level) MarshalText() ([]byte, error) {
 	return nil, fmt.Errorf("not a valid logrus level %d", level)
 }
 
-// A constant exposing all logging levels
+// AllLevels exposes all logging levels.
 var AllLevels = []Level{
 	PanicLevel,
 	FatalLevel,
@@ -119,8 +121,8 @@ var (
 )
 
 // StdLogger is what your logrus-enabled library should take, that way
-// it'll accept a stdlib logger and a logrus logger. There's no standard
-// interface, this is the closest we get, unfortunately.
+// it'll accept a stdlib logger ([log.Logger]) and a logrus logger.
+// There's no standard interface, so this is the closest we get, unfortunately.
type StdLogger interface { Print(...interface{}) Printf(string, ...interface{}) @@ -135,7 +137,8 @@ type StdLogger interface { Panicln(...interface{}) } -// The FieldLogger interface generalizes the Entry and Logger types +// FieldLogger extends the [StdLogger] interface, generalizing +// the [Entry] and [Logger] types. type FieldLogger interface { WithField(key string, value interface{}) *Entry WithFields(fields Fields) *Entry @@ -176,8 +179,9 @@ type FieldLogger interface { // IsPanicEnabled() bool } -// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is -// here for consistancy. Do not use. Use Logger or Entry instead. +// Ext1FieldLogger (the first extension to [FieldLogger]) is superfluous, it is +// here for consistency. Do not use. Use [FieldLogger], [Logger] or [Entry] +// instead. type Ext1FieldLogger interface { FieldLogger Tracef(format string, args ...interface{}) diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go index 499789984..69956b425 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -1,4 +1,4 @@ -// +build darwin dragonfly freebsd netbsd openbsd +// +build darwin dragonfly freebsd netbsd openbsd hurd // +build !js package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go index 04748b851..c9aed267a 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -1,5 +1,7 @@ +//go:build (linux || aix || zos) && !js && !wasi // +build linux aix zos // +build !js +// +build !wasi package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go b/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go new file mode 100644 index 000000000..2822b212f --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go @@ -0,0 +1,8 @@ +//go:build wasi +// +build wasi + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go b/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go new file mode 100644 index 000000000..108a6be12 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go @@ -0,0 +1,8 @@ +//go:build wasip1 +// +build wasip1 + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go index be2c6efe5..6dfeb18b1 100644 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -306,6 +306,7 @@ func (f *TextFormatter) needsQuoting(text string) bool { return false } for _, ch := range text { + //nolint:staticcheck // QF1001: could apply De Morgan's law if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') || @@ -334,6 +335,6 @@ func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { if !f.needsQuoting(stringVal) { b.WriteString(stringVal) } else { - b.WriteString(fmt.Sprintf("%q", stringVal)) + fmt.Fprintf(b, "%q", stringVal) } } diff --git a/vendor/github.com/skeema/knownhosts/README.md b/vendor/github.com/skeema/knownhosts/README.md index 170e04d14..58b7fbe5b 100644 --- a/vendor/github.com/skeema/knownhosts/README.md +++ 
b/vendor/github.com/skeema/knownhosts/README.md @@ -20,7 +20,6 @@ Package [github.com/skeema/knownhosts](https://github.com/skeema/knownhosts) pro * Auto-populate ssh.ClientConfig.HostKeyAlgorithms easily based on known_hosts, providing a solution for [golang/go#29286](https://github.com/golang/go/issues/29286). (This also properly handles cert algorithms for hosts using CA keys when [using the NewDB constructor](#enhancements-requiring-extra-parsing) added in skeema/knownhosts v1.3.0.) * Properly match wildcard hostname known_hosts entries regardless of port number, providing a solution for [golang/go#52056](https://github.com/golang/go/issues/52056). (Added in v1.3.0; requires [using the NewDB constructor](#enhancements-requiring-extra-parsing)) * Write new known_hosts entries to an io.Writer -* Properly format/normalize new known_hosts entries containing ipv6 addresses, providing a solution for [golang/go#53463](https://github.com/golang/go/issues/53463) * Easily determine if an ssh.HostKeyCallback's error corresponds to a host whose key has changed (indicating potential MitM attack) vs a host that just isn't known yet ## How host key lookup works diff --git a/vendor/github.com/skeema/knownhosts/knownhosts.go b/vendor/github.com/skeema/knownhosts/knownhosts.go index 2b7536e0d..4a8b6be09 100644 --- a/vendor/github.com/skeema/knownhosts/knownhosts.go +++ b/vendor/github.com/skeema/knownhosts/knownhosts.go @@ -194,11 +194,9 @@ func (hkdb *HostKeyDB) HostKeys(hostWithPort string) (keys []PublicKey) { // in the known_hosts file will properly be converted to the corresponding // ssh.CertAlgo* values. func (hkdb *HostKeyDB) HostKeyAlgorithms(hostWithPort string) (algos []string) { - // We ensure that algos never contains duplicates. This is done for robustness - // even though currently golang.org/x/crypto/ssh/knownhosts never exposes - // multiple keys of the same type. This way our behavior here is unaffected - // even if https://github.com/golang/go/issues/28870 is implemented, for - // example by https://github.com/golang/crypto/pull/254. + // We ensure that the return value never contains duplicates. This is needed + // since golang.org/x/crypto/ssh/knownhosts can now return multiple keys of + // the same type after https://github.com/golang/crypto/pull/254 was merged. hostKeys := hkdb.HostKeys(hostWithPort) seen := make(map[string]struct{}, len(hostKeys)) addAlgo := func(typ string, cert bool) { @@ -367,26 +365,23 @@ func IsHostUnknown(err error) bool { } // Normalize normalizes an address into the form used in known_hosts. This -// implementation includes a fix for https://github.com/golang/go/issues/53463 -// and will omit brackets around ipv6 addresses on standard port 22. +// implementation fixes the buggy IPv6 edge-cases found in golang.org/x/crypto +// below v0.42.0; see https://github.com/golang/go/issues/53463. In all other +// cases, this simply delegates to the upstream Normalize implementation. 
func Normalize(address string) string { - host, port, err := net.SplitHostPort(address) - if err != nil { - host = address - port = "22" + // Although our go.mod specifies a new-enough golang.org/x/crypto to avoid + // the IPv6 bug, this logic will remain in-place for sake of robustness in + // non-go.mod use-cases (OS package managers, hacky forks like go-git's, etc) + result := xknownhosts.Normalize(address) + if strings.HasSuffix(result, "]") && strings.HasPrefix(result, "[") { + return result[1 : len(result)-1] } - entry := host - if port != "22" { - entry = "[" + entry + "]:" + port - } else if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { - entry = entry[1 : len(entry)-1] - } - return entry + return result } // Line returns a line to append to the known_hosts files. This implementation // uses the local patched implementation of Normalize in order to solve -// https://github.com/golang/go/issues/53463. +// https://github.com/golang/go/issues/53463 when using x/crypto below v0.42.0. func Line(addresses []string, key ssh.PublicKey) string { var trimmed []string for _, a := range addresses { diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 6acf8ab1e..104dc2440 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -57,3 +57,10 @@ linters: - common-false-positives - legacy - std-error-handling + settings: + govet: + # Disable buildtag check to allow dual build tag syntax (both //go:build and // +build). + # This is necessary for Go 1.15 compatibility since //go:build was introduced in Go 1.17. + # This can be removed once Cobra requires Go 1.17 or higher. + disable: + - buildtag diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 78088db69..c05fed45a 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -557,7 +557,7 @@ func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { } } -var minUsagePadding = 25 +const minUsagePadding = 25 // UsagePadding return padding for the usage. func (c *Command) UsagePadding() int { @@ -567,7 +567,7 @@ func (c *Command) UsagePadding() int { return c.parent.commandsMaxUseLen } -var minCommandPathPadding = 11 +const minCommandPathPadding = 11 // CommandPathPadding return padding for the command path. func (c *Command) CommandPathPadding() int { @@ -577,7 +577,7 @@ func (c *Command) CommandPathPadding() int { return c.parent.commandsMaxCommandPathLen } -var minNamePadding = 11 +const minNamePadding = 11 // NamePadding returns padding for the name. func (c *Command) NamePadding() int { @@ -1939,7 +1939,7 @@ type tmplFunc struct { fn func(io.Writer, interface{}) error } -var defaultUsageTemplate = `Usage:{{if .Runnable}} +const defaultUsageTemplate = `Usage:{{if .Runnable}} {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} @@ -2039,7 +2039,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error { return nil } -var defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} +const defaultHelpTemplate = `{{with (or .Long .Short)}}{{. 
| trimTrailingWhitespaces}} {{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` @@ -2061,7 +2061,7 @@ func defaultHelpFunc(w io.Writer, in interface{}) error { return nil } -var defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} +const defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} ` // defaultVersionFunc is equivalent to executing defaultVersionTemplate. The two should be changed in sync. diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.go b/vendor/github.com/spf13/cobra/doc/yaml_docs.go index 371971703..20950fea1 100644 --- a/vendor/github.com/spf13/cobra/doc/yaml_docs.go +++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.go @@ -24,7 +24,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v3" ) type cmdOption struct { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 521daa25d..e980ab62b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -14,9 +14,15 @@ import ( // DefaultClient is the default Client and is used by Get, Head, Post and PostForm. // Please be careful of initialization order - for example, if you change // the global propagator, the DefaultClient might still be using the old one. +// +// Deprecated: [DefaultClient] will be removed in a future release. +// Create your own [http.Client] based on the [Transport] example: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp#example-NewTransport var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. +// +// Deprecated: [Get] will be removed in a future release. +// Create your own [http.Client] based on the [Transport] example: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp#example-NewTransport func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, http.NoBody) if err != nil { @@ -26,6 +32,9 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) } // Head is a convenient replacement for http.Head that adds a span around the request. +// +// Deprecated: [Head] will be removed in a future release. +// Create your own [http.Client] based on the [Transport] example: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp#example-NewTransport func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, http.NoBody) if err != nil { @@ -35,6 +44,9 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error } // Post is a convenient replacement for http.Post that adds a span around the request. +// +// Deprecated: [Post] will be removed in a future release. 
+// Create your own [http.Client] based on the [Transport] example: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp#example-NewTransport func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body) if err != nil { @@ -45,6 +57,9 @@ func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (r } // PostForm is a convenient replacement for http.PostForm that adds a span around the request. +// +// Deprecated: [PostForm] will be removed in a future release. +// Create your own [http.Client] based on the [Transport] example: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp#example-NewTransport func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) { return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index 38fb79c03..c3be78616 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -26,7 +26,6 @@ type config struct { Meter metric.Meter Propagators propagation.TextMapPropagator SpanStartOptions []trace.SpanStartOption - PublicEndpoint bool PublicEndpointFn func(*http.Request) bool ReadEvent bool WriteEvent bool @@ -96,17 +95,19 @@ func WithMeterProvider(provider metric.MeterProvider) Option { // WithPublicEndpoint configures the Handler to link the span with an incoming // span context. If this option is not provided, then the association is a child // association instead of a link. +// +// Deprecated: Use [WithPublicEndpointFn] instead. +// To migrate, replace WithPublicEndpoint() with: +// +// WithPublicEndpointFn(func(*http.Request) bool { return true }) func WithPublicEndpoint() Option { - return optionFunc(func(c *config) { - c.PublicEndpoint = true - }) + return WithPublicEndpointFn(func(*http.Request) bool { return true }) } // WithPublicEndpointFn runs with every request, and allows conditionally // configuring the Handler to link the span with an incoming span context. If // this option is not provided or returns false, then the association is a // child association instead of a link. -// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn. func WithPublicEndpointFn(fn func(*http.Request) bool) Option { return optionFunc(func(c *config) { c.PublicEndpointFn = fn @@ -143,11 +144,13 @@ func WithFilter(f Filter) Option { }) } -type event int +// Event represents message event types for [WithMessageEvents]. +type Event int // Different types of events that can be recorded, see WithMessageEvents. 
const ( - ReadEvents event = iota + unspecifiedEvents Event = iota + ReadEvents WriteEvents ) @@ -160,7 +163,7 @@ const ( // using the ReadBytesKey // - WriteEvents: Record the number of bytes written after every http.ResponeWriter.Write // using the WriteBytesKey -func WithMessageEvents(events ...event) Option { +func WithMessageEvents(events ...Event) Option { return optionFunc(func(c *config) { for _, e := range events { switch e { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go index 56b24b982..1c9aa3ff4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -2,6 +2,5 @@ // SPDX-License-Identifier: Apache-2.0 // Package otelhttp provides an http.Handler and functions that are intended -// to be used to add tracing by wrapping existing handlers (with Handler) and -// routes WithRouteTag. +// to be used to add tracing by wrapping existing handlers. package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index fef83b42f..c1bbf3a3c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -29,7 +29,6 @@ type middleware struct { writeEvent bool filters []Filter spanNameFormatter func(string, *http.Request) string - publicEndpoint bool publicEndpointFn func(*http.Request) bool metricAttributesFn func(*http.Request) []attribute.KeyValue @@ -77,7 +76,6 @@ func (h *middleware) configure(c *config) { h.writeEvent = c.WriteEvent h.filters = c.Filters h.spanNameFormatter = c.SpanNameFormatter - h.publicEndpoint = c.PublicEndpoint h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName h.semconv = semconv.NewHTTPServer(c.Meter) @@ -102,7 +100,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } opts = append(opts, h.spanStartOptions...) - if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { + if h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx)) { opts = append(opts, trace.WithNewRoot()) // Linking incoming span context if any for public endpoint. if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() { @@ -224,6 +222,9 @@ func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.Ke // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. +// +// Deprecated: spans are automatically annotated with the route attribute. +// To annotate metrics, use the [WithMetricAttributesFn] option. 
func WithRouteTag(route string, h http.Handler) http.Handler {
 	attr := semconv.NewHTTPServer(nil).Route(route)
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/client.go
new file mode 100644
index 000000000..45d3d934f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/client.go
@@ -0,0 +1,305 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconv/client.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv provides OpenTelemetry semantic convention types and
+// functionality.
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"reflect"
+	"slices"
+	"strconv"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/semconv/v1.37.0"
+	"go.opentelemetry.io/otel/semconv/v1.37.0/httpconv"
+)
+
+type HTTPClient struct {
+	requestBodySize httpconv.ClientRequestBodySize
+	requestDuration httpconv.ClientRequestDuration
+}
+
+func NewHTTPClient(meter metric.Meter) HTTPClient {
+	client := HTTPClient{}
+
+	var err error
+	client.requestBodySize, err = httpconv.NewClientRequestBodySize(meter)
+	handleErr(err)
+
+	client.requestDuration, err = httpconv.NewClientRequestDuration(
+		meter,
+		metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
+	)
+	handleErr(err)
+
+	return client
+}
+
+func (n HTTPClient) Status(code int) (codes.Code, string) {
+	if code < 100 || code >= 600 {
+		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+	}
+	if code >= 400 {
+		return codes.Error, ""
+	}
+	return codes.Unset, ""
+}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
+func (n HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+	/*
+	below attributes are returned:
+	- http.request.method
+	- http.request.method.original
+	- url.full
+	- server.address
+	- server.port
+	- network.protocol.name
+	- network.protocol.version
+	*/
+	numOfAttributes := 3 // method, URL, and server address.
+ + var urlHost string + if req.URL != nil { + urlHost = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{urlHost, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if eligiblePort > 0 { + numOfAttributes++ + } + useragent := req.UserAgent() + if useragent != "" { + numOfAttributes++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + numOfAttributes++ + } + if protoVersion != "" { + numOfAttributes++ + } + + method, originalMethod := n.method(req.Method) + if originalMethod != (attribute.KeyValue{}) { + numOfAttributes++ + } + + attrs := make([]attribute.KeyValue, 0, numOfAttributes) + + attrs = append(attrs, method) + if originalMethod != (attribute.KeyValue{}) { + attrs = append(attrs, originalMethod) + } + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, semconv.URLFull(u)) + + attrs = append(attrs, semconv.ServerAddress(requestHost)) + if eligiblePort > 0 { + attrs = append(attrs, semconv.ServerPort(eligiblePort)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconv.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconv.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. +func (n HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + /* + below attributes are returned: + - http.response.status_code + - error.type + */ + var count int + if resp.StatusCode > 0 { + count++ + } + + if isErrorStatusCode(resp.StatusCode) { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + if resp.StatusCode > 0 { + attrs = append(attrs, semconv.HTTPResponseStatusCode(resp.StatusCode)) + } + + if isErrorStatusCode(resp.StatusCode) { + errorType := strconv.Itoa(resp.StatusCode) + attrs = append(attrs, semconv.ErrorTypeKey.String(errorType)) + } + return attrs +} + +func (n HTTPClient) ErrorType(err error) attribute.KeyValue { + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+ value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return semconv.ErrorTypeOther + } + + return semconv.ErrorTypeKey.String(value) +} + +func (n HTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconv.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconv.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconv.HTTPRequestMethodGet, orig +} + +func (n HTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 2 + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + num++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconv.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + semconv.ServerAddress(requestHost), + n.scheme(req), + ) + + if port > 0 { + attributes = append(attributes, semconv.ServerPort(port)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +type MetricOpts struct { + measurement metric.MeasurementOption + addOptions metric.AddOption +} + +func (o MetricOpts) MeasurementOption() metric.MeasurementOption { + return o.measurement +} + +func (o MetricOpts) AddOptions() metric.AddOption { + return o.addOptions +} + +func (n HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { + opts := map[string]MetricOpts{} + + attributes := n.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + opts["new"] = MetricOpts{ + measurement: set, + addOptions: set, + } + + return opts +} + +func (n HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { + n.requestBodySize.Inst().Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) + n.requestDuration.Inst().Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption()) +} + +// TraceAttributes returns attributes for httptrace. 
+func (n HTTPClient) TraceAttributes(host string) []attribute.KeyValue { + return []attribute.KeyValue{ + semconv.ServerAddress(host), + } +} + +func (n HTTPClient) scheme(req *http.Request) attribute.KeyValue { + if req.URL != nil && req.URL.Scheme != "" { + return semconv.URLScheme(req.URL.Scheme) + } + if req.TLS != nil { + return semconv.URLScheme("https") + } + return semconv.URLScheme("http") +} + +func isErrorStatusCode(code int) bool { + return code >= 400 || code < 100 +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go deleted file mode 100644 index 821b80ec4..000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ /dev/null @@ -1,248 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/semconv/env.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - -import ( - "context" - "fmt" - "net/http" - "strings" - "sync" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv" -) - -// OTelSemConvStabilityOptIn is an environment variable. -// That can be set to "http/dup" to keep getting the old HTTP semantic conventions. -const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN" - -type ResponseTelemetry struct { - StatusCode int - ReadBytes int64 - ReadError error - WriteBytes int64 - WriteError error -} - -type HTTPServer struct { - requestBodySizeHistogram httpconv.ServerRequestBodySize - responseBodySizeHistogram httpconv.ServerResponseBodySize - requestDurationHistogram httpconv.ServerRequestDuration -} - -// RequestTraceAttrs returns trace attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { - return CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts) -} - -func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { - return []attribute.KeyValue{ - CurrentHTTPServer{}.NetworkTransportAttr(network), - } -} - -// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. -// -// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
-func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { - return CurrentHTTPServer{}.ResponseTraceAttrs(resp) -} - -// Route returns the attribute for the route. -func (s HTTPServer) Route(route string) attribute.KeyValue { - return CurrentHTTPServer{}.Route(route) -} - -// Status returns a span status code and message for an HTTP status code -// value returned by a server. Status codes in the 400-499 range are not -// returned as errors. -func (s HTTPServer) Status(code int) (codes.Code, string) { - if code < 100 || code >= 600 { - return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) - } - if code >= 500 { - return codes.Error, "" - } - return codes.Unset, "" -} - -type ServerMetricData struct { - ServerName string - ResponseSize int64 - - MetricData - MetricAttributes -} - -type MetricAttributes struct { - Req *http.Request - StatusCode int - AdditionalAttributes []attribute.KeyValue -} - -type MetricData struct { - RequestSize int64 - - // The request duration, in milliseconds - ElapsedTime float64 -} - -var ( - metricAddOptionPool = &sync.Pool{ - New: func() any { - return &[]metric.AddOption{} - }, - } - - metricRecordOptionPool = &sync.Pool{ - New: func() any { - return &[]metric.RecordOption{} - }, - } -) - -func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { - attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) - *recordOpts = append(*recordOpts, o) - s.requestBodySizeHistogram.Inst().Record(ctx, md.RequestSize, *recordOpts...) - s.responseBodySizeHistogram.Inst().Record(ctx, md.ResponseSize, *recordOpts...) - s.requestDurationHistogram.Inst().Record(ctx, md.ElapsedTime/1000.0, o) - *recordOpts = (*recordOpts)[:0] - metricRecordOptionPool.Put(recordOpts) -} - -// hasOptIn returns true if the comma-separated version string contains the -// exact optIn value. -func hasOptIn(version, optIn string) bool { - for _, v := range strings.Split(version, ",") { - if strings.TrimSpace(v) == optIn { - return true - } - } - return false -} - -func NewHTTPServer(meter metric.Meter) HTTPServer { - server := HTTPServer{} - - var err error - server.requestBodySizeHistogram, err = httpconv.NewServerRequestBodySize(meter) - handleErr(err) - - server.responseBodySizeHistogram, err = httpconv.NewServerResponseBodySize(meter) - handleErr(err) - - server.requestDurationHistogram, err = httpconv.NewServerRequestDuration( - meter, - metric.WithExplicitBucketBoundaries( - 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, - 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10, - ), - ) - handleErr(err) - return server -} - -type HTTPClient struct { - requestBodySize httpconv.ClientRequestBodySize - requestDuration httpconv.ClientRequestDuration -} - -func NewHTTPClient(meter metric.Meter) HTTPClient { - client := HTTPClient{} - - var err error - client.requestBodySize, err = httpconv.NewClientRequestBodySize(meter) - handleErr(err) - - client.requestDuration, err = httpconv.NewClientRequestDuration( - meter, - metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10), - ) - handleErr(err) - - return client -} - -// RequestTraceAttrs returns attributes for an HTTP request made by a client. 
-func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { - return CurrentHTTPClient{}.RequestTraceAttrs(req) -} - -// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. -func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { - return CurrentHTTPClient{}.ResponseTraceAttrs(resp) -} - -func (c HTTPClient) Status(code int) (codes.Code, string) { - if code < 100 || code >= 600 { - return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) - } - if code >= 400 { - return codes.Error, "" - } - return codes.Unset, "" -} - -func (c HTTPClient) ErrorType(err error) attribute.KeyValue { - return CurrentHTTPClient{}.ErrorType(err) -} - -type MetricOpts struct { - measurement metric.MeasurementOption - addOptions metric.AddOption -} - -func (o MetricOpts) MeasurementOption() metric.MeasurementOption { - return o.measurement -} - -func (o MetricOpts) AddOptions() metric.AddOption { - return o.addOptions -} - -func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { - opts := map[string]MetricOpts{} - - attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) - set := metric.WithAttributeSet(attribute.NewSet(attributes...)) - opts["new"] = MetricOpts{ - measurement: set, - addOptions: set, - } - - return opts -} - -func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { - s.requestBodySize.Inst().Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) - s.requestDuration.Inst().Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption()) -} - -func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue { - return CurrentHTTPClient{}.TraceAttributes(host) -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go index 1bb207b80..a8a0d58df 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go @@ -6,10 +6,10 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/ // Generate semconv package: //go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=bench_test.go //go:generate gotmpl --body=../../../../../../internal/shared/semconv/common_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=common_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv_test.go +//go:generate gotmpl 
--body=../../../../../../internal/shared/semconv/server.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=server.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/server_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=server_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/client.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=client.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/client_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=client_test.go //go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconvtest_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconvtest_test.go //go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util.go //go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util_test.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go deleted file mode 100644 index 28c51a3b3..000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ /dev/null @@ -1,517 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/semconv/httpconv.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv provides OpenTelemetry semantic convention types and -// functionality. -package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - -import ( - "fmt" - "net/http" - "reflect" - "slices" - "strconv" - "strings" - - "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.37.0" -) - -type RequestTraceAttrsOpts struct { - // If set, this is used as value for the "http.client_ip" attribute. - HTTPClientIP string -} - -type CurrentHTTPServer struct{} - -// RequestTraceAttrs returns trace attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. 
-func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { - count := 3 // ServerAddress, Method, Scheme - - var host string - var p int - if server == "" { - host, p = SplitHostPort(req.Host) - } else { - // Prioritize the primary server name. - host, p = SplitHostPort(server) - if p < 0 { - _, p = SplitHostPort(req.Host) - } - } - - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - count++ - } - - method, methodOriginal := n.method(req.Method) - if methodOriginal != (attribute.KeyValue{}) { - count++ - } - - scheme := n.scheme(req.TLS != nil) - - peer, peerPort := SplitHostPort(req.RemoteAddr) - if peer != "" { - // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a - // file-path that would be interpreted with a sock family. - count++ - if peerPort > 0 { - count++ - } - } - - useragent := req.UserAgent() - if useragent != "" { - count++ - } - - // For client IP, use, in order: - // 1. The value passed in the options - // 2. The value in the X-Forwarded-For header - // 3. The peer address - clientIP := opts.HTTPClientIP - if clientIP == "" { - clientIP = serverClientIP(req.Header.Get("X-Forwarded-For")) - if clientIP == "" { - clientIP = peer - } - } - if clientIP != "" { - count++ - } - - if req.URL != nil && req.URL.Path != "" { - count++ - } - - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" && protoName != "http" { - count++ - } - if protoVersion != "" { - count++ - } - - route := httpRoute(req.Pattern) - if route != "" { - count++ - } - - attrs := make([]attribute.KeyValue, 0, count) - attrs = append(attrs, - semconvNew.ServerAddress(host), - method, - scheme, - ) - - if hostPort > 0 { - attrs = append(attrs, semconvNew.ServerPort(hostPort)) - } - if methodOriginal != (attribute.KeyValue{}) { - attrs = append(attrs, methodOriginal) - } - - if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { - // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a - // file-path that would be interpreted with a sock family. 
- attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) - if peerPort > 0 { - attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort)) - } - } - - if useragent != "" { - attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) - } - - if clientIP != "" { - attrs = append(attrs, semconvNew.ClientAddress(clientIP)) - } - - if req.URL != nil && req.URL.Path != "" { - attrs = append(attrs, semconvNew.URLPath(req.URL.Path)) - } - - if protoName != "" && protoName != "http" { - attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) - } - if protoVersion != "" { - attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) - } - - if route != "" { - attrs = append(attrs, n.Route(route)) - } - - return attrs -} - -func (n CurrentHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { - switch network { - case "tcp", "tcp4", "tcp6": - return semconvNew.NetworkTransportTCP - case "udp", "udp4", "udp6": - return semconvNew.NetworkTransportUDP - case "unix", "unixgram", "unixpacket": - return semconvNew.NetworkTransportUnix - default: - return semconvNew.NetworkTransportPipe - } -} - -func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { - if method == "" { - return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} - } - if attr, ok := methodLookup[method]; ok { - return attr, attribute.KeyValue{} - } - - orig := semconvNew.HTTPRequestMethodOriginal(method) - if attr, ok := methodLookup[strings.ToUpper(method)]; ok { - return attr, orig - } - return semconvNew.HTTPRequestMethodGet, orig -} - -func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { //nolint:revive // ignore linter - if https { - return semconvNew.URLScheme("https") - } - return semconvNew.URLScheme("http") -} - -// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP -// response. -// -// If any of the fields in the ResponseTelemetry are not set the attribute will -// be omitted. -func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { - var count int - - if resp.ReadBytes > 0 { - count++ - } - if resp.WriteBytes > 0 { - count++ - } - if resp.StatusCode > 0 { - count++ - } - - attributes := make([]attribute.KeyValue, 0, count) - - if resp.ReadBytes > 0 { - attributes = append(attributes, - semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)), - ) - } - if resp.WriteBytes > 0 { - attributes = append(attributes, - semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)), - ) - } - if resp.StatusCode > 0 { - attributes = append(attributes, - semconvNew.HTTPResponseStatusCode(resp.StatusCode), - ) - } - - return attributes -} - -// Route returns the attribute for the route. -func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { - return semconvNew.HTTPRoute(route) -} - -func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { - num := len(additionalAttributes) + 3 - var host string - var p int - if server == "" { - host, p = SplitHostPort(req.Host) - } else { - // Prioritize the primary server name. 
- host, p = SplitHostPort(server) - if p < 0 { - _, p = SplitHostPort(req.Host) - } - } - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - num++ - } - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" { - num++ - } - if protoVersion != "" { - num++ - } - - if statusCode > 0 { - num++ - } - - attributes := slices.Grow(additionalAttributes, num) - attributes = append(attributes, - semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), - n.scheme(req.TLS != nil), - semconvNew.ServerAddress(host)) - - if hostPort > 0 { - attributes = append(attributes, semconvNew.ServerPort(hostPort)) - } - if protoName != "" { - attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) - } - if protoVersion != "" { - attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) - } - - if statusCode > 0 { - attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) - } - return attributes -} - -type CurrentHTTPClient struct{} - -// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. -func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { - /* - below attributes are returned: - - http.request.method - - http.request.method.original - - url.full - - server.address - - server.port - - network.protocol.name - - network.protocol.version - */ - numOfAttributes := 3 // URL, server address, proto, and method. - - var urlHost string - if req.URL != nil { - urlHost = req.URL.Host - } - var requestHost string - var requestPort int - for _, hostport := range []string{urlHost, req.Header.Get("Host")} { - requestHost, requestPort = SplitHostPort(hostport) - if requestHost != "" || requestPort > 0 { - break - } - } - - eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) - if eligiblePort > 0 { - numOfAttributes++ - } - useragent := req.UserAgent() - if useragent != "" { - numOfAttributes++ - } - - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" && protoName != "http" { - numOfAttributes++ - } - if protoVersion != "" { - numOfAttributes++ - } - - method, originalMethod := n.method(req.Method) - if originalMethod != (attribute.KeyValue{}) { - numOfAttributes++ - } - - attrs := make([]attribute.KeyValue, 0, numOfAttributes) - - attrs = append(attrs, method) - if originalMethod != (attribute.KeyValue{}) { - attrs = append(attrs, originalMethod) - } - - var u string - if req.URL != nil { - // Remove any username/password info that may be in the URL. - userinfo := req.URL.User - req.URL.User = nil - u = req.URL.String() - // Restore any username/password info that was removed. - req.URL.User = userinfo - } - attrs = append(attrs, semconvNew.URLFull(u)) - - attrs = append(attrs, semconvNew.ServerAddress(requestHost)) - if eligiblePort > 0 { - attrs = append(attrs, semconvNew.ServerPort(eligiblePort)) - } - - if protoName != "" && protoName != "http" { - attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) - } - if protoVersion != "" { - attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) - } - - return attrs -} - -// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. 
-func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { - /* - below attributes are returned: - - http.response.status_code - - error.type - */ - var count int - if resp.StatusCode > 0 { - count++ - } - - if isErrorStatusCode(resp.StatusCode) { - count++ - } - - attrs := make([]attribute.KeyValue, 0, count) - if resp.StatusCode > 0 { - attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode)) - } - - if isErrorStatusCode(resp.StatusCode) { - errorType := strconv.Itoa(resp.StatusCode) - attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType)) - } - return attrs -} - -func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { - t := reflect.TypeOf(err) - var value string - if t.PkgPath() == "" && t.Name() == "" { - // Likely a builtin type. - value = t.String() - } else { - value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) - } - - if value == "" { - return semconvNew.ErrorTypeOther - } - - return semconvNew.ErrorTypeKey.String(value) -} - -func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { - if method == "" { - return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} - } - if attr, ok := methodLookup[method]; ok { - return attr, attribute.KeyValue{} - } - - orig := semconvNew.HTTPRequestMethodOriginal(method) - if attr, ok := methodLookup[strings.ToUpper(method)]; ok { - return attr, orig - } - return semconvNew.HTTPRequestMethodGet, orig -} - -func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { - num := len(additionalAttributes) + 2 - var h string - if req.URL != nil { - h = req.URL.Host - } - var requestHost string - var requestPort int - for _, hostport := range []string{h, req.Header.Get("Host")} { - requestHost, requestPort = SplitHostPort(hostport) - if requestHost != "" || requestPort > 0 { - break - } - } - - port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) - if port > 0 { - num++ - } - - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" { - num++ - } - if protoVersion != "" { - num++ - } - - if statusCode > 0 { - num++ - } - - attributes := slices.Grow(additionalAttributes, num) - attributes = append(attributes, - semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), - semconvNew.ServerAddress(requestHost), - n.scheme(req), - ) - - if port > 0 { - attributes = append(attributes, semconvNew.ServerPort(port)) - } - if protoName != "" { - attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) - } - if protoVersion != "" { - attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) - } - - if statusCode > 0 { - attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) - } - return attributes -} - -// TraceAttributes returns attributes for httptrace. 
-func (n CurrentHTTPClient) TraceAttributes(host string) []attribute.KeyValue { - return []attribute.KeyValue{ - semconvNew.ServerAddress(host), - } -} - -func (n CurrentHTTPClient) scheme(req *http.Request) attribute.KeyValue { - if req.URL != nil && req.URL.Scheme != "" { - return semconvNew.URLScheme(req.URL.Scheme) - } - if req.TLS != nil { - return semconvNew.URLScheme("https") - } - return semconvNew.URLScheme("http") -} - -func isErrorStatusCode(code int) bool { - return code >= 400 || code < 100 -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/server.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/server.go new file mode 100644 index 000000000..5ae6a0738 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/server.go @@ -0,0 +1,403 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/server.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv provides OpenTelemetry semantic convention types and +// functionality. +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "context" + "fmt" + "net/http" + "slices" + "strings" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv" +) + +type RequestTraceAttrsOpts struct { + // If set, this is used as value for the "http.client_ip" attribute. + HTTPClientIP string +} + +type ResponseTelemetry struct { + StatusCode int + ReadBytes int64 + ReadError error + WriteBytes int64 + WriteError error +} + +type HTTPServer struct{ + requestBodySizeHistogram httpconv.ServerRequestBodySize + responseBodySizeHistogram httpconv.ServerResponseBodySize + requestDurationHistogram httpconv.ServerRequestDuration +} + +func NewHTTPServer(meter metric.Meter) HTTPServer { + server := HTTPServer{} + + var err error + server.requestBodySizeHistogram, err = httpconv.NewServerRequestBodySize(meter) + handleErr(err) + + server.responseBodySizeHistogram, err = httpconv.NewServerResponseBodySize(meter) + handleErr(err) + + server.requestDurationHistogram, err = httpconv.NewServerRequestDuration( + meter, + metric.WithExplicitBucketBoundaries( + 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, + 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10, + ), + ) + handleErr(err) + return server +} + +// Status returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func (n HTTPServer) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 500 { + return codes.Error, "" + } + return codes.Unset, "" +} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. 
More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (n HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { + count := 3 // ServerAddress, Method, Scheme + + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + count++ + } + + method, methodOriginal := n.method(req.Method) + if methodOriginal != (attribute.KeyValue{}) { + count++ + } + + scheme := n.scheme(req.TLS != nil) + + peer, peerPort := SplitHostPort(req.RemoteAddr) + if peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + count++ + if peerPort > 0 { + count++ + } + } + + useragent := req.UserAgent() + if useragent != "" { + count++ + } + + // For client IP, use, in order: + // 1. The value passed in the options + // 2. The value in the X-Forwarded-For header + // 3. The peer address + clientIP := opts.HTTPClientIP + if clientIP == "" { + clientIP = serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP == "" { + clientIP = peer + } + } + if clientIP != "" { + count++ + } + + if req.URL != nil && req.URL.Path != "" { + count++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + count++ + } + if protoVersion != "" { + count++ + } + + route := httpRoute(req.Pattern) + if route != "" { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + attrs = append(attrs, + semconv.ServerAddress(host), + method, + scheme, + ) + + if hostPort > 0 { + attrs = append(attrs, semconv.ServerPort(hostPort)) + } + if methodOriginal != (attribute.KeyValue{}) { + attrs = append(attrs, methodOriginal) + } + + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. 
+ attrs = append(attrs, semconv.NetworkPeerAddress(peer)) + if peerPort > 0 { + attrs = append(attrs, semconv.NetworkPeerPort(peerPort)) + } + } + + if useragent != "" { + attrs = append(attrs, semconv.UserAgentOriginal(useragent)) + } + + if clientIP != "" { + attrs = append(attrs, semconv.ClientAddress(clientIP)) + } + + if req.URL != nil && req.URL.Path != "" { + attrs = append(attrs, semconv.URLPath(req.URL.Path)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconv.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconv.NetworkProtocolVersion(protoVersion)) + } + + if route != "" { + attrs = append(attrs, n.Route(route)) + } + + return attrs +} + +func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { + attr := semconv.NetworkTransportPipe + switch network { + case "tcp", "tcp4", "tcp6": + attr = semconv.NetworkTransportTCP + case "udp", "udp4", "udp6": + attr = semconv.NetworkTransportUDP + case "unix", "unixgram", "unixpacket": + attr = semconv.NetworkTransportUnix + } + + return []attribute.KeyValue{attr} +} + +type ServerMetricData struct { + ServerName string + ResponseSize int64 + + MetricData + MetricAttributes +} + +type MetricAttributes struct { + Req *http.Request + StatusCode int + Route string + AdditionalAttributes []attribute.KeyValue +} + +type MetricData struct { + RequestSize int64 + + // The request duration, in milliseconds + ElapsedTime float64 +} + +var ( + metricAddOptionPool = &sync.Pool{ + New: func() any { + return &[]metric.AddOption{} + }, + } + + metricRecordOptionPool = &sync.Pool{ + New: func() any { + return &[]metric.RecordOption{} + }, + } +) + +func (n HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { + attributes := n.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.Route, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) + *recordOpts = append(*recordOpts, o) + n.requestBodySizeHistogram.Inst().Record(ctx, md.RequestSize, *recordOpts...) + n.responseBodySizeHistogram.Inst().Record(ctx, md.ResponseSize, *recordOpts...) + n.requestDurationHistogram.Inst().Record(ctx, md.ElapsedTime/1000.0, o) + *recordOpts = (*recordOpts)[:0] + metricRecordOptionPool.Put(recordOpts) +} + +func (n HTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconv.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconv.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconv.HTTPRequestMethodGet, orig +} + +func (n HTTPServer) scheme(https bool) attribute.KeyValue { //nolint:revive // ignore linter + if https { + return semconv.URLScheme("https") + } + return semconv.URLScheme("http") +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP +// response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will +// be omitted. 
+func (n HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + var count int + + if resp.ReadBytes > 0 { + count++ + } + if resp.WriteBytes > 0 { + count++ + } + if resp.StatusCode > 0 { + count++ + } + + attributes := make([]attribute.KeyValue, 0, count) + + if resp.ReadBytes > 0 { + attributes = append(attributes, + semconv.HTTPRequestBodySize(int(resp.ReadBytes)), + ) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, + semconv.HTTPResponseBodySize(int(resp.WriteBytes)), + ) + } + if resp.StatusCode > 0 { + attributes = append(attributes, + semconv.HTTPResponseStatusCode(resp.StatusCode), + ) + } + + return attributes +} + +// Route returns the attribute for the route. +func (n HTTPServer) Route(route string) attribute.KeyValue { + return semconv.HTTPRoute(route) +} + +func (n HTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, route string, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + num++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + if route != "" { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconv.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + n.scheme(req.TLS != nil), + semconv.ServerAddress(host)) + + if hostPort > 0 { + attributes = append(attributes, semconv.ServerPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPResponseStatusCode(statusCode)) + } + + if route != "" { + attributes = append(attributes, semconv.HTTPRoute(route)) + } + return attributes +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index dfb53cf1f..6e096da5e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,6 +5,6 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.63.0" + return "0.64.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 2b53a25e1..a6d0cbcc9 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -8,3 +8,4 @@ nam valu thirdparty addOpt +observ diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index b01762ffc..1b1b2aff9 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -197,6 +197,9 @@ linters: - float-compare - go-require - require-error + usetesting: + context-background: true + context-todo: true exclusions: generated: lax presets: diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore index 532850588..994b677df 100644 --- a/vendor/go.opentelemetry.io/otel/.lycheeignore +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -1,4 +1,5 @@ http://localhost +https://localhost http://jaeger-collector https://github.com/open-telemetry/opentelemetry-go/milestone/ https://github.com/open-telemetry/opentelemetry-go/projects @@ -6,4 +7,7 @@ https://github.com/open-telemetry/opentelemetry-go/projects https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+] file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual -http://4.3.2.1:78/user/123 \ No newline at end of file +http://4.3.2.1:78/user/123 +file:///home/runner/work/opentelemetry-go/opentelemetry-go/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/dns:/:4317 +# URL works, but it has blocked link checkers. +https://dl.acm.org/doi/10.1145/198429.198435 diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index f3abcfdc2..ecbe0582c 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,74 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.39.0/0.61.0/0.15.0/0.0.14] 2025-12-05 + +### Added + +- Greatly reduce the cost of recording metrics in `go.opentelemetry.io/otel/sdk/metric` using hashing for map keys. (#7175) +- Add `WithInstrumentationAttributeSet` option to `go.opentelemetry.io/otel/log`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/trace` packages. + This provides a concurrent-safe and performant alternative to `WithInstrumentationAttributes` by accepting a pre-constructed `attribute.Set`. (#7287) +- Add experimental observability for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. + Check the `go.opentelemetry.io/otel/exporters/prometheus/internal/x` package documentation for more information. (#7345) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7353) +- Add temporality selector functions `DeltaTemporalitySelector`, `CumulativeTemporalitySelector`, `LowMemoryTemporalitySelector` to `go.opentelemetry.io/otel/sdk/metric`. (#7434) +- Add experimental observability metrics for simple log processor in `go.opentelemetry.io/otel/sdk/log`. (#7548) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. 
(#7459) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7486) +- Add experimental observability metrics for simple span processor in `go.opentelemetry.io/otel/sdk/trace`. (#7374) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7512) +- Add experimental observability metrics for manual reader in `go.opentelemetry.io/otel/sdk/metric`. (#7524) +- Add experimental observability metrics for periodic reader in `go.opentelemetry.io/otel/sdk/metric`. (#7571) +- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environmental variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7608) +- Add `Enabled` method to the `Processor` interface in `go.opentelemetry.io/otel/sdk/log`. + All `Processor` implementations now include an `Enabled` method. (#7639) +- The `go.opentelemetry.io/otel/semconv/v1.38.0` package. + The package contains semantic conventions from the `v1.38.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.38.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.37.0.`(#7648) + +### Changed + +- `Distinct` in `go.opentelemetry.io/otel/attribute` is no longer guaranteed to uniquely identify an attribute set. + Collisions between `Distinct` values for different Sets are possible with extremely high cardinality (billions of series per instrument), but are highly unlikely. (#7175) +- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/trace` synchronously de-duplicates the passed attributes instead of delegating it to the returned `TracerOption`. (#7266) +- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/meter` synchronously de-duplicates the passed attributes instead of delegating it to the returned `MeterOption`. (#7266) +- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/log` synchronously de-duplicates the passed attributes instead of delegating it to the returned `LoggerOption`. (#7266) +- Rename the `OTEL_GO_X_SELF_OBSERVABILITY` environment variable to `OTEL_GO_X_OBSERVABILITY` in `go.opentelemetry.io/otel/sdk/trace`, `go.opentelemetry.io/otel/sdk/log`, and `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7302) +- Improve performance of histogram `Record` in `go.opentelemetry.io/otel/sdk/metric` when min and max are disabled using `NoMinMax`. (#7306) +- Improve error handling for dropped data during translation by using `prometheus.NewInvalidMetric` in `go.opentelemetry.io/otel/exporters/prometheus`. + ⚠️ **Breaking Change:** Previously, these cases were only logged and scrapes succeeded. + Now, when translation would drop data (e.g., invalid label/value), the exporter emits a `NewInvalidMetric`, and Prometheus scrapes **fail with HTTP 500** by default. + To preserve the prior behavior (scrapes succeed while errors are logged), configure your Prometheus HTTP handler with: `promhttp.HandlerOpts{ ErrorHandling: promhttp.ContinueOnError }`. (#7363) +- Replace fnv hash with xxhash in `go.opentelemetry.io/otel/attribute` for better performance. (#7371) +- The default `TranslationStrategy` in `go.opentelemetry.io/exporters/prometheus` is changed from `otlptranslator.NoUTF8EscapingWithSuffixes` to `otlptranslator.UnderscoreEscapingWithSuffixes`. (#7421) +- Improve performance of concurrent measurements in `go.opentelemetry.io/otel/sdk/metric`. 
(#7427) +- Include W3C TraceFlags (bits 0–7) in the OTLP `Span.Flags` field in `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracehttp` and `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracegrpc`. (#7438) +- The `ErrorType` function in `go.opentelemetry.io/otel/semconv/v1.37.0` now handles custom error types. + If an error implements an `ErrorType() string` method, the return value of that method will be used as the error type. (#7442) + +### Fixed + +- Fix `WithInstrumentationAttributes` options in `go.opentelemetry.io/otel/trace`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/log` to properly merge attributes when passed multiple times instead of replacing them. + Attributes with duplicate keys will use the last value passed. (#7300) +- The equality of `attribute.Set` when using the `Equal` method is not affected by the user overriding the empty set pointed to by `attribute.EmptySet` in `go.opentelemetry.io/otel/attribute`. (#7357) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7372) +- Fix `AddAttributes`, `SetAttributes`, `SetBody` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not mutate input. (#7403) +- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7655) +- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7656) + +### Removed + +- Drop support for [Go 1.23]. (#7274) +- Remove the `FilterProcessor` interface in `go.opentelemetry.io/otel/sdk/log`. + The `Enabled` method has been added to the `Processor` interface instead. + All `Processor` implementations must now implement the `Enabled` method. + Custom processors that do not filter records can implement `Enabled` to return `true`. (#7639) + ## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29 This release is the last to support [Go 1.23]. @@ -3430,8 +3498,10 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
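Two entries in the 1.39.0 changelog above are easier to evaluate with short sketches. First, the new `WithInstrumentationAttributeSet` option: per the Added entry it accepts a pre-constructed `attribute.Set` as a concurrent-safe alternative to `WithInstrumentationAttributes`. The sketch below is ours, not from the release; it assumes the option is an ordinary `trace.TracerOption`, and the attribute names are invented.

```go
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Build the set once. attribute.Set is immutable, so sharing it across
	// goroutines is safe, which is the point of this option per the changelog.
	set := attribute.NewSet(
		attribute.String("tenant", "acme"), // illustrative attribute
		attribute.Bool("internal", true),   // illustrative attribute
	)

	// Assumed shape: WithInstrumentationAttributeSet(attribute.Set) is a
	// TracerOption (the changelog adds it to trace, metric, and log alike).
	tracer := otel.Tracer("example/instrumentation",
		trace.WithInstrumentationAttributeSet(set),
	)
	_ = tracer
}
```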
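Second, the breaking Prometheus exporter change (#7363): scrapes now fail with HTTP 500 when translation would drop data. The changelog spells out the opt-out; a minimal sketch of wiring it up follows, with the registry setup assumed for illustration.

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// ... register the otel Prometheus exporter's collector with reg ...

	// Preserve the pre-#7363 behavior: log translation errors and let the
	// scrape succeed instead of failing with HTTP 500.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorHandling: promhttp.ContinueOnError,
	}))
	_ = http.ListenAndServe(":2112", nil)
}
```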
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.39.0...HEAD +[1.39.0/0.61.0/0.15.0/0.0.14]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.39.0 [1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0 +[0.59.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/exporters/prometheus/v0.59.1 [1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 [0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 [0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 0b3ae855c..ff5e1f76e 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -54,8 +54,8 @@ go get -d go.opentelemetry.io/otel (This may print some warning about "build constraints exclude all Go files", just ignore it.) -This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You -can alternatively use `git` directly with: +This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. +Alternatively, you can use `git` directly with: ```sh git clone https://github.com/open-telemetry/opentelemetry-go @@ -65,8 +65,7 @@ git clone https://github.com/open-telemetry/opentelemetry-go that name is a kind of a redirector to GitHub that `go get` can understand, but `git` does not.) -This would put the project in the `opentelemetry-go` directory in -current working directory. +This will add the project as `opentelemetry-go` within the current directory. Enter the newly created directory and add your fork as a new remote: @@ -109,7 +108,7 @@ A PR is considered **ready to merge** when: This is not enforced through automation, but needs to be validated by the maintainer merging. - * At least one of the qualified approvals need to be from an + * At least one of the qualified approvals needs to be from an [Approver]/[Maintainer] affiliated with a different company than the author of the PR. * PRs introducing changes that have already been discussed and consensus @@ -166,11 +165,11 @@ guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines). ### Focus on Capabilities, Not Structure Compliance OpenTelemetry is an evolving specification, one where the desires and -use cases are clear, but the method to satisfy those uses cases are +use cases are clear, but the methods to satisfy those use cases are not. As such, Contributions should provide functionality and behavior that -conforms to the specification, but the interface and structure is +conforms to the specification, but the interface and structure are flexible. It is preferable to have contributions follow the idioms of the @@ -217,7 +216,7 @@ about dependency compatibility. This project does not partition dependencies based on the environment (i.e. `development`, `staging`, `production`). -Only the dependencies explicitly included in the released modules have be +Only the dependencies explicitly included in the released modules have been tested and verified to work with the released code. No other guarantee is made about the compatibility of other dependencies. @@ -635,8 +634,8 @@ is not in their root name. The use of internal packages should be scoped to a single module. 
A sub-module should never import from a parent internal package. This creates a coupling -between the two modules where a user can upgrade the parent without the child -and if the internal package API has changed it will fail to upgrade[^3]. +between the two modules where a user can upgrade the parent without the child, +and if the internal package API has changed, it will fail to upgrade[^3]. There are two known exceptions to this rule: @@ -657,7 +656,7 @@ this. ### Ignoring context cancellation -OpenTelemetry API implementations need to ignore the cancellation of the context that are +OpenTelemetry API implementations need to ignore the cancellation of the context that is passed when recording a value (e.g. starting a span, recording a measurement, emitting a log). Recording methods should not return an error describing the cancellation state of the context when they complete, nor should they abort any work. @@ -675,6 +674,441 @@ force flushing telemetry, shutting down a signal provider) the context cancellat should be honored. This means all work done on behalf of the user provided context should be canceled. +### Observability + +OpenTelemetry Go SDK components should be instrumented to enable users observability for the health and performance of the telemetry pipeline itself. +This allows operators to understand how well their observability infrastructure is functioning and to identify potential issues before they impact their applications. + +This section outlines the best practices for building instrumentation in OpenTelemetry Go SDK components. + +#### Environment Variable Activation + +Observability features are currently experimental. +They should be disabled by default and activated through the `OTEL_GO_X_OBSERVABILITY` environment variable. +This follows the established experimental feature pattern used throughout the SDK. + +Components should check for this environment variable using a consistent pattern: + +```go +import "go.opentelemetry.io/otel/*/internal/x" + +if x.Observability.Enabled() { + // Initialize observability metrics +} +``` + +**References**: + +- [stdouttrace exporter](./exporters/stdout/stdouttrace/internal/x/x.go) +- [sdk](./sdk/internal/x/x.go) + +#### Encapsulation + +Instrumentation should be encapsulated within a dedicated `struct` (e.g. `instrumentation`). +It should not be mixed into the instrumented component. + +Prefer this: + +```go +type SDKComponent struct { + inst *instrumentation +} + +type instrumentation struct { + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} +``` + +To this: + +```go +// ❌ Avoid this pattern. +type SDKComponent struct { + /* other SDKComponent fields... */ + + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} +``` + +The instrumentation code should not bloat the code being instrumented. +Likely, this means its own file, or its own package if it is complex or reused. + +#### Initialization + +Instrumentation setup should be explicit, side-effect free, and local to the relevant component. +Avoid relying on global or implicit [side effects][side-effect] for initialization. 
+ +Encapsulate setup in constructor functions, ensuring clear ownership and scope: + +```go +import ( + "errors" + + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +type SDKComponent struct { + inst *instrumentation +} + +func NewSDKComponent(config Config) (*SDKComponent, error) { + inst, err := newInstrumentation() + if err != nil { + return nil, err + } + return &SDKComponent{inst: inst}, nil +} + +type instrumentation struct { + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} + +func newInstrumentation() (*instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + "", + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + inst := &instrumentation{} + + var err, e error + inst.inflight, e = otelconv.NewSDKComponentInflight(meter) + err = errors.Join(err, e) + + inst.exported, e = otelconv.NewSDKComponentExported(meter) + err = errors.Join(err, e) + + return inst, err +} +``` + +```go +// ❌ Avoid this pattern. +func (c *Component) initObservability() { + // Initialize observability metrics + if !x.Observability.Enabled() { + return + } + + // Initialize observability metrics + c.inst = &instrumentation{/* ... */} +} +``` + +[side-effect]: https://en.wikipedia.org/wiki/Side_effect_(computer_science) + +#### Performance + +When observability is disabled there should be little to no overhead. + +```go +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + if e.inst != nil { + attrs := expensiveOperation() + e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...) + } + // Export spans... +} +``` + +```go +// ❌ Avoid this pattern. +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + attrs := expensiveOperation() + e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...) + // Export spans... +} + +func (i *instrumentation) recordSpanInflight(ctx context.Context, count int64, attrs ...attribute.KeyValue) { + if i == nil || i.inflight == nil { + return + } + i.inflight.Add(ctx, count, metric.WithAttributes(attrs...)) +} +``` + +When observability is enabled, the instrumentation code paths should be optimized to reduce allocation and computation overhead. + +##### Attribute and Option Allocation Management + +Pool attribute slices and options with [`sync.Pool`] to minimize allocations in measurement calls with dynamic attributes. + +```go +var ( + attrPool = sync.Pool{ + New: func() any { + // Pre-allocate common capacity + knownCap := 8 // Adjust based on expected usage + s := make([]attribute.KeyValue, 0, knownCap) + // Return a pointer to avoid extra allocation on Put(). + return &s + }, + } + + addOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.AddOption, 0, n) + // Return a pointer to avoid extra allocation on Put(). + return &o + }, + } +) + +func (i *instrumentation) record(ctx context.Context, value int64, baseAttrs ...attribute.KeyValue) { + attrs := attrPool.Get().(*[]attribute.KeyValue) + defer func() { + *attrs = (*attrs)[:0] // Reset. + attrPool.Put(attrs) + }() + + *attrs = append(*attrs, baseAttrs...) + // Add any dynamic attributes. 
+ *attrs = append(*attrs, semconv.OTelComponentName("exporter-1")) + + addOpt := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *addOpt = (*addOpt)[:0] + addOptPool.Put(addOpt) + }() + + set := attribute.NewSet(*attrs...) + *addOpt = append(*addOpt, metric.WithAttributeSet(set)) + + i.counter.Add(ctx, value, *addOpt...) +} +``` + +Pools are most effective when there are many pooled objects of the same sufficiently large size, and the objects are repeatedly used. +This amortizes the cost of allocation and synchronization. +Ideally, the pools should be scoped to be used as widely as possible within the component to maximize this efficiency while still ensuring correctness. + +[`sync.Pool`]: https://pkg.go.dev/sync#Pool + +##### Cache common attribute sets for repeated measurements + +If a static set of attributes are used for measurements and they are known at compile time, pre-compute and cache these attributes. + +```go +type spanLiveSetKey struct { + sampled bool +} + +var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{ + {true}: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + {false}: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordOnly, + ), + ), +} + +func spanLiveSet(sampled bool) attribute.Set { + key := spanLiveSetKey{sampled: sampled} + return spanLiveSetCache[key] +} +``` + +##### Benchmarking + +Always provide benchmarks when introducing or refactoring instrumentation. +Demonstrate the impact (allocs/op, B/op, ns/op) in enabled/disabled scenarios: + +```go +func BenchmarkExportSpans(b *testing.B) { + scenarios := []struct { + name string + obsEnabled bool + }{ + {"ObsDisabled", false}, + {"ObsEnabled", true}, + } + + for _, scenario := range scenarios { + b.Run(scenario.name, func(b *testing.B) { + b.Setenv( + "OTEL_GO_X_OBSERVABILITY", + strconv.FormatBool(scenario.obsEnabled), + ) + + exporter := NewExporter() + spans := generateTestSpans(100) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + _ = exporter.ExportSpans(context.Background(), spans) + } + }) + } +} +``` + +#### Error Handling and Robustness + +Errors should be reported back to the caller if possible, and partial failures should be handled as gracefully as possible. + +```go +func newInstrumentation() (*instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + m := otel.GetMeterProvider().Meter(/* initialize meter */) + counter, err := otelconv.NewSDKComponentCounter(m) + // Use the partially initialized counter if available. + i := &instrumentation{counter: counter} + // Return any error to the caller. + return i, err +} +``` + +```go +// ❌ Avoid this pattern. +func newInstrumentation() *instrumentation { + if !x.Observability.Enabled() { + return nil, nil + } + + m := otel.GetMeterProvider().Meter(/* initialize meter */) + counter, err := otelconv.NewSDKComponentCounter(m) + if err != nil { + // ❌ Do not dump the error to the OTel Handler. Return it to the + // caller. + otel.Handle(err) + // ❌ Do not return nil if we can still use the partially initialized + // counter. + return nil + } + return &instrumentation{counter: counter} +} +``` + +If the instrumented component cannot report the error to the user, let it report the error to `otel.Handle`. 
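A caller-side view of the error-handling pattern above may help; this sketch reuses the hypothetical `NewSDKComponent` from the earlier examples and is not part of the upstream text. The point is that the joined error and the partially initialized component are both kept.

```go
func setup(cfg Config) (*SDKComponent, error) {
	comp, err := NewSDKComponent(cfg)
	if err != nil {
		// err may join several instrument-constructor failures, but comp
		// still works with whatever instruments did initialize. Keep it
		// and surface the error instead of discarding both.
		return comp, fmt.Errorf("observability partially initialized: %w", err)
	}
	return comp, nil
}
```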
+ +#### Context Propagation + +Ensure observability measurements receive the correct context, especially for trace exemplars and distributed context: + +```go +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + // Use the provided context for observability measurements + e.inst.recordSpanExportStarted(ctx, len(spans)) + + err := e.doExport(ctx, spans) + + if err != nil { + e.inst.recordSpanExportFailed(ctx, len(spans), err) + } else { + e.inst.recordSpanExportSucceeded(ctx, len(spans)) + } + + return err +} +``` + +```go +// ❌ Avoid this pattern. +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + // ❌ Do not break the context propagation. + e.inst.recordSpanExportStarted(context.Background(), len(spans)) + + err := e.doExport(ctx, spans) + + /* ... */ + + return err +} +``` + +#### Semantic Conventions Compliance + +All observability metrics should follow the [OpenTelemetry Semantic Conventions for SDK metrics](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/otel/sdk-metrics.md). + +Use the metric semantic conventions convenience package [otelconv](./semconv/v1.37.0/otelconv/metric.go). + +##### Component Identification + +Component names and types should follow [semantic convention](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/registry/attributes/otel.md#otel-component-attributes). + +If a component is not a well-known type specified in the semantic conventions, use the package path scope type as a stable identifier. + +```go +componentType := "go.opentelemetry.io/otel/sdk/trace.Span" +``` + +```go +// ❌ Do not do this. +componentType := "trace-span" +``` + +The component name should be a stable unique identifier for the specific instance of the component. + +Use a global counter to ensure uniqueness if necessary. + +```go +// Unique 0-based ID counter for component instances. +var componentIDCounter atomic.Int64 + +// nextID returns the next unique ID for a component. +func nextID() int64 { + return componentIDCounter.Add(1) - 1 +} + +// componentName returns a unique name for the component instance. +func componentName() attribute.KeyValue { + id := nextID() + name := fmt.Sprintf("%s/%d", componentType, id) + return semconv.OTelComponentName(name) +} +``` + +The component ID will need to be resettable for deterministic testing. +If tests are in a different package than the component being tested (i.e. a `_test` package name), use a generated `counter` internal package to manage the counter. +See [stdouttrace exporter example](./exporters/stdout/stdouttrace/internal/gen.go) for reference. + +#### Testing + +Use deterministic testing with isolated state: + +```go +func TestObservability(t *testing.T) { + // Restore state after test to ensure this does not affect other tests. + prev := otel.GetMeterProvider() + t.Cleanup(func() { otel.SetMeterProvider(prev) }) + + // Isolate the meter provider for deterministic testing + reader := metric.NewManualReader() + meterProvider := metric.NewMeterProvider(metric.WithReader(reader)) + otel.SetMeterProvider(meterProvider) + + // Use t.Setenv to ensure environment variable is restored after test. + t.Setenv("OTEL_GO_X_OBSERVABILITY", "true") + + // Reset component ID counter to ensure deterministic component names. + componentIDCounter.Store(0) + + /* ... test code ... */ +} +``` + +Test order should not affect results. +Ensure that any global state (e.g. 
component ID counters) is reset between tests. + ## Approvers and Maintainers ### Maintainers @@ -696,7 +1130,6 @@ For more information about the approver role, see the [community repository](htt ### Triagers - [Alex Kats](https://github.com/akats7), Capital One -- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager). @@ -704,6 +1137,7 @@ For more information about the triager role, see the [community repository](http - [Aaron Clawson](https://github.com/MadVikingGod) - [Anthony Mirabella](https://github.com/Aneurysm9) +- [Cheng-Zhen Yang](https://github.com/scorpionknifes) - [Chester Cheung](https://github.com/hanyuancheung) - [Evan Torrie](https://github.com/evantorrie) - [Gustavo Silva Paiva](https://github.com/paivagustavo) diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index bc0f1f92d..44870248c 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -146,11 +146,12 @@ build-tests/%: # Tests -TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe test-fuzz .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. test-short: ARGS=-short +test-fuzz: ARGS=-fuzztime=10s -fuzz test-verbose: ARGS=-v -race test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race test-concurrent-safe: TIMEOUT=120 diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 6b7ab5f21..c63359543 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -55,25 +55,18 @@ Currently, this project supports the following environments. |----------|------------|--------------| | Ubuntu | 1.25 | amd64 | | Ubuntu | 1.24 | amd64 | -| Ubuntu | 1.23 | amd64 | | Ubuntu | 1.25 | 386 | | Ubuntu | 1.24 | 386 | -| Ubuntu | 1.23 | 386 | | Ubuntu | 1.25 | arm64 | | Ubuntu | 1.24 | arm64 | -| Ubuntu | 1.23 | arm64 | -| macOS 13 | 1.25 | amd64 | -| macOS 13 | 1.24 | amd64 | -| macOS 13 | 1.23 | amd64 | +| macOS | 1.25 | amd64 | +| macOS | 1.24 | amd64 | | macOS | 1.25 | arm64 | | macOS | 1.24 | arm64 | -| macOS | 1.23 | arm64 | | Windows | 1.25 | amd64 | | Windows | 1.24 | amd64 | -| Windows | 1.23 | amd64 | | Windows | 1.25 | 386 | | Windows | 1.24 | 386 | -| Windows | 1.23 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 1ddcdef03..861756fd7 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -24,7 +24,7 @@ Ensure things look correct before submitting a pull request to include the addit ## Breaking changes validation -You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API. +You can run `make gorelease` which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API. 
You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). @@ -62,7 +62,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` 3. Update the [Changelog](./CHANGELOG.md). - - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand. + - Make sure all relevant changes for this release are included and are written in language that non-contributors to the project can understand. To verify this, you can look directly at the commits since the ``. ``` @@ -107,33 +107,49 @@ It is critical you make sure the version you push upstream is correct. ... ``` +## Sign artifacts + +To ensure we comply with CNCF best practices, we need to sign the release artifacts. + +Download the `.tar.gz` and `.zip` archives from the [tags page](https://github.com/open-telemetry/opentelemetry-go/tags) for the new release tag. +Both archives need to be signed with your GPG key. + +You can use [this script] to verify the contents of the archives before signing them. + +To find your GPG key ID, run: + +```terminal +gpg --list-secret-keys --keyid-format=long +``` + +The key ID is the 16-character string after `sec rsa4096/` (or similar). + +Set environment variables and sign both artifacts: + +```terminal +export VERSION="" # e.g., v1.32.0 +export KEY_ID="" + +gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.tar.gz +gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.zip +``` + +You can verify the signatures with: + +```terminal +gpg --verify opentelemetry-go-$VERSION.tar.gz.asc opentelemetry-go-$VERSION.tar.gz +gpg --verify opentelemetry-go-$VERSION.zip.asc opentelemetry-go-$VERSION.zip +``` + +[this script]: https://github.com/MrAlias/attest-sh + ## Release Finally create a Release for the new `` on GitHub. The release body should include all the release notes from the Changelog for this release. -### Sign the Release Artifact - -To ensure we comply with CNCF best practices, we need to sign the release artifact. -The tarball attached to the GitHub release needs to be signed with your GPG key. - -Follow [these steps] to sign the release artifact and upload it to GitHub. -You can use [this script] to verify the contents of the tarball before signing it. - -Be sure to use the correct GPG key when signing the release artifact. - -```terminal -gpg --local-user --armor --detach-sign opentelemetry-go-.tar.gz -``` - -You can verify the signature with: - -```terminal -gpg --verify opentelemetry-go-.tar.gz.asc opentelemetry-go-.tar.gz -``` - -[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases -[this script]: https://github.com/MrAlias/attest-sh +***IMPORTANT***: GitHub Releases are immutable once created. +You must upload the signed artifacts (`.tar.gz`, `.tar.gz.asc`, `.zip`, and `.zip.asc`) when creating the release, as they cannot be added or modified later. ## Post-Release @@ -160,14 +176,6 @@ This helps track what changes were included in each release. Once all related issues and PRs have been added to the milestone, close the milestone. 
-
-### Demo Repository
-
-Bump the dependencies in the following Go services:
-
-- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting)
-- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout)
-- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)
-
 ### Close the `Version Release` issue
 
 Once the todo list in the `Version Release` issue is complete, close the issue.
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
index b8cb605c1..b27c9e84f 100644
--- a/vendor/go.opentelemetry.io/otel/VERSIONING.md
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -83,7 +83,7 @@ is designed so the following goals can be achieved.
   in either the module path or the import path.
 * In addition to public APIs, telemetry produced by stable instrumentation
   will remain stable and backwards compatible. This is to avoid breaking
-  alerts and dashboard.
+  alerts and dashboards.
 * Modules will be used to encapsulate instrumentation, detectors, exporters,
   propagators, and any other independent sets of related components.
 * Experimental modules still under active development will be versioned at
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
index 6333d34b3..6cc1a1655 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -16,7 +16,7 @@ type (
 	// set into a wire representation.
 	Encoder interface {
 		// Encode returns the serialized encoding of the attribute set using
-		// its Iterator. This result may be cached by a attribute.Set.
+		// its Iterator. This result may be cached by an attribute.Set.
 		Encode(iterator Iterator) string
 
 		// ID returns a value that is unique for each class of attribute
diff --git a/vendor/go.opentelemetry.io/otel/attribute/hash.go b/vendor/go.opentelemetry.io/otel/attribute/hash.go
new file mode 100644
index 000000000..6aa69aeae
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/hash.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"fmt"
+	"reflect"
+
+	"go.opentelemetry.io/otel/attribute/internal/xxhash"
+)
+
+// Type identifiers. These identifiers are hashed before the value of the
+// corresponding type. This is done to distinguish values that are hashed with
+// the same value representation (e.g. `int64(1)` and `true`, `[]int64{0}` and
+// `int64(0)`).
+//
+// These are all 8-byte strings converted to a uint64 representation. A
+// uint64 is used instead of the string directly as an optimization; it avoids
+// the for loop in [xxhash], which adds minor overhead.
+const (
+	boolID         uint64 = 7953749933313450591 // "_boolean" (little endian)
+	int64ID        uint64 = 7592915492740740150 // "64_bit_i" (little endian)
+	float64ID      uint64 = 7376742710626956342 // "64_bit_f" (little endian)
+	stringID       uint64 = 6874584755375207263 // "_string_" (little endian)
+	boolSliceID    uint64 = 6875993255270243167 // "_[]bool_" (little endian)
+	int64SliceID   uint64 = 3762322556277578591 // "_[]int64" (little endian)
+	float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian)
+	stringSliceID  uint64 = 7453010373645655387 // "[]string" (little endian)
+)
+
+// hashKVs returns a new xxHash64 hash of kvs.
+func hashKVs(kvs []KeyValue) uint64 { + h := xxhash.New() + for _, kv := range kvs { + h = hashKV(h, kv) + } + return h.Sum64() +} + +// hashKV returns the xxHash64 hash of kv with h as the base. +func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash { + h = h.String(string(kv.Key)) + + switch kv.Value.Type() { + case BOOL: + h = h.Uint64(boolID) + h = h.Uint64(kv.Value.numeric) + case INT64: + h = h.Uint64(int64ID) + h = h.Uint64(kv.Value.numeric) + case FLOAT64: + h = h.Uint64(float64ID) + // Assumes numeric stored with math.Float64bits. + h = h.Uint64(kv.Value.numeric) + case STRING: + h = h.Uint64(stringID) + h = h.String(kv.Value.stringly) + case BOOLSLICE: + h = h.Uint64(boolSliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Bool(rv.Index(i).Bool()) + } + case INT64SLICE: + h = h.Uint64(int64SliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Int64(rv.Index(i).Int()) + } + case FLOAT64SLICE: + h = h.Uint64(float64SliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Float64(rv.Index(i).Float()) + } + case STRINGSLICE: + h = h.Uint64(stringSliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.String(rv.Index(i).String()) + } + case INVALID: + default: + // Logging is an alternative, but using the internal logger here + // causes an import cycle so it is not done. + v := kv.Value.AsInterface() + msg := fmt.Sprintf("unknown value type: %[1]v (%[1]T)", v) + panic(msg) + } + return h +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go new file mode 100644 index 000000000..113a97838 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package xxhash provides a wrapper around the xxhash library for attribute hashing. +package xxhash // import "go.opentelemetry.io/otel/attribute/internal/xxhash" + +import ( + "encoding/binary" + "math" + + "github.com/cespare/xxhash/v2" +) + +// Hash wraps xxhash.Digest to provide an API friendly for hashing attribute values. +type Hash struct { + d *xxhash.Digest +} + +// New returns a new initialized xxHash64 hasher. +func New() Hash { + return Hash{d: xxhash.New()} +} + +func (h Hash) Uint64(val uint64) Hash { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], val) + // errors from Write are always nil for xxhash + // if it returns an err then panic + _, err := h.d.Write(buf[:]) + if err != nil { + panic("xxhash write of uint64 failed: " + err.Error()) + } + return h +} + +func (h Hash) Bool(val bool) Hash { // nolint:revive // This is a hashing function. + if val { + return h.Uint64(1) + } + return h.Uint64(0) +} + +func (h Hash) Float64(val float64) Hash { + return h.Uint64(math.Float64bits(val)) +} + +func (h Hash) Int64(val int64) Hash { + return h.Uint64(uint64(val)) // nolint:gosec // Overflow doesn't matter since we are hashing. +} + +func (h Hash) String(val string) Hash { + // errors from WriteString are always nil for xxhash + // if it returns an err then panic + _, err := h.d.WriteString(val) + if err != nil { + panic("xxhash write of string failed: " + err.Error()) + } + return h +} + +// Sum64 returns the current hash value. 
+func (h Hash) Sum64() uint64 {
+	return h.d.Sum64()
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index 64735d382..911d557ee 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -9,6 +9,8 @@ import (
 	"reflect"
 	"slices"
 	"sort"
+
+	"go.opentelemetry.io/otel/attribute/internal/xxhash"
 )
 
 type (
@@ -23,19 +25,19 @@ type (
 	// the Equals method to ensure stable equivalence checking.
 	//
 	// Users should also use the Distinct returned from Equivalent as a map key
-	// instead of a Set directly. In addition to that type providing guarantees
-	// on stable equivalence, it may also provide performance improvements.
+	// instead of a Set directly. Set has relatively poor performance when used
+	// as a map key compared to Distinct.
 	Set struct {
-		equivalent Distinct
+		hash uint64
+		data any
 	}
 
-	// Distinct is a unique identifier of a Set.
+	// Distinct is an identifier of a Set which is very likely to be unique.
 	//
-	// Distinct is designed to ensure equivalence stability: comparisons will
-	// return the same value across versions. For this reason, Distinct should
-	// always be used as a map key instead of a Set.
+	// Distinct should be used as a map key instead of a Set to provide better
+	// performance for map operations.
 	Distinct struct {
-		iface any
+		hash uint64
 	}
 
 	// Sortable implements sort.Interface, used for sorting KeyValue.
 	Sortable []KeyValue
 )
 
+// Compile time check these types remain comparable.
+var (
+	_ = isComparable(Set{})
+	_ = isComparable(Distinct{})
+)
+
+func isComparable[T comparable](t T) T { return t }
+
 var (
 	// keyValueType is used in computeDistinctReflect.
 	keyValueType = reflect.TypeOf(KeyValue{})
 
-	// emptySet is returned for empty attribute sets.
-	emptySet = &Set{
-		equivalent: Distinct{
-			iface: [0]KeyValue{},
-		},
+	// emptyHash is the hash of an empty set.
+	emptyHash = xxhash.New().Sum64()
+
+	// userDefinedEmptySet is an empty set. It was mistakenly exposed to users
+	// as something they can assign to, so it must remain addressable and
+	// mutable.
+	//
+	// This is kept for backwards compatibility, but should not be used in new code.
+	userDefinedEmptySet = &Set{
+		hash: emptyHash,
+		data: [0]KeyValue{},
+	}
+
+	emptySet = Set{
+		hash: emptyHash,
+		data: [0]KeyValue{},
 	}
 )
 
@@ -62,33 +83,35 @@ var (
 //
 // This is a convenience provided for optimized calling utility.
 func EmptySet() *Set {
-	return emptySet
-}
-
-// reflectValue abbreviates reflect.ValueOf(d).
-func (d Distinct) reflectValue() reflect.Value {
-	return reflect.ValueOf(d.iface)
+	// Continue to return the pointer to the user-defined empty set for
+	// backwards-compatibility.
+	//
+	// New code should not use this; instead use emptySet.
+	return userDefinedEmptySet
 }
 
 // Valid reports whether this value refers to a valid Set.
-func (d Distinct) Valid() bool {
-	return d.iface != nil
+func (d Distinct) Valid() bool { return d.hash != 0 }
+
+// reflectValue abbreviates reflect.ValueOf(l.data).
+func (l Set) reflectValue() reflect.Value {
+	return reflect.ValueOf(l.data)
 }
 
 // Len returns the number of attributes in this set.
 func (l *Set) Len() int {
-	if l == nil || !l.equivalent.Valid() {
+	if l == nil || l.hash == 0 {
 		return 0
 	}
-	return l.equivalent.reflectValue().Len()
+	return l.reflectValue().Len()
 }
 
 // Get returns the KeyValue at ordered position idx in this set.
func (l *Set) Get(idx int) (KeyValue, bool) { - if l == nil || !l.equivalent.Valid() { + if l == nil || l.hash == 0 { return KeyValue{}, false } - value := l.equivalent.reflectValue() + value := l.reflectValue() if idx >= 0 && idx < value.Len() { // Note: The Go compiler successfully avoids an allocation for @@ -101,10 +124,10 @@ func (l *Set) Get(idx int) (KeyValue, bool) { // Value returns the value of a specified key in this set. func (l *Set) Value(k Key) (Value, bool) { - if l == nil || !l.equivalent.Valid() { + if l == nil || l.hash == 0 { return Value{}, false } - rValue := l.equivalent.reflectValue() + rValue := l.reflectValue() vlen := rValue.Len() idx := sort.Search(vlen, func(idx int) bool { @@ -144,20 +167,29 @@ func (l *Set) ToSlice() []KeyValue { return iter.ToSlice() } -// Equivalent returns a value that may be used as a map key. The Distinct type -// guarantees that the result will equal the equivalent. Distinct value of any +// Equivalent returns a value that may be used as a map key. Equal Distinct +// values are very likely to be equivalent attribute Sets. Distinct value of any // attribute set with the same elements as this, where sets are made unique by // choosing the last value in the input for any given key. func (l *Set) Equivalent() Distinct { - if l == nil || !l.equivalent.Valid() { - return emptySet.equivalent + if l == nil || l.hash == 0 { + return Distinct{hash: emptySet.hash} } - return l.equivalent + return Distinct{hash: l.hash} } // Equals reports whether the argument set is equivalent to this set. func (l *Set) Equals(o *Set) bool { - return l.Equivalent() == o.Equivalent() + if l.Equivalent() != o.Equivalent() { + return false + } + if l == nil || l.hash == 0 { + l = &emptySet + } + if o == nil || o.hash == 0 { + o = &emptySet + } + return l.data == o.data } // Encoded returns the encoded form of this set, according to encoder. @@ -169,12 +201,6 @@ func (l *Set) Encoded(encoder Encoder) string { return encoder.Encode(l.Iter()) } -func empty() Set { - return Set{ - equivalent: emptySet.equivalent, - } -} - // NewSet returns a new Set. See the documentation for // NewSetWithSortableFiltered for more details. // @@ -204,7 +230,7 @@ func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // Check for empty set. if len(kvs) == 0 { - return empty(), nil + return emptySet, nil } // Stable sort so the following de-duplication can implement @@ -233,10 +259,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { if filter != nil { if div := filteredToFront(kvs, filter); div != 0 { - return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] + return newSet(kvs[div:]), kvs[:div] } } - return Set{equivalent: computeDistinct(kvs)}, nil + return newSet(kvs), nil } // NewSetWithSortableFiltered returns a new Set. @@ -316,7 +342,7 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) { if first == 0 { // It is safe to assume len(slice) >= 1 given we found at least one // attribute above that needs to be filtered out. - return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + return newSet(slice[1:]), slice[:1] } // Move the filtered slice[first] to the front (preserving order). @@ -326,25 +352,24 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) { // Do not re-evaluate re(slice[first+1:]). 
div := filteredToFront(slice[1:first+1], re) + 1 - return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] + return newSet(slice[div:]), slice[:div] } -// computeDistinct returns a Distinct using either the fixed- or -// reflect-oriented code path, depending on the size of the input. The input -// slice is assumed to already be sorted and de-duplicated. -func computeDistinct(kvs []KeyValue) Distinct { - iface := computeDistinctFixed(kvs) - if iface == nil { - iface = computeDistinctReflect(kvs) +// newSet returns a new set based on the sorted and uniqued kvs. +func newSet(kvs []KeyValue) Set { + s := Set{ + hash: hashKVs(kvs), + data: computeDataFixed(kvs), } - return Distinct{ - iface: iface, + if s.data == nil { + s.data = computeDataReflect(kvs) } + return s } -// computeDistinctFixed computes a Distinct for small slices. It returns nil -// if the input is too large for this code path. -func computeDistinctFixed(kvs []KeyValue) any { +// computeDataFixed computes a Set data for small slices. It returns nil if the +// input is too large for this code path. +func computeDataFixed(kvs []KeyValue) any { switch len(kvs) { case 1: return [1]KeyValue(kvs) @@ -371,9 +396,9 @@ func computeDistinctFixed(kvs []KeyValue) any { } } -// computeDistinctReflect computes a Distinct using reflection, works for any -// size input. -func computeDistinctReflect(kvs []KeyValue) any { +// computeDataReflect computes a Set data using reflection, works for any size +// input. +func computeDataReflect(kvs []KeyValue) any { at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() for i, keyValue := range kvs { *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue @@ -383,7 +408,7 @@ func computeDistinctReflect(kvs []KeyValue) any { // MarshalJSON returns the JSON encoding of the Set. func (l *Set) MarshalJSON() ([]byte, error) { - return json.Marshal(l.equivalent.iface) + return json.Marshal(l.data) } // MarshalLog is the marshaling function used by the logging system to represent this Set. diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go index e584b2477..24f1fa37d 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -24,8 +24,9 @@ const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICE var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Type_index)-1 { return "Type(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Type_name[_Type_index[i]:_Type_index[i+1]] + return _Type_name[_Type_index[idx]:_Type_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index f83a448ec..78e98c4c0 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -648,7 +648,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // If we couldn't find any valid key character, // it means the key is either empty or invalid. if keyStart == keyEnd { - return + return p, ok } // Skip spaces after the key: " key< >= value ". @@ -658,13 +658,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // A key can have no value, like: " key ". 
ok = true p.key = s[keyStart:keyEnd] - return + return p, ok } // If we have not reached the end and we can't find the '=' delimiter, // it means the property is invalid. if s[index] != keyValueDelimiter[0] { - return + return p, ok } // Attempting to parse the value. @@ -690,14 +690,14 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // we have not reached the end, it means the property is // invalid, something like: " key = value value1". if index != len(s) { - return + return p, ok } // Decode a percent-encoded value. rawVal := s[valueStart:valueEnd] unescapeVal, err := url.PathUnescape(rawVal) if err != nil { - return + return p, ok } value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) @@ -706,7 +706,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) { p.hasValue = true p.value = value - return + return p, ok } func skipSpace(s string, offset int) int { diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index a311fbb48..cadb87cc0 100644 --- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,4 +1,4 @@ # This is a renovate-friendly source of Docker images. FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python -FROM otel/weaver:v0.17.1@sha256:32523b5e44fb44418786347e9f7dde187d8797adb6d57a2ee99c245346c3cdfe AS weaver +FROM otel/weaver:v0.19.0@sha256:3d20814cef548f1d31f27f054fb4cd6a05125641a9f7cc29fc7eb234e8052cd9 AS weaver FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go index 492480f8c..756bf7964 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go @@ -16,7 +16,6 @@ import ( "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry" @@ -101,7 +100,7 @@ func (c *client) Shutdown(ctx context.Context) error { // // Retryable errors from the server will be handled according to any // RetryConfig the client was created with. -func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error { +func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) (uploadErr error) { // The otlpmetric.Exporter synchronizes access to client methods, and // ensures this is not called after the Exporter is shutdown. Only thing // to do here is send data. 
@@ -116,7 +115,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou ctx, cancel := c.exportContext(ctx) defer cancel() - return c.requestFunc(ctx, func(iCtx context.Context) error { + return errors.Join(uploadErr, c.requestFunc(ctx, func(iCtx context.Context) error { resp, err := c.msc.Export(iCtx, &colmetricpb.ExportMetricsServiceRequest{ ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics}, }) @@ -124,8 +123,8 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou msg := resp.PartialSuccess.GetErrorMessage() n := resp.PartialSuccess.GetRejectedDataPoints() if n != 0 || msg != "" { - err := internal.MetricPartialSuccessError(n, msg) - otel.Handle(err) + e := internal.MetricPartialSuccessError(n, msg) + uploadErr = errors.Join(uploadErr, e) } } // nil is converted to OK. @@ -134,7 +133,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou return nil } return err - }) + })) } // exportContext returns a copy of parent with an appropriate deadline and diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go index b29cd11a6..f6e4f0d07 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go @@ -20,7 +20,7 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/o //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go -//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf\"}" --out=otest/collector.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go index b54a173b6..7f947273b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go @@ -18,7 +18,6 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // DefaultEnvOptionsReader is the default environments reader. 
@@ -165,11 +164,11 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) if s, ok := e.GetEnvValue(n); ok { switch strings.ToLower(s) { case "cumulative": - fn(cumulativeTemporality) + fn(metric.CumulativeTemporalitySelector) case "delta": - fn(deltaTemporality) + fn(metric.DeltaTemporalitySelector) case "lowmemory": - fn(lowMemory) + fn(metric.LowMemoryTemporalitySelector) default: global.Warn( "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", @@ -181,28 +180,6 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) } } -func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality { - return metricdata.CumulativeTemporality -} - -func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality { - switch ik { - case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter: - return metricdata.DeltaTemporality - default: - return metricdata.CumulativeTemporality - } -} - -func lowMemory(ik metric.InstrumentKind) metricdata.Temporality { - switch ik { - case metric.InstrumentKindCounter, metric.InstrumentKindHistogram: - return metricdata.DeltaTemporality - default: - return metricdata.CumulativeTemporality - } -} - func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if s, ok := e.GetEnvValue(n); ok { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go index 6af5591ea..243abcb17 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go @@ -29,6 +29,17 @@ func (ps PartialSuccess) Error() string { return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) } +// As returns true if ps can be assigned to target and makes the assignment. +// Otherwise, it returns false. This supports the errors.As() interface. +func (ps PartialSuccess) As(target any) bool { + t, ok := target.(*PartialSuccess) + if !ok { + return false + } + *t = ps + return true +} + // Is supports the errors.Is() interface. func (ps PartialSuccess) Is(err error) bool { _, ok := err.(PartialSuccess) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go index 80691ac3a..c14e5ae78 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go @@ -94,6 +94,11 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { return err } + // Check if context is canceled before attempting to wait and retry. 
+ if ctx.Err() != nil { + return fmt.Errorf("%w: %w", ctx.Err(), err) + } + if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime { return fmt.Errorf("max retry time elapsed: %w", err) } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go index 7909cac56..1e2446fe4 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go @@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. func Version() string { - return "1.38.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go index 379bc8170..d431fc451 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -113,7 +113,7 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { if psid := sd.Parent().SpanID(); psid.IsValid() { s.ParentSpanId = psid[:] } - s.Flags = buildSpanFlags(sd.Parent()) + s.Flags = buildSpanFlagsWith(sd.SpanContext().TraceFlags(), sd.Parent()) return s } @@ -159,7 +159,7 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { tid := otLink.SpanContext.TraceID() sid := otLink.SpanContext.SpanID() - flags := buildSpanFlags(otLink.SpanContext) + flags := buildSpanFlagsWith(otLink.SpanContext.TraceFlags(), otLink.SpanContext) sl = append(sl, &tracepb.Span_Link{ TraceId: tid[:], @@ -172,13 +172,15 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { return sl } -func buildSpanFlags(sc trace.SpanContext) uint32 { - flags := tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK - if sc.IsRemote() { - flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK +func buildSpanFlagsWith(tf trace.TraceFlags, parent trace.SpanContext) uint32 { + // Lower 8 bits are the W3C TraceFlags; always indicate that we know whether the parent is remote + flags := uint32(tf) | uint32(tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) + // Set the parent-is-remote bit when applicable + if parent.IsRemote() { + flags |= uint32(tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) } - return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative + return flags // nolint:gosec // Flags is a bitmask and can't be negative } // spanEvents transforms span Events to an OTLP span events. 
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 4b4cc76f4..76b7cd461 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -17,9 +17,10 @@ import ( "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" ) @@ -45,6 +46,9 @@ type client struct { conn *grpc.ClientConn tscMu sync.RWMutex tsc coltracepb.TraceServiceClient + + instID int64 + inst *observ.Instrumentation } // Compile time check *client implements otlptrace.Client. @@ -68,6 +72,7 @@ func newClient(opts ...Option) *client { stopCtx: ctx, stopFunc: cancel, conn: cfg.GRPCConn, + instID: counter.NextExporterID(), } if len(cfg.Traces.Headers) > 0 { @@ -92,13 +97,24 @@ func (c *client) Start(context.Context) error { c.conn = conn } + // Initialize the instrumentation if not already done. + // + // Initialize here instead of NewClient to allow any errors to be passed + // back to the caller and so that any setup of the environment variables to + // enable instrumentation can be set via code. + var err error + if c.inst == nil { + target := c.conn.CanonicalTarget() + c.inst, err = observ.NewInstrumentation(c.instID, target) + } + // The otlptrace.Client interface states this method is called just once, // so no need to check if already started. c.tscMu.Lock() c.tsc = coltracepb.NewTraceServiceClient(c.conn) c.tscMu.Unlock() - return nil + return err } var errAlreadyStopped = errors.New("the client is already stopped") @@ -174,7 +190,7 @@ var errShutdown = errors.New("the client is shutdown") // // Retryable errors from the server will be handled according to any // RetryConfig the client was created with. -func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error { +func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) (uploadErr error) { // Hold a read lock to ensure a shut down initiated after this starts does // not abandon the export. This read lock acquire has less priority than a // write lock acquire (i.e. 
Stop), meaning if the client is shutting down @@ -189,6 +205,12 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc ctx, cancel := c.exportContext(ctx) defer cancel() + var code codes.Code + if c.inst != nil { + op := c.inst.ExportSpans(ctx, len(protoSpans)) + defer func() { op.End(uploadErr, code) }() + } + return c.requestFunc(ctx, func(iCtx context.Context) error { resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{ ResourceSpans: protoSpans, @@ -197,16 +219,17 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc msg := resp.PartialSuccess.GetErrorMessage() n := resp.PartialSuccess.GetRejectedSpans() if n != 0 || msg != "" { - err := internal.TracePartialSuccessError(n, msg) - otel.Handle(err) + e := internal.TracePartialSuccessError(n, msg) + uploadErr = errors.Join(uploadErr, e) } } // nil is converted to OK. - if status.Code(err) == codes.OK { + code = status.Code(err) + if code == codes.OK { // Success. - return nil + return uploadErr } - return err + return errors.Join(uploadErr, err) }) } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter/counter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter/counter.go new file mode 100644 index 000000000..323b2a2c9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter/counter.go @@ -0,0 +1,31 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/counter/counter.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package counter provides a simple counter for generating unique IDs. +// +// This package is used to generate unique IDs while allowing testing packages +// to reset the counter. +package counter // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter" + +import "sync/atomic" + +// exporterN is a global 0-based count of the number of exporters created. +var exporterN atomic.Int64 + +// NextExporterID returns the next unique ID for an exporter. +func NextExporterID() int64 { + const inc = 1 + return exporterN.Add(inc) - inc +} + +// SetExporterID sets the exporter ID counter to v and returns the previous +// value. +// +// This function is useful for testing purposes, allowing you to reset the +// counter. It should not be used in production code. 
+func SetExporterID(v int64) int64 { + return exporterN.Swap(v) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go index b6e6b10fb..7fe9c9f3a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go @@ -23,3 +23,12 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/ot //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go + +//go:generate gotmpl --body=../../../../../internal/shared/x/x.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc\" }" --out=x/x.go +//go:generate gotmpl --body=../../../../../internal/shared/x/x_test.go.tmpl "--data={}" --out=x/x_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/observ/target.go.tmpl "--data={ \"pkg\": \"observ\", \"pkg_path\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ\" }" --out=observ/target.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/observ/target_test.go.tmpl "--data={ \"pkg\": \"observ\" }" --out=observ/target_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/counter/counter.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter\" }" --out=counter/counter.go +//go:generate gotmpl --body=../../../../../internal/shared/counter/counter_test.go.tmpl "--data={}" --out=counter/counter_test.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/doc.go new file mode 100644 index 000000000..0dd54e4b9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides experimental observability instrumentation for the +// otlptracegrpc exporter. 
+package observ // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go
new file mode 100644
index 000000000..2257fcc86
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go
@@ -0,0 +1,341 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package observ // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/codes"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/metric"
+	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+	"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
+)
+
+const (
+	// ScopeName is the unique name of the meter used for instrumentation.
+	ScopeName = "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ"
+
+	// SchemaURL is the schema URL of the metrics produced by this
+	// instrumentation.
+	SchemaURL = semconv.SchemaURL
+
+	// Version is the current version of this instrumentation.
+	//
+	// This matches the version of the exporter.
+	Version = internal.Version
+)
+
+var (
+	measureAttrsPool = &sync.Pool{
+		New: func() any {
+			const n = 1 + // component.name
+				1 + // component.type
+				1 + // server.addr
+				1 + // server.port
+				1 + // error.type
+				1 // rpc.grpc.status_code
+			s := make([]attribute.KeyValue, 0, n)
+			// Return a pointer to a slice instead of a slice itself
+			// to avoid allocations on every call.
+			return &s
+		},
+	}
+
+	addOptPool = &sync.Pool{
+		New: func() any {
+			const n = 1 // WithAttributeSet
+			o := make([]metric.AddOption, 0, n)
+			return &o
+		},
+	}
+
+	recordOptPool = &sync.Pool{
+		New: func() any {
+			const n = 1 // WithAttributeSet
+			o := make([]metric.RecordOption, 0, n)
+			return &o
+		},
+	}
+)
+
+func get[T any](p *sync.Pool) *[]T { return p.Get().(*[]T) }
+
+func put[T any](p *sync.Pool, s *[]T) {
+	*s = (*s)[:0] // Reset.
+	p.Put(s)
+}
+
+// ComponentName returns the component name for the exporter with the
+// provided ID.
+func ComponentName(id int64) string {
+	t := semconv.OTelComponentTypeOtlpGRPCSpanExporter.Value.AsString()
+	return fmt.Sprintf("%s/%d", t, id)
+}
+
+// Instrumentation is experimental instrumentation for the exporter.
+type Instrumentation struct {
+	inflightSpans metric.Int64UpDownCounter
+	exportedSpans metric.Int64Counter
+	opDuration    metric.Float64Histogram
+
+	attrs  []attribute.KeyValue
+	addOpt metric.AddOption
+	recOpt metric.RecordOption
+}
+
+// NewInstrumentation returns instrumentation for an OTLP over gRPC trace
+// exporter with the provided ID using the global MeterProvider.
+//
+// The id should be the unique exporter instance ID. It is used
+// to set the "component.name" attribute.
+//
+// The target is the endpoint the exporter is exporting to.
+//
+// If the experimental observability is disabled, nil is returned.
+func NewInstrumentation(id int64, target string) (*Instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + attrs := BaseAttrs(id, target) + i := &Instrumentation{ + attrs: attrs, + addOpt: metric.WithAttributeSet(attribute.NewSet(attrs...)), + + // Do not modify attrs (NewSet sorts in-place), make a new slice. + recOpt: metric.WithAttributeSet(attribute.NewSet(append( + // Default to OK status code. + []attribute.KeyValue{semconv.RPCGRPCStatusCodeOk}, + attrs..., + )...)), + } + + mp := otel.GetMeterProvider() + m := mp.Meter( + ScopeName, + metric.WithInstrumentationVersion(Version), + metric.WithSchemaURL(SchemaURL), + ) + + var err error + + inflightSpans, e := otelconv.NewSDKExporterSpanInflight(m) + if e != nil { + e = fmt.Errorf("failed to create span inflight metric: %w", e) + err = errors.Join(err, e) + } + i.inflightSpans = inflightSpans.Inst() + + exportedSpans, e := otelconv.NewSDKExporterSpanExported(m) + if e != nil { + e = fmt.Errorf("failed to create span exported metric: %w", e) + err = errors.Join(err, e) + } + i.exportedSpans = exportedSpans.Inst() + + opDuration, e := otelconv.NewSDKExporterOperationDuration(m) + if e != nil { + e = fmt.Errorf("failed to create operation duration metric: %w", e) + err = errors.Join(err, e) + } + i.opDuration = opDuration.Inst() + + return i, err +} + +// BaseAttrs returns the base attributes for the exporter with the provided ID +// and target. +// +// The id should be the unique exporter instance ID. It is used +// to set the "component.name" attribute. +// +// The target is the gRPC target the exporter is exporting to. It is expected +// to be the output of the Client's CanonicalTarget method. +func BaseAttrs(id int64, target string) []attribute.KeyValue { + host, port, err := ParseCanonicalTarget(target) + if err != nil || (host == "" && port < 0) { + if err != nil { + global.Debug("failed to parse target", "target", target, "error", err) + } + return []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(id)), + semconv.OTelComponentTypeOtlpGRPCSpanExporter, + } + } + + // Do not use append so the slice is exactly allocated. + + if port < 0 { + return []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(id)), + semconv.OTelComponentTypeOtlpGRPCSpanExporter, + semconv.ServerAddress(host), + } + } + + if host == "" { + return []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(id)), + semconv.OTelComponentTypeOtlpGRPCSpanExporter, + semconv.ServerPort(port), + } + } + + return []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(id)), + semconv.OTelComponentTypeOtlpGRPCSpanExporter, + semconv.ServerAddress(host), + semconv.ServerPort(port), + } +} + +// ExportSpans instruments the ExportSpans method of the exporter. It returns +// an [ExportOp] that must have its [ExportOp.End] method called when the +// ExportSpans method returns. +func (i *Instrumentation) ExportSpans(ctx context.Context, nSpans int) ExportOp { + start := time.Now() + + addOpt := get[metric.AddOption](addOptPool) + defer put(addOptPool, addOpt) + *addOpt = append(*addOpt, i.addOpt) + i.inflightSpans.Add(ctx, int64(nSpans), *addOpt...) + + return ExportOp{ + ctx: ctx, + start: start, + nSpans: int64(nSpans), + inst: i, + } +} + +// ExportOp tracks the operation being observed by [Instrumentation.ExportSpans]. 
+type ExportOp struct {
+	ctx    context.Context
+	start  time.Time
+	nSpans int64
+
+	inst *Instrumentation
+}
+
+// End completes the observation of the operation being observed by a call to
+// [Instrumentation.ExportSpans].
+//
+// Any error that is encountered is provided as err.
+//
+// If err is not nil, all spans will be recorded as failures unless the error
+// is of type [internal.PartialSuccess]. In the case of a PartialSuccess, the
+// number of successfully exported spans will be determined by inspecting the
+// RejectedItems field of the PartialSuccess.
+func (e ExportOp) End(err error, code codes.Code) {
+	addOpt := get[metric.AddOption](addOptPool)
+	defer put(addOptPool, addOpt)
+	*addOpt = append(*addOpt, e.inst.addOpt)
+
+	e.inst.inflightSpans.Add(e.ctx, -e.nSpans, *addOpt...)
+
+	success := successful(e.nSpans, err)
+	// Record successfully exported spans, even if the value is 0, which is
+	// meaningful to distribution aggregations.
+	e.inst.exportedSpans.Add(e.ctx, success, *addOpt...)
+
+	if err != nil {
+		attrs := get[attribute.KeyValue](measureAttrsPool)
+		defer put(measureAttrsPool, attrs)
+		*attrs = append(*attrs, e.inst.attrs...)
+		*attrs = append(*attrs, semconv.ErrorType(err))
+
+		// Do not inefficiently make a copy of attrs by using
+		// WithAttributes instead of WithAttributeSet.
+		o := metric.WithAttributeSet(attribute.NewSet(*attrs...))
+		// Reset addOpt with new attribute set.
+		*addOpt = append((*addOpt)[:0], o)
+
+		e.inst.exportedSpans.Add(e.ctx, e.nSpans-success, *addOpt...)
+	}
+
+	recOpt := get[metric.RecordOption](recordOptPool)
+	defer put(recordOptPool, recOpt)
+	*recOpt = append(*recOpt, e.inst.recordOption(err, code))
+
+	d := time.Since(e.start).Seconds()
+	e.inst.opDuration.Record(e.ctx, d, *recOpt...)
+}
+
+// recordOption returns a RecordOption with attributes representing the
+// outcome of the operation being recorded.
+//
+// If err is nil and code is codes.OK, the default recOpt of the
+// Instrumentation is returned.
+//
+// If err is not nil or code is not codes.OK, a new RecordOption is returned
+// with the base attributes of the Instrumentation plus the rpc.grpc.status_code
+// attribute set to the provided code, and if err is not nil, the error.type
+// attribute set to the type of the error.
+func (i *Instrumentation) recordOption(err error, code codes.Code) metric.RecordOption {
+	if err == nil && code == codes.OK {
+		return i.recOpt
+	}
+
+	attrs := get[attribute.KeyValue](measureAttrsPool)
+	defer put(measureAttrsPool, attrs)
+	*attrs = append(*attrs, i.attrs...)
+
+	c := int64(code) // uint32 -> int64.
+	*attrs = append(*attrs, semconv.RPCGRPCStatusCodeKey.Int64(c))
+	if err != nil {
+		*attrs = append(*attrs, semconv.ErrorType(err))
+	}
+
+	// Do not inefficiently make a copy of attrs by using WithAttributes
+	// instead of WithAttributeSet.
+	return metric.WithAttributeSet(attribute.NewSet(*attrs...))
+}
+
+// successful returns the number of successfully exported spans out of the n
+// that were exported based on the provided error.
+//
+// If err is nil, n is returned. All spans were successfully exported.
+//
+// If err is not nil and not an [internal.PartialSuccess] error, 0 is returned.
+// It is assumed all spans failed to be exported.
+//
+// If err is an [internal.PartialSuccess] error, the number of successfully
+// exported spans is computed by subtracting the RejectedItems field from n. If
+// RejectedItems is negative, n is returned. If RejectedItems is greater than
+// n, 0 is returned.
+func successful(n int64, err error) int64 {
+	if err == nil {
+		return n // All spans successfully exported.
+	}
+	// Split rejection calculation so successful is inlinable.
+	return n - rejected(n, err)
+}
+
+var errPartialPool = &sync.Pool{
+	New: func() any { return new(internal.PartialSuccess) },
+}
+
+// rejected returns how many of the n exported spans were rejected based on
+// the provided non-nil err.
+func rejected(n int64, err error) int64 {
+	ps := errPartialPool.Get().(*internal.PartialSuccess)
+	defer errPartialPool.Put(ps)
+	// Check for partial success.
+	if errors.As(err, ps) {
+		// Bound RejectedItems to [0, n]. This should not be needed,
+		// but be defensive as this is from an external source.
+		return min(max(ps.RejectedItems, 0), n)
+	}
+	return n // All spans rejected.
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/target.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/target.go
new file mode 100644
index 000000000..34eee27db
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/target.go
@@ -0,0 +1,143 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/observ/target.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package observ // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ"
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"net/netip"
+	"strconv"
+	"strings"
+)
+
+const (
+	schemeUnix         = "unix"
+	schemeUnixAbstract = "unix-abstract"
+)
+
+// ParseCanonicalTarget parses a target string and returns the extracted host
+// (domain address or IP), the target port, or an error.
+//
+// If no port is specified, -1 is returned.
+//
+// If no host is specified, an empty string is returned.
+//
+// The target string is expected to always have the form
+// "<scheme>://[authority]/<endpoint>". For example:
+//   - "dns:///example.com:42"
+//   - "dns://8.8.8.8/example.com:42"
+//   - "unix:///path/to/socket"
+//   - "unix-abstract:///socket-name"
+//   - "passthrough:///192.34.2.1:42"
+//
+// The target is expected to come from the CanonicalTarget method of a gRPC
+// Client.
+func ParseCanonicalTarget(target string) (string, int, error) {
+	const sep = "://"
+
+	// Find scheme. Do not allocate the string by using url.Parse.
+	idx := strings.Index(target, sep)
+	if idx == -1 {
+		return "", -1, fmt.Errorf("invalid target %q: missing scheme", target)
+	}
+	scheme, endpoint := target[:idx], target[idx+len(sep):]
+
+	// Check for unix schemes.
+	if scheme == schemeUnix || scheme == schemeUnixAbstract {
+		return parseUnix(endpoint)
+	}
+
+	// Strip leading slash and any authority.
+	if i := strings.Index(endpoint, "/"); i != -1 {
+		endpoint = endpoint[i+1:]
+	}
+
+	// DNS, passthrough, and custom resolvers.
+	return parseEndpoint(endpoint)
+}
+
+// parseUnix parses unix socket targets.
+func parseUnix(endpoint string) (string, int, error) {
+	// Format: unix[-abstract]://path
+	//
+	// We should have "/path" (empty authority) if valid.
+	if len(endpoint) >= 1 && endpoint[0] == '/' {
+		// Return the full path including leading slash.
+		return endpoint, -1, nil
+	}
+
+	// If there's no leading slash, it means there might be an authority.
+	// Check for the authority case (which should error): "authority/path".
+	if slashIdx := strings.Index(endpoint, "/"); slashIdx > 0 {
+		return "", -1, fmt.Errorf("invalid (non-empty) authority: %s", endpoint[:slashIdx])
+	}
+
+	return "", -1, errors.New("invalid unix target format")
+}
+
+// parseEndpoint parses an endpoint from a gRPC target.
+//
+// It supports the following formats:
+//   - "host"
+//   - "host%zone"
+//   - "host:port"
+//   - "host%zone:port"
+//   - "ipv4"
+//   - "ipv4%zone"
+//   - "ipv4:port"
+//   - "ipv4%zone:port"
+//   - "ipv6"
+//   - "ipv6%zone"
+//   - "[ipv6]"
+//   - "[ipv6%zone]"
+//   - "[ipv6]:port"
+//   - "[ipv6%zone]:port"
+//
+// It returns the host or host%zone (domain address or IP), the port (or -1 if
+// not specified), or an error if the input is not valid.
+func parseEndpoint(endpoint string) (string, int, error) {
+	// First check if the endpoint is just an IP address.
+	if ip := parseIP(endpoint); ip != "" {
+		return ip, -1, nil
+	}
+
+	// If there's no colon, there is no port (IPv6 with no port checked above).
+	if !strings.Contains(endpoint, ":") {
+		return endpoint, -1, nil
+	}
+
+	host, portStr, err := net.SplitHostPort(endpoint)
+	if err != nil {
+		return "", -1, fmt.Errorf("invalid host:port %q: %w", endpoint, err)
+	}
+
+	const base, bitSize = 10, 16
+	port16, err := strconv.ParseUint(portStr, base, bitSize)
+	if err != nil {
+		return "", -1, fmt.Errorf("invalid port %q: %w", portStr, err)
+	}
+	port := int(port16) // port is guaranteed to be in the range [0, 65535].
+
+	return host, port, nil
+}
+
+// parseIP attempts to parse the entire endpoint as an IP address.
+// It returns the normalized string form of the IP if successful,
+// or an empty string if parsing fails.
+func parseIP(ip string) string {
+	// Strip leading and trailing brackets for IPv6 addresses.
+	if len(ip) >= 2 && ip[0] == '[' && ip[len(ip)-1] == ']' {
+		ip = ip[1 : len(ip)-1]
+	}
+	addr, err := netip.ParseAddr(ip)
+	if err != nil {
+		return ""
+	}
+	// Return the normalized string form of the IP.
+	return addr.String()
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
index 1c4659423..a811a07b3 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
@@ -29,6 +29,17 @@ func (ps PartialSuccess) Error() string {
 	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
 }
 
+// As returns true if ps can be assigned to target and makes the assignment.
+// Otherwise, it returns false. This supports the errors.As() interface.
+func (ps PartialSuccess) As(target any) bool {
+	t, ok := target.(*PartialSuccess)
+	if !ok {
+		return false
+	}
+	*t = ps
+	return true
+}
+
 // Is supports the errors.Is() interface.
 func (ps PartialSuccess) Is(err error) bool {
 	_, ok := err.(PartialSuccess)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
index 259a898ae..a7b8e81a7 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
@@ -94,6 +94,11 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
 				return err
 			}
 
+			// Check if context is canceled before attempting to wait and retry.
+			if ctx.Err() != nil {
+				return fmt.Errorf("%w: %w", ctx.Err(), err)
+			}
+
 			if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
 				return fmt.Errorf("max retry time elapsed: %w", err)
 			}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go
new file mode 100644
index 000000000..e2d7cee1e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+
+// Version is the current release version of the OpenTelemetry OTLP gRPC trace
+// exporter in use.
+const Version = "1.39.0"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/README.md
similarity index 54%
rename from vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md
rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/README.md
index feec16fa6..15a3011bf 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/README.md
@@ -1,33 +1,34 @@
 # Experimental Features
 
-The Trace SDK contains features that have not yet stabilized in the OpenTelemetry specification.
-These features are added to the OpenTelemetry Go Trace SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+The `otlptracegrpc` exporter contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the `otlptracegrpc` exporter prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
 
 These features may change in backwards incompatible ways as feedback is applied.
 See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
 
 ## Features
 
-- [Self-Observability](#self-observability)
+- [Observability](#observability)
 
-### Self-Observability
+### Observability
 
-The SDK provides a self-observability feature that allows you to monitor the SDK itself.
+The `otlptracegrpc` exporter provides an observability feature that allows you to monitor the exporter itself.
 
-To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`.
+To opt-in, set the environment variable `OTEL_GO_X_OBSERVABILITY` to `true`.
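As a hedged illustration of the opt-in: the flag is read from the environment, so it must be set before the exporter is constructed. Setting it in-process, as below, is a convenience for the sketch; it would normally be set in the shell or service manager.

```go
package main

import (
	"context"
	"log"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	// Must happen before the exporter is created.
	os.Setenv("OTEL_GO_X_OBSERVABILITY", "true")

	ctx := context.Background()
	exp, err := otlptracegrpc.New(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Exporter metrics are now recorded via the global MeterProvider.
	defer func() { _ = exp.Shutdown(ctx) }()
}
```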
When enabled, the SDK will create the following metrics using the global `MeterProvider`: -- `otel.sdk.span.live` -- `otel.sdk.span.started` +- `otel.sdk.exporter.span.inflight` +- `otel.sdk.exporter.span.exported` +- `otel.sdk.exporter.operation.duration` Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. -[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md +[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.37.0/docs/otel/sdk-metrics.md ## Compatibility and Stability -Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../../../VERSIONING.md). These features may be removed or modified in successive version releases, including patch versions. When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/observ.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/observ.go new file mode 100644 index 000000000..4e89c6524 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/observ.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x" + +import "strings" + +// Observability is an experimental feature flag that determines if exporter +// observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Observability = newFeature( + []string{"OBSERVABILITY"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/x.go similarity index 55% rename from vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/x.go index 2fcbbcc66..741ba62c9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/x.go @@ -1,45 +1,38 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/x/x.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/trace]. -package x // import "go.opentelemetry.io/otel/sdk/trace/internal/x" +// Package x documents experimental features for [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc]. +package x // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x" import ( "os" - "strings" ) -// SelfObservability is an experimental feature flag that determines if SDK -// self-observability metrics are enabled. 
-// -// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable -// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" -// will also enable this). -var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) { - if strings.EqualFold(v, "true") { - return v, true - } - return "", false -}) - // Feature is an experimental feature control flag. It provides a uniform way // to interact with these feature flags and parse their values. type Feature[T any] struct { - key string + keys []string parse func(v string) (T, bool) } -func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { +func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] { const envKeyRoot = "OTEL_GO_X_" + keys := make([]string, 0, len(suffix)) + for _, s := range suffix { + keys = append(keys, envKeyRoot+s) + } return Feature[T]{ - key: envKeyRoot + suffix, + keys: keys, parse: parse, } } -// Key returns the environment variable key that needs to be set to enable the +// Keys returns the environment variable keys that can be set to enable the // feature. -func (f Feature[T]) Key() string { return f.key } +func (f Feature[T]) Keys() []string { return f.keys } // Lookup returns the user configured value for the feature and true if the // user has enabled the feature. Otherwise, if the feature is not enabled, a @@ -49,11 +42,13 @@ func (f Feature[T]) Lookup() (v T, ok bool) { // // > The SDK MUST interpret an empty value of an environment variable the // > same way as when the variable is unset. - vRaw := os.Getenv(f.key) - if vRaw == "" { - return v, ok + for _, key := range f.keys { + vRaw := os.Getenv(key) + if vRaw != "" { + return f.parse(vRaw) + } } - return f.parse(vRaw) + return v, ok } // Enabled reports whether the feature is enabled. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 3b79c1a0b..6838f3c4e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.38.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index adb37b5b0..6db969f73 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -105,7 +105,7 @@ type delegatedInstrument interface { setDelegate(metric.Meter) } -// instID are the identifying properties of a instrument. +// instID are the identifying properties of an instrument. type instID struct { // name is the name of the stream. name string diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go index 1e6473b32..527d9aec8 100644 --- a/vendor/go.opentelemetry.io/otel/metric.go +++ b/vendor/go.opentelemetry.io/otel/metric.go @@ -11,7 +11,7 @@ import ( // Meter returns a Meter from the global MeterProvider. The name must be the // name of the library providing instrumentation. This name may be the same as // the instrumented code only if that code provides built-in instrumentation. 
-// If the name is empty, then a implementation defined default name will be +// If the name is empty, then an implementation defined default name will be // used instead. // // If this is called before a global MeterProvider is registered the returned diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go index d9e3b13e4..e42dd6e70 100644 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -3,7 +3,11 @@ package metric // import "go.opentelemetry.io/otel/metric" -import "go.opentelemetry.io/otel/attribute" +import ( + "slices" + + "go.opentelemetry.io/otel/attribute" +) // MeterConfig contains options for Meters. type MeterConfig struct { @@ -62,12 +66,38 @@ func WithInstrumentationVersion(version string) MeterOption { }) } -// WithInstrumentationAttributes sets the instrumentation attributes. +// WithInstrumentationAttributes adds the instrumentation attributes. // -// The passed attributes will be de-duplicated. +// This is equivalent to calling [WithInstrumentationAttributeSet] with an +// [attribute.Set] created from a clone of the passed attributes. +// [WithInstrumentationAttributeSet] is recommended for more control. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + set := attribute.NewSet(slices.Clone(attr)...) + return WithInstrumentationAttributeSet(set) +} + +// WithInstrumentationAttributeSet adds the instrumentation attributes. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. +func WithInstrumentationAttributeSet(set attribute.Set) MeterOption { + if set.Len() == 0 { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + return config + }) + } + return meterOptionFunc(func(config MeterConfig) MeterConfig { - config.attrs = attribute.NewSet(attr...) + if config.attrs.Len() == 0 { + config.attrs = set + } else { + config.attrs = mergeSets(config.attrs, set) + } return config }) } diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 6692d2665..271ab71f1 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -111,7 +111,7 @@ func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { } // Clear all flags other than the trace-context supported sampling bit. - scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled + scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled // nolint:gosec // slice size already checked. // Ignore the error returned here. 
Failure to parse tracestate MUST NOT
	// affect the parsing of traceparent according to the W3C tracecontext
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go
new file mode 100644
index 000000000..bfeb73e81
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x documents experimental features for [go.opentelemetry.io/otel/sdk].
+package x // import "go.opentelemetry.io/otel/sdk/internal/x"
+
+import "strings"
+
+// Resource is an experimental feature flag that defines if resource detectors
+// should include experimental semantic conventions.
+//
+// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
+// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+// will also enable this).
+var Resource = newFeature(
+	[]string{"RESOURCE"},
+	func(v string) (string, bool) {
+		if strings.EqualFold(v, "true") {
+			return v, true
+		}
+		return "", false
+	},
+)
+
+// Observability is an experimental feature flag that determines if SDK
+// observability metrics are enabled.
+//
+// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable
+// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+// will also enable this).
+var Observability = newFeature(
+	[]string{"OBSERVABILITY", "SELF_OBSERVABILITY"},
+	func(v string) (string, bool) {
+		if strings.EqualFold(v, "true") {
+			return v, true
+		}
+		return "", false
+	},
+)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
index 1be472e91..13347e560 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
@@ -1,48 +1,38 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/x/x.go.tmpl
+
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
 
-// Package x contains support for OTel SDK experimental features.
-//
-// This package should only be used for features defined in the specification.
-// It should not be used for experiments or new project ideas.
+// Package x documents experimental features for [go.opentelemetry.io/otel/sdk].
 package x // import "go.opentelemetry.io/otel/sdk/internal/x"
 
 import (
 	"os"
-	"strings"
 )
 
-// Resource is an experimental feature flag that defines if resource detectors
-// should be included experimental semantic conventions.
-//
-// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
-// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
-// will also enable this).
-var Resource = newFeature("RESOURCE", func(v string) (string, bool) {
-	if strings.EqualFold(v, "true") {
-		return v, true
-	}
-	return "", false
-})
-
 // Feature is an experimental feature control flag. It provides a uniform way
 // to interact with these feature flags and parse their values.
type Feature[T any] struct { - key string + keys []string parse func(v string) (T, bool) } -func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { +func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] { const envKeyRoot = "OTEL_GO_X_" + keys := make([]string, 0, len(suffix)) + for _, s := range suffix { + keys = append(keys, envKeyRoot+s) + } return Feature[T]{ - key: envKeyRoot + suffix, + keys: keys, parse: parse, } } -// Key returns the environment variable key that needs to be set to enable the +// Keys returns the environment variable keys that can be set to enable the // feature. -func (f Feature[T]) Key() string { return f.key } +func (f Feature[T]) Keys() []string { return f.keys } // Lookup returns the user configured value for the feature and true if the // user has enabled the feature. Otherwise, if the feature is not enabled, a @@ -52,11 +42,13 @@ func (f Feature[T]) Lookup() (v T, ok bool) { // // > The SDK MUST interpret an empty value of an environment variable the // > same way as when the variable is unset. - vRaw := os.Getenv(f.key) - if vRaw == "" { - return v, ok + for _, key := range f.keys { + vRaw := os.Getenv(key) + if vRaw != "" { + return f.parse(vRaw) + } } - return f.parse(vRaw) + return v, ok } // Enabled reports whether the feature is enabled. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go index 0f3b9d623..dd75eefac 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go @@ -63,6 +63,22 @@ // issues for databases that cannot handle high cardinality. // - A too low of a limit causes loss of attribute detail as more data falls into overflow. // +// # Ordering and Collection Guarantees +// +// For performance reasons, the SDK does not guarantee that the order in which +// synchronous measurements are made to the SDK is reflected in the collected +// metric data. This means that even when a single goroutine makes sequential +// synchronous measurements, it is possible for a later measurement to be +// included in the collected metric data when an earlier measurement is not. +// This applies to measurements made to different instruments, or to different +// attribute sets on the same instrument. Sequential measurements made to the +// same instrument and with the same attributes are guaranteed to preserve +// ordering with respect to collection. +// +// Additionally, the SDK does not guarantee that exemplars are always included +// in the same batch of metric data as the measurement they are associated +// with. +// // See [go.opentelemetry.io/otel/metric] for more information about // the metric API. // diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go index 08e8f68fe..453278a0c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go @@ -7,9 +7,11 @@ import ( "context" "math" "math/rand/v2" + "sync" "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/reservoir" ) // FixedSizeReservoirProvider returns a provider of [FixedSizeReservoir]. 
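The switch from a single key to a slice of keys in the x packages above is what lets one feature answer to several environment variables, e.g. OTEL_GO_X_OBSERVABILITY falling back to the legacy OTEL_GO_X_SELF_OBSERVABILITY. A self-contained sketch of the lookup rule (first non-empty key wins; empty values count as unset):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// enabled checks each key in order; the first non-empty value is parsed and
// later keys are ignored. Empty values are treated the same as unset, per
// the spec note quoted above.
func enabled(keys ...string) bool {
	for _, key := range keys {
		if v := os.Getenv(key); v != "" {
			return strings.EqualFold(v, "true")
		}
	}
	return false
}

func main() {
	os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "TRUE")
	// The new name is checked first, then the legacy name.
	fmt.Println(enabled("OTEL_GO_X_OBSERVABILITY", "OTEL_GO_X_SELF_OBSERVABILITY")) // true
}
```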
@@ -34,7 +36,9 @@ var _ Reservoir = &FixedSizeReservoir{} // If there are more than k, the Reservoir will then randomly sample all // additional measurement with a decreasing probability. type FixedSizeReservoir struct { + reservoir.ConcurrentSafe *storage + mu sync.Mutex // count is the number of measurement seen. count int64 @@ -123,12 +127,14 @@ func (r *FixedSizeReservoir) Offer(ctx context.Context, t time.Time, n Value, a // https://github.com/MrAlias/reservoir-sampling for a performance // comparison of reservoir sampling algorithms. - if int(r.count) < cap(r.store) { - r.store[r.count] = newMeasurement(ctx, t, n, a) + r.mu.Lock() + defer r.mu.Unlock() + if int(r.count) < cap(r.measurements) { + r.store(int(r.count), newMeasurement(ctx, t, n, a)) } else if r.count == r.next { // Overwrite a random existing measurement with the one offered. - idx := int(rand.Int64N(int64(cap(r.store)))) - r.store[idx] = newMeasurement(ctx, t, n, a) + idx := int(rand.Int64N(int64(cap(r.measurements)))) + r.store(idx, newMeasurement(ctx, t, n, a)) r.advance() } r.count++ @@ -139,7 +145,7 @@ func (r *FixedSizeReservoir) reset() { // This resets the number of exemplars known. r.count = 0 // Random index inserts should only happen after the storage is full. - r.next = int64(cap(r.store)) + r.next = int64(cap(r.measurements)) // Initial random number in the series used to generate r.next. // @@ -150,7 +156,7 @@ func (r *FixedSizeReservoir) reset() { // This maps the uniform random number in (0,1) to a geometric distribution // over the same interval. The mean of the distribution is inversely // proportional to the storage capacity. - r.w = math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store))) + r.w = math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.measurements))) r.advance() } @@ -170,7 +176,7 @@ func (r *FixedSizeReservoir) advance() { // therefore the next r.w will be based on the same distribution (i.e. // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by // computing the next random number `u` and take r.w as `w * u^(1/k)`. - r.w *= math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store))) + r.w *= math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.measurements))) // Use the new random number in the series to calculate the count of the // next measurement that will be stored. // @@ -188,6 +194,8 @@ func (r *FixedSizeReservoir) advance() { // // The Reservoir state is preserved after this call. func (r *FixedSizeReservoir) Collect(dest *[]Exemplar) { + r.mu.Lock() + defer r.mu.Unlock() r.storage.Collect(dest) // Call reset here even though it will reset r.count and restart the random // number series. This will persist any old exemplars as long as no new diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go index decab613e..60c871a44 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go @@ -7,9 +7,11 @@ import ( "context" "slices" "sort" + "sync" "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/reservoir" ) // HistogramReservoirProvider is a provider of [HistogramReservoir]. @@ -39,7 +41,9 @@ var _ Reservoir = &HistogramReservoir{} // falls within a histogram bucket. The histogram bucket upper-boundaries are // define by bounds. 
type HistogramReservoir struct { + reservoir.ConcurrentSafe *storage + mu sync.Mutex // bounds are bucket bounds in ascending order. bounds []float64 @@ -57,14 +61,29 @@ type HistogramReservoir struct { // parameters are the value and dropped (filtered) attributes of the // measurement respectively. func (r *HistogramReservoir) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) { - var x float64 + var n float64 switch v.Type() { case Int64ValueType: - x = float64(v.Int64()) + n = float64(v.Int64()) case Float64ValueType: - x = v.Float64() + n = v.Float64() default: panic("unknown value type") } - r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a) + + idx := sort.SearchFloat64s(r.bounds, n) + m := newMeasurement(ctx, t, v, a) + + r.mu.Lock() + defer r.mu.Unlock() + r.store(idx, m) +} + +// Collect returns all the held exemplars. +// +// The Reservoir state is preserved after this call. +func (r *HistogramReservoir) Collect(dest *[]Exemplar) { + r.mu.Lock() + defer r.mu.Unlock() + r.storage.Collect(dest) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go index 0e2e26dfb..16b61c07d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go @@ -13,24 +13,28 @@ import ( // storage is an exemplar storage for [Reservoir] implementations. type storage struct { - // store are the measurements sampled. + // measurements are the measurements sampled. // // This does not use []metricdata.Exemplar because it potentially would // require an allocation for trace and span IDs in the hot path of Offer. - store []measurement + measurements []measurement } func newStorage(n int) *storage { - return &storage{store: make([]measurement, n)} + return &storage{measurements: make([]measurement, n)} +} + +func (r *storage) store(idx int, m measurement) { + r.measurements[idx] = m } // Collect returns all the held exemplars. // // The Reservoir state is preserved after this call. 
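The HistogramReservoir above indexes its exemplar slots with sort.SearchFloat64s, which maps a value onto one of len(bounds)+1 buckets. A small illustration of that mapping:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	bounds := []float64{0, 5, 10} // buckets: (-inf, 0], (0, 5], (5, 10], (10, +inf)
	for _, v := range []float64{-1, 0, 3, 5, 7, 42} {
		// SearchFloat64s returns the smallest index i with bounds[i] >= v,
		// which is exactly the bucket (and exemplar slot) for v.
		fmt.Printf("value %v -> bucket %d\n", v, sort.SearchFloat64s(bounds, v))
	}
}
```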
func (r *storage) Collect(dest *[]Exemplar) { - *dest = reset(*dest, len(r.store), len(r.store)) + *dest = reset(*dest, len(r.measurements), len(r.measurements)) var n int - for _, m := range r.store { + for _, m := range r.measurements { if !m.valid { continue } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go index 25ea6244e..e0558cb63 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go @@ -23,8 +23,9 @@ const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogr var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107, 112} func (i InstrumentKind) String() string { - if i >= InstrumentKind(len(_InstrumentKind_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_InstrumentKind_index)-1 { return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")" } - return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]] + return _InstrumentKind_name[_InstrumentKind_index[idx]:_InstrumentKind_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go index 0321da681..2b6041080 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go @@ -110,12 +110,13 @@ func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregati // Sum returns a sum aggregate function input and output. func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) { - s := newSum[N](monotonic, b.AggregationLimit, b.resFunc()) switch b.Temporality { case metricdata.DeltaTemporality: - return b.filter(s.measure), s.delta + s := newDeltaSum[N](monotonic, b.AggregationLimit, b.resFunc()) + return b.filter(s.measure), s.collect default: - return b.filter(s.measure), s.cumulative + s := newCumulativeSum[N](monotonic, b.AggregationLimit, b.resFunc()) + return b.filter(s.measure), s.collect } } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/atomic.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/atomic.go new file mode 100644 index 000000000..0fa6d3c6f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/atomic.go @@ -0,0 +1,184 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "math" + "runtime" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/attribute" +) + +// atomicCounter is an efficient way of adding to a number which is either an +// int64 or float64. It is designed to be efficient when adding whole +// numbers, regardless of whether N is an int64 or float64. +// +// Inspired by the Prometheus counter implementation: +// https://github.com/prometheus/client_golang/blob/14ccb93091c00f86b85af7753100aa372d63602b/prometheus/counter.go#L108 +type atomicCounter[N int64 | float64] struct { + // nFloatBits contains only the non-integer portion of the counter. + nFloatBits atomic.Uint64 + // nInt contains only the integer portion of the counter. + nInt atomic.Int64 +} + +// load returns the current value. The caller must ensure all calls to add have +// returned prior to calling load. 
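A rough standalone sketch of the split-counter idea behind atomicCounter (names here are illustrative, not the SDK's API): whole numbers take the atomic-integer fast path, fractional values fall back to a CAS loop over float64 bits, and a load sums the two parts.

```go
package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

type counter struct {
	floatBits atomic.Uint64 // fractional additions, stored as float64 bits
	intPart   atomic.Int64  // whole-number additions
}

func (c *counter) add(v float64) {
	if iv := int64(v); float64(iv) == v {
		c.intPart.Add(iv) // fast path: v is a whole number
		return
	}
	for {
		old := c.floatBits.Load()
		nb := math.Float64bits(math.Float64frombits(old) + v)
		if c.floatBits.CompareAndSwap(old, nb) { // CAS loop for the float part
			return
		}
	}
}

// load sums the parts; as documented above, callers must ensure all adds
// have returned first.
func (c *counter) load() float64 {
	return math.Float64frombits(c.floatBits.Load()) + float64(c.intPart.Load())
}

func main() {
	var c counter
	c.add(5)
	c.add(0.25)
	fmt.Println(c.load()) // 5.25
}
```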
+func (n *atomicCounter[N]) load() N { + fval := math.Float64frombits(n.nFloatBits.Load()) + ival := n.nInt.Load() + return N(fval + float64(ival)) +} + +func (n *atomicCounter[N]) add(value N) { + ival := int64(value) + // This case is where the value is an int, or if it is a whole-numbered float. + if float64(ival) == float64(value) { + n.nInt.Add(ival) + return + } + + // Value must be a float below. + for { + oldBits := n.nFloatBits.Load() + newBits := math.Float64bits(math.Float64frombits(oldBits) + float64(value)) + if n.nFloatBits.CompareAndSwap(oldBits, newBits) { + return + } + } +} + +// hotColdWaitGroup is a synchronization primitive which enables lockless +// writes for concurrent writers and enables a reader to acquire exclusive +// access to a snapshot of state including only completed operations. +// Conceptually, it can be thought of as a "hot" wait group, +// and a "cold" wait group, with the ability for the reader to atomically swap +// the hot and cold wait groups, and wait for the now-cold wait group to +// complete. +// +// Inspired by the prometheus/client_golang histogram implementation: +// https://github.com/prometheus/client_golang/blob/a974e0d45e0aa54c65492559114894314d8a2447/prometheus/histogram.go#L725 +// +// Usage: +// +// var hcwg hotColdWaitGroup +// var data [2]any +// +// func write() { +// hotIdx := hcwg.start() +// defer hcwg.done(hotIdx) +// // modify data without locking +// data[hotIdx].update() +// } +// +// func read() { +// coldIdx := hcwg.swapHotAndWait() +// // read data now that all writes to the cold data have completed. +// data[coldIdx].read() +// } +type hotColdWaitGroup struct { + // startedCountAndHotIdx contains a 63-bit counter in the lower bits, + // and a 1 bit hot index to denote which of the two data-points new + // measurements to write to. These are contained together so that read() + // can atomically swap the hot bit, reset the started writes to zero, and + // read the number writes that were started prior to the hot bit being + // swapped. + startedCountAndHotIdx atomic.Uint64 + // endedCounts is the number of writes that have completed to each + // dataPoint. + endedCounts [2]atomic.Uint64 +} + +// start returns the hot index that the writer should write to. The returned +// hot index is 0 or 1. The caller must call done(hot index) after it finishes +// its operation. start() is safe to call concurrently with other methods. +func (l *hotColdWaitGroup) start() uint64 { + // We increment h.startedCountAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to return the currently-hot index. + return l.startedCountAndHotIdx.Add(1) >> 63 +} + +// done signals to the reader that an operation has fully completed. +// done is safe to call concurrently. +func (l *hotColdWaitGroup) done(hotIdx uint64) { + l.endedCounts[hotIdx].Add(1) +} + +// swapHotAndWait swaps the hot bit, waits for all start() calls to be done(), +// and then returns the now-cold index for the reader to read from. The +// returned index is 0 or 1. swapHotAndWait must not be called concurrently. +func (l *hotColdWaitGroup) swapHotAndWait() uint64 { + n := l.startedCountAndHotIdx.Load() + coldIdx := (^n) >> 63 + // Swap the hot and cold index while resetting the started measurements + // count to zero. + n = l.startedCountAndHotIdx.Swap((coldIdx << 63)) + hotIdx := n >> 63 + startedCount := n & ((1 << 63) - 1) + // Wait for all measurements to the previously-hot map to finish. 
+ for startedCount != l.endedCounts[hotIdx].Load() { + runtime.Gosched() // Let measurements complete. + } + // reset the number of ended operations + l.endedCounts[hotIdx].Store(0) + return hotIdx +} + +// limitedSyncMap is a sync.Map which enforces the aggregation limit on +// attribute sets and provides a Len() function. +type limitedSyncMap struct { + sync.Map + aggLimit int + len int + lenMux sync.Mutex +} + +func (m *limitedSyncMap) LoadOrStoreAttr(fltrAttr attribute.Set, newValue func(attribute.Set) any) any { + actual, loaded := m.Load(fltrAttr.Equivalent()) + if loaded { + return actual + } + // If the overflow set exists, assume we have already overflowed and don't + // bother with the slow path below. + actual, loaded = m.Load(overflowSet.Equivalent()) + if loaded { + return actual + } + // Slow path: add a new attribute set. + m.lenMux.Lock() + defer m.lenMux.Unlock() + + // re-fetch now that we hold the lock to ensure we don't use the overflow + // set unless we are sure the attribute set isn't being written + // concurrently. + actual, loaded = m.Load(fltrAttr.Equivalent()) + if loaded { + return actual + } + + if m.aggLimit > 0 && m.len >= m.aggLimit-1 { + fltrAttr = overflowSet + } + actual, loaded = m.LoadOrStore(fltrAttr.Equivalent(), newValue(fltrAttr)) + if !loaded { + m.len++ + } + return actual +} + +func (m *limitedSyncMap) Clear() { + m.lenMux.Lock() + defer m.lenMux.Unlock() + m.len = 0 + m.Map.Clear() +} + +func (m *limitedSyncMap) Len() int { + m.lenMux.Lock() + defer m.lenMux.Unlock() + return m.len +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go index 857eddf30..5b3a19c06 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -301,7 +301,7 @@ func newExponentialHistogram[N int64 | float64]( maxScale: maxScale, newRes: r, - limit: newLimiter[*expoHistogramDataPoint[N]](limit), + limit: newLimiter[expoHistogramDataPoint[N]](limit), values: make(map[attribute.Distinct]*expoHistogramDataPoint[N]), start: now(), @@ -317,7 +317,7 @@ type expoHistogram[N int64 | float64] struct { maxScale int32 newRes func(attribute.Set) FilteredExemplarReservoir[N] - limit limiter[*expoHistogramDataPoint[N]] + limit limiter[expoHistogramDataPoint[N]] values map[attribute.Distinct]*expoHistogramDataPoint[N] valuesMu sync.Mutex @@ -338,13 +338,18 @@ func (e *expoHistogram[N]) measure( e.valuesMu.Lock() defer e.valuesMu.Unlock() - attr := e.limit.Attributes(fltrAttr, e.values) - v, ok := e.values[attr.Equivalent()] + v, ok := e.values[fltrAttr.Equivalent()] if !ok { - v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum) - v.res = e.newRes(attr) + fltrAttr = e.limit.Attributes(fltrAttr, e.values) + // If we overflowed, make sure we add to the existing overflow series + // if it already exists. 
+ v, ok = e.values[fltrAttr.Equivalent()] + if !ok { + v = newExpoHistogramDataPoint[N](fltrAttr, e.maxSize, e.maxScale, e.noMinMax, e.noSum) + v.res = e.newRes(fltrAttr) - e.values[attr.Equivalent()] = v + e.values[fltrAttr.Equivalent()] = v + } } v.record(value) v.res.Offer(ctx, value, droppedAttr) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go index d4c41642d..e4f9409bc 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go @@ -5,10 +5,12 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg import ( "context" + "sync" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/exemplar" + "go.opentelemetry.io/otel/sdk/metric/internal/reservoir" ) // FilteredExemplarReservoir wraps a [exemplar.Reservoir] with a filter. @@ -29,6 +31,11 @@ type FilteredExemplarReservoir[N int64 | float64] interface { type filteredExemplarReservoir[N int64 | float64] struct { filter exemplar.Filter reservoir exemplar.Reservoir + // The exemplar.Reservoir is not required to be concurrent safe, but + // implementations can indicate that they are concurrent-safe by embedding + // reservoir.ConcurrentSafe in order to improve performance. + reservoirMux sync.Mutex + concurrentSafe bool } // NewFilteredExemplarReservoir creates a [FilteredExemplarReservoir] which only offers values @@ -37,17 +44,30 @@ func NewFilteredExemplarReservoir[N int64 | float64]( f exemplar.Filter, r exemplar.Reservoir, ) FilteredExemplarReservoir[N] { + _, concurrentSafe := r.(reservoir.ConcurrentSafe) return &filteredExemplarReservoir[N]{ - filter: f, - reservoir: r, + filter: f, + reservoir: r, + concurrentSafe: concurrentSafe, } } func (f *filteredExemplarReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) { if f.filter(ctx) { // only record the current time if we are sampling this measurement. - f.reservoir.Offer(ctx, time.Now(), exemplar.NewValue(val), attr) + ts := time.Now() + if !f.concurrentSafe { + f.reservoirMux.Lock() + defer f.reservoirMux.Unlock() + } + f.reservoir.Offer(ctx, ts, exemplar.NewValue(val), attr) } } -func (f *filteredExemplarReservoir[N]) Collect(dest *[]exemplar.Exemplar) { f.reservoir.Collect(dest) } +func (f *filteredExemplarReservoir[N]) Collect(dest *[]exemplar.Exemplar) { + if !f.concurrentSafe { + f.reservoirMux.Lock() + defer f.reservoirMux.Unlock() + } + f.reservoir.Collect(dest) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go index 736287e73..a094519cf 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -31,9 +31,12 @@ func newBuckets[N int64 | float64](attrs attribute.Set, n int) *buckets[N] { func (b *buckets[N]) sum(value N) { b.total += value } -func (b *buckets[N]) bin(idx int, value N) { +func (b *buckets[N]) bin(idx int) { b.counts[idx]++ b.count++ +} + +func (b *buckets[N]) minMax(value N) { if value < b.min { b.min = value } else if value > b.max { @@ -44,18 +47,19 @@ func (b *buckets[N]) bin(idx int, value N) { // histValues summarizes a set of measurements as an histValues with // explicitly defined buckets. 
type histValues[N int64 | float64] struct { - noSum bool - bounds []float64 + noMinMax bool + noSum bool + bounds []float64 newRes func(attribute.Set) FilteredExemplarReservoir[N] - limit limiter[*buckets[N]] + limit limiter[buckets[N]] values map[attribute.Distinct]*buckets[N] valuesMu sync.Mutex } func newHistValues[N int64 | float64]( bounds []float64, - noSum bool, + noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N], ) *histValues[N] { @@ -66,11 +70,12 @@ func newHistValues[N int64 | float64]( b := slices.Clone(bounds) slices.Sort(b) return &histValues[N]{ - noSum: noSum, - bounds: b, - newRes: r, - limit: newLimiter[*buckets[N]](limit), - values: make(map[attribute.Distinct]*buckets[N]), + noMinMax: noMinMax, + noSum: noSum, + bounds: b, + newRes: r, + limit: newLimiter[buckets[N]](limit), + values: make(map[attribute.Distinct]*buckets[N]), } } @@ -92,24 +97,32 @@ func (s *histValues[N]) measure( s.valuesMu.Lock() defer s.valuesMu.Unlock() - attr := s.limit.Attributes(fltrAttr, s.values) - b, ok := s.values[attr.Equivalent()] + b, ok := s.values[fltrAttr.Equivalent()] if !ok { - // N+1 buckets. For example: - // - // bounds = [0, 5, 10] - // - // Then, - // - // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) - b = newBuckets[N](attr, len(s.bounds)+1) - b.res = s.newRes(attr) + fltrAttr = s.limit.Attributes(fltrAttr, s.values) + // If we overflowed, make sure we add to the existing overflow series + // if it already exists. + b, ok = s.values[fltrAttr.Equivalent()] + if !ok { + // N+1 buckets. For example: + // + // bounds = [0, 5, 10] + // + // Then, + // + // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) + b = newBuckets[N](fltrAttr, len(s.bounds)+1) + b.res = s.newRes(fltrAttr) - // Ensure min and max are recorded values (not zero), for new buckets. - b.min, b.max = value, value - s.values[attr.Equivalent()] = b + // Ensure min and max are recorded values (not zero), for new buckets. 
+ b.min, b.max = value, value + s.values[fltrAttr.Equivalent()] = b + } + } + b.bin(idx) + if !s.noMinMax { + b.minMax(value) } - b.bin(idx, value) if !s.noSum { b.sum(value) } @@ -125,8 +138,7 @@ func newHistogram[N int64 | float64]( r func(attribute.Set) FilteredExemplarReservoir[N], ) *histogram[N] { return &histogram[N]{ - histValues: newHistValues[N](boundaries, noSum, limit, r), - noMinMax: noMinMax, + histValues: newHistValues[N](boundaries, noMinMax, noSum, limit, r), start: now(), } } @@ -136,8 +148,7 @@ func newHistogram[N int64 | float64]( type histogram[N int64 | float64] struct { *histValues[N] - noMinMax bool - start time.Time + start time.Time } func (s *histogram[N]) delta( diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go index 4bbe624c7..3e2ed7415 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -23,7 +23,7 @@ func newLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredEx return &lastValue[N]{ newRes: r, limit: newLimiter[datapoint[N]](limit), - values: make(map[attribute.Distinct]datapoint[N]), + values: make(map[attribute.Distinct]*datapoint[N]), start: now(), } } @@ -34,7 +34,7 @@ type lastValue[N int64 | float64] struct { newRes func(attribute.Set) FilteredExemplarReservoir[N] limit limiter[datapoint[N]] - values map[attribute.Distinct]datapoint[N] + values map[attribute.Distinct]*datapoint[N] start time.Time } @@ -42,17 +42,19 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute. s.Lock() defer s.Unlock() - attr := s.limit.Attributes(fltrAttr, s.values) - d, ok := s.values[attr.Equivalent()] + d, ok := s.values[fltrAttr.Equivalent()] if !ok { - d.res = s.newRes(attr) + fltrAttr = s.limit.Attributes(fltrAttr, s.values) + d = &datapoint[N]{ + res: s.newRes(fltrAttr), + attrs: fltrAttr, + } } - d.attrs = attr d.value = value d.res.Offer(ctx, value, droppedAttr) - s.values[attr.Equivalent()] = d + s.values[fltrAttr.Equivalent()] = d } func (s *lastValue[N]) delta( diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go index 9ea0251ed..c19a1aff6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go @@ -30,7 +30,7 @@ func newLimiter[V any](aggregation int) limiter[V] { // aggregation cardinality limit for the existing measurements. If it will, // overflowSet is returned. Otherwise, if it will not exceed the limit, or the // limit is not set (limit <= 0), attr is returned. 
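A standalone sketch of the limiter rule documented above: a new attribute set only gets its own series while fewer than limit-1 series exist, after which measurements fold into the overflow set. The otel.metric.overflow attribute mirrors the SDK's overflowSet.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

var overflow = attribute.NewSet(attribute.Bool("otel.metric.overflow", true))

// seriesFor applies the same rule as limiter.Attributes: existing series keep
// their attributes, new ones overflow once limit-1 series are tracked.
func seriesFor(attrs attribute.Set, series map[attribute.Distinct]int, limit int) attribute.Set {
	if limit > 0 {
		if _, ok := series[attrs.Equivalent()]; !ok && len(series) >= limit-1 {
			return overflow
		}
	}
	return attrs
}

func main() {
	series := map[attribute.Distinct]int{}
	const limit = 3
	for _, host := range []string{"a", "b", "c", "d"} {
		attrs := attribute.NewSet(attribute.String("host", host))
		got := seriesFor(attrs, series, limit)
		series[got.Equivalent()]++ // "c" and "d" both land in the overflow series
		fmt.Println(host, "->", got.Encoded(attribute.DefaultEncoder()))
	}
}
```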
-func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Distinct]V) attribute.Set { +func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Distinct]*V) attribute.Set { if l.aggLimit > 0 { _, exists := measurements[attrs.Equivalent()] if !exists && len(measurements) >= l.aggLimit-1 { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go index 1b4b2304c..816908551 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -5,7 +5,6 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg import ( "context" - "sync" "time" "go.opentelemetry.io/otel/attribute" @@ -13,64 +12,75 @@ import ( ) type sumValue[N int64 | float64] struct { - n N + n atomicCounter[N] res FilteredExemplarReservoir[N] attrs attribute.Set } -// valueMap is the storage for sums. type valueMap[N int64 | float64] struct { - sync.Mutex + values limitedSyncMap newRes func(attribute.Set) FilteredExemplarReservoir[N] - limit limiter[sumValue[N]] - values map[attribute.Distinct]sumValue[N] } -func newValueMap[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *valueMap[N] { - return &valueMap[N]{ - newRes: r, - limit: newLimiter[sumValue[N]](limit), - values: make(map[attribute.Distinct]sumValue[N]), - } +func (s *valueMap[N]) measure( + ctx context.Context, + value N, + fltrAttr attribute.Set, + droppedAttr []attribute.KeyValue, +) { + sv := s.values.LoadOrStoreAttr(fltrAttr, func(attr attribute.Set) any { + return &sumValue[N]{ + res: s.newRes(attr), + attrs: attr, + } + }).(*sumValue[N]) + sv.n.add(value) + // It is possible for collection to race with measurement and observe the + // exemplar in the batch of metrics after the add() for cumulative sums. + // This is an accepted tradeoff to avoid locking during measurement. + sv.res.Offer(ctx, value, droppedAttr) } -func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { - s.Lock() - defer s.Unlock() - - attr := s.limit.Attributes(fltrAttr, s.values) - v, ok := s.values[attr.Equivalent()] - if !ok { - v.res = s.newRes(attr) - } - - v.attrs = attr - v.n += value - v.res.Offer(ctx, value, droppedAttr) - - s.values[attr.Equivalent()] = v -} - -// newSum returns an aggregator that summarizes a set of measurements as their -// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle -// the measurements were made in. -func newSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *sum[N] { - return &sum[N]{ - valueMap: newValueMap[N](limit, r), +// newDeltaSum returns an aggregator that summarizes a set of measurements as +// their arithmetic sum. Each sum is scoped by attributes and the aggregation +// cycle the measurements were made in. +func newDeltaSum[N int64 | float64]( + monotonic bool, + limit int, + r func(attribute.Set) FilteredExemplarReservoir[N], +) *deltaSum[N] { + return &deltaSum[N]{ monotonic: monotonic, start: now(), + hotColdValMap: [2]valueMap[N]{ + { + values: limitedSyncMap{aggLimit: limit}, + newRes: r, + }, + { + values: limitedSyncMap{aggLimit: limit}, + newRes: r, + }, + }, } } -// sum summarizes a set of measurements made as their arithmetic sum. 
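For the precomputed sums rewritten below, delta reporting subtracts the last reported cumulative value per attribute set and then remembers the new one. A tiny standalone sketch of that bookkeeping, with string keys standing in for attribute sets:

```go
package main

import "fmt"

type deltaTracker struct {
	reported map[string]float64 // cumulative value reported last cycle
}

func (d *deltaTracker) delta(current map[string]float64) map[string]float64 {
	out := make(map[string]float64, len(current))
	next := make(map[string]float64, len(current))
	for key, n := range current {
		out[key] = n - d.reported[key] // zero value if never reported
		next[key] = n
	}
	d.reported = next // series absent this cycle stop being reported
	return out
}

func main() {
	d := &deltaTracker{reported: map[string]float64{}}
	fmt.Println(d.delta(map[string]float64{"a": 10})) // map[a:10]
	fmt.Println(d.delta(map[string]float64{"a": 15})) // map[a:5]
}
```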
-type sum[N int64 | float64] struct {
-	*valueMap[N]
-
+// deltaSum is the storage for sums which resets every collection interval.
+type deltaSum[N int64 | float64] struct {
 	monotonic bool
 	start     time.Time
+
+	hcwg          hotColdWaitGroup
+	hotColdValMap [2]valueMap[N]
 }
 
-func (s *sum[N]) delta(
+func (s *deltaSum[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+	hotIdx := s.hcwg.start()
+	defer s.hcwg.done(hotIdx)
+	s.hotColdValMap[hotIdx].measure(ctx, value, fltrAttr, droppedAttr)
+}
+
+func (s *deltaSum[N]) collect(
 	dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
 ) int {
 	t := now()
@@ -81,33 +91,61 @@ func (s *sum[N]) delta(
 	sData.Temporality = metricdata.DeltaTemporality
 	sData.IsMonotonic = s.monotonic
 
-	s.Lock()
-	defer s.Unlock()
-
-	n := len(s.values)
+	// delta always clears values on collection
+	readIdx := s.hcwg.swapHotAndWait()
+	// The len will not change while we iterate over values, since we waited
+	// for all writes to finish to the cold values and len.
+	n := s.hotColdValMap[readIdx].values.Len()
 	dPts := reset(sData.DataPoints, n, n)
 
 	var i int
-	for _, val := range s.values {
+	s.hotColdValMap[readIdx].values.Range(func(_, value any) bool {
+		val := value.(*sumValue[N])
+		collectExemplars(&dPts[i].Exemplars, val.res.Collect)
 		dPts[i].Attributes = val.attrs
 		dPts[i].StartTime = s.start
 		dPts[i].Time = t
-		dPts[i].Value = val.n
-		collectExemplars(&dPts[i].Exemplars, val.res.Collect)
+		dPts[i].Value = val.n.load()
 		i++
-	}
-	// Do not report stale values.
-	clear(s.values)
+		return true
+	})
+	s.hotColdValMap[readIdx].values.Clear()
 	// The delta collection cycle resets.
 	s.start = t
 
 	sData.DataPoints = dPts
 	*dest = sData
 
-	return n
+	return i
 }
 
-func (s *sum[N]) cumulative(
+// newCumulativeSum returns an aggregator that summarizes a set of measurements
+// as their arithmetic sum. Each sum is scoped by attributes and the
+// aggregation cycle the measurements were made in.
+func newCumulativeSum[N int64 | float64](
+	monotonic bool,
+	limit int,
+	r func(attribute.Set) FilteredExemplarReservoir[N],
+) *cumulativeSum[N] {
+	return &cumulativeSum[N]{
+		monotonic: monotonic,
+		start:     now(),
+		valueMap: valueMap[N]{
+			values: limitedSyncMap{aggLimit: limit},
+			newRes: r,
+		},
+	}
+}
+
+// cumulativeSum is the storage for sums which never resets.
+type cumulativeSum[N int64 | float64] struct {
+	monotonic bool
+	start     time.Time
+
+	valueMap[N]
+}
+
+func (s *cumulativeSum[N]) collect(
 	dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
 ) int {
 	t := now()
@@ -118,30 +156,33 @@
 	sData.Temporality = metricdata.CumulativeTemporality
 	sData.IsMonotonic = s.monotonic
 
-	s.Lock()
-	defer s.Unlock()
-
-	n := len(s.values)
-	dPts := reset(sData.DataPoints, n, n)
+	// Values are being concurrently written while we iterate, so only use the
+	// current length for capacity.
+ dPts := reset(sData.DataPoints, 0, s.values.Len()) var i int - for _, value := range s.values { - dPts[i].Attributes = value.attrs - dPts[i].StartTime = s.start - dPts[i].Time = t - dPts[i].Value = value.n - collectExemplars(&dPts[i].Exemplars, value.res.Collect) + s.values.Range(func(_, value any) bool { + val := value.(*sumValue[N]) + newPt := metricdata.DataPoint[N]{ + Attributes: val.attrs, + StartTime: s.start, + Time: t, + Value: val.n.load(), + } + collectExemplars(&newPt.Exemplars, val.res.Collect) + dPts = append(dPts, newPt) // TODO (#3006): This will use an unbounded amount of memory if there // are unbounded number of attribute sets being aggregated. Attribute // sets that become "stale" need to be forgotten so this will not // overload the system. i++ - } + return true + }) sData.DataPoints = dPts *dest = sData - return n + return i } // newPrecomputedSum returns an aggregator that summarizes a set of @@ -153,27 +194,22 @@ func newPrecomputedSum[N int64 | float64]( r func(attribute.Set) FilteredExemplarReservoir[N], ) *precomputedSum[N] { return &precomputedSum[N]{ - valueMap: newValueMap[N](limit, r), - monotonic: monotonic, - start: now(), + deltaSum: newDeltaSum(monotonic, limit, r), } } // precomputedSum summarizes a set of observations as their arithmetic sum. type precomputedSum[N int64 | float64] struct { - *valueMap[N] + *deltaSum[N] - monotonic bool - start time.Time - - reported map[attribute.Distinct]N + reported map[any]N } func (s *precomputedSum[N]) delta( dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface ) int { t := now() - newReported := make(map[attribute.Distinct]N) + newReported := make(map[any]N) // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, // use the zero-value sData and hope for better alignment next cycle. @@ -181,27 +217,29 @@ func (s *precomputedSum[N]) delta( sData.Temporality = metricdata.DeltaTemporality sData.IsMonotonic = s.monotonic - s.Lock() - defer s.Unlock() - - n := len(s.values) + // delta always clears values on collection + readIdx := s.hcwg.swapHotAndWait() + // The len will not change while we iterate over values, since we waited + // for all writes to finish to the cold values and len. + n := s.hotColdValMap[readIdx].values.Len() dPts := reset(sData.DataPoints, n, n) var i int - for key, value := range s.values { - delta := value.n - s.reported[key] + s.hotColdValMap[readIdx].values.Range(func(key, value any) bool { + val := value.(*sumValue[N]) + n := val.n.load() - dPts[i].Attributes = value.attrs + delta := n - s.reported[key] + collectExemplars(&dPts[i].Exemplars, val.res.Collect) + dPts[i].Attributes = val.attrs dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = delta - collectExemplars(&dPts[i].Exemplars, value.res.Collect) - - newReported[key] = value.n + newReported[key] = n i++ - } - // Unused attribute sets do not report. - clear(s.values) + return true + }) + s.hotColdValMap[readIdx].values.Clear() s.reported = newReported // The delta collection cycle resets. 
s.start = t @@ -209,7 +247,7 @@ func (s *precomputedSum[N]) delta( sData.DataPoints = dPts *dest = sData - return n + return i } func (s *precomputedSum[N]) cumulative( @@ -223,27 +261,28 @@ func (s *precomputedSum[N]) cumulative( sData.Temporality = metricdata.CumulativeTemporality sData.IsMonotonic = s.monotonic - s.Lock() - defer s.Unlock() - - n := len(s.values) + // cumulative precomputed always clears values on collection + readIdx := s.hcwg.swapHotAndWait() + // The len will not change while we iterate over values, since we waited + // for all writes to finish to the cold values and len. + n := s.hotColdValMap[readIdx].values.Len() dPts := reset(sData.DataPoints, n, n) var i int - for _, val := range s.values { + s.hotColdValMap[readIdx].values.Range(func(_, value any) bool { + val := value.(*sumValue[N]) + collectExemplars(&dPts[i].Exemplars, val.res.Collect) dPts[i].Attributes = val.attrs dPts[i].StartTime = s.start dPts[i].Time = t - dPts[i].Value = val.n - collectExemplars(&dPts[i].Exemplars, val.res.Collect) - + dPts[i].Value = val.n.load() i++ - } - // Unused attribute sets do not report. - clear(s.values) + return true + }) + s.hotColdValMap[readIdx].values.Clear() sData.DataPoints = dPts *dest = sData - return n + return i } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go new file mode 100644 index 000000000..41cfc6bcb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go @@ -0,0 +1,168 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides experimental observability instrumentation for the +// metric reader. +package observ // import "go.opentelemetry.io/otel/sdk/metric/internal/observ" + +import ( + "context" + "fmt" + "sync" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +const ( + // ScopeName is the unique name of the meter used for instrumentation. + ScopeName = "go.opentelemetry.io/otel/sdk/metric/internal/observ" + + // SchemaURL is the schema URL of the metrics produced by this + // instrumentation. + SchemaURL = semconv.SchemaURL +) + +var ( + measureAttrsPool = &sync.Pool{ + New: func() any { + const n = 1 + // component.name + 1 + // component.type + 1 // error.type + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, + } + + recordOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.RecordOption, 0, n) + return &o + }, + } +) + +func get[T any](p *sync.Pool) *[]T { return p.Get().(*[]T) } + +func put[T any](p *sync.Pool, s *[]T) { + *s = (*s)[:0] // Reset. + p.Put(s) +} + +// ComponentName returns the component name for the metric reader with the +// provided ComponentType and ID. +func ComponentName(componentType string, id int64) string { + return fmt.Sprintf("%s/%d", componentType, id) +} + +// Instrumentation is experimental instrumentation for the metric reader. 
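The Instrumentation type defined next hands out a CollectOp that follows a start/End timing pattern. A minimal standalone sketch of the same shape, where a print stands in for Float64Histogram.Record and the names are illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

type collectOp struct {
	start time.Time
}

func startCollect() collectOp { return collectOp{start: time.Now()} }

// End records the elapsed seconds; on failure the real code would also
// attach an error.type attribute, approximated here with %T.
func (op collectOp) End(err error) {
	d := time.Since(op.start).Seconds()
	if err != nil {
		fmt.Printf("collect: %.6fs error.type=%T\n", d, err)
		return
	}
	fmt.Printf("collect: %.6fs\n", d)
}

func main() {
	op := startCollect()
	time.Sleep(10 * time.Millisecond)
	op.End(nil)

	op = startCollect()
	op.End(errors.New("boom"))
}
```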
+type Instrumentation struct { + colDuration metric.Float64Histogram + + attrs []attribute.KeyValue + recOpt metric.RecordOption +} + +// NewInstrumentation returns instrumentation for a metric reader with the provided component +// type (such as a periodic or manual metric reader) and ID. It uses the global +// MeterProvider to create the instrumentation. +// +// The id should be the unique metric reader instance ID. It is used +// to set the "component.name" attribute. +// +// If the experimental observability is disabled, nil is returned. +func NewInstrumentation(componentType string, id int64) (*Instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + i := &Instrumentation{ + attrs: []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(componentType, id)), + semconv.OTelComponentTypeKey.String(componentType), + }, + } + + r := attribute.NewSet(i.attrs...) + i.recOpt = metric.WithAttributeSet(r) + + meter := otel.GetMeterProvider().Meter( + ScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), + ) + + colDuration, err := otelconv.NewSDKMetricReaderCollectionDuration(meter) + if err != nil { + err = fmt.Errorf("failed to create collection duration metric: %w", err) + } + i.colDuration = colDuration.Inst() + + return i, err +} + +// CollectMetrics instruments the collect method of a metric reader. It returns a +// [CollectOp] that must have its [CollectOp.End] method called when the +// collection ends. +func (i *Instrumentation) CollectMetrics(ctx context.Context) CollectOp { + start := time.Now() + + return CollectOp{ + ctx: ctx, + start: start, + inst: i, + } +} + +// CollectOp tracks the collect operation being observed by +// [Instrumentation.CollectMetrics]. +type CollectOp struct { + ctx context.Context + start time.Time + + inst *Instrumentation +} + +// End completes the observation of the operation being observed by a call to +// [Instrumentation.CollectMetrics]. +// +// Any error that is encountered is provided as err. +func (e CollectOp) End(err error) { + recOpt := get[metric.RecordOption](recordOptPool) + defer put(recordOptPool, recOpt) + *recOpt = append(*recOpt, e.inst.recordOption(err)) + + d := time.Since(e.start).Seconds() + e.inst.colDuration.Record(e.ctx, d, *recOpt...) +} + +// recordOption returns a RecordOption with attributes representing the +// outcome of the collection being recorded. +// +// If err is nil, the default recOpt of the Instrumentation is returned. +// +// Otherwise, a new RecordOption is returned with the base attributes of the +// Instrumentation plus the error.type attribute set to the type of the error. +func (i *Instrumentation) recordOption(err error) metric.RecordOption { + if err == nil { + return i.recOpt + } + + attrs := get[attribute.KeyValue](measureAttrsPool) + defer put(measureAttrsPool, attrs) + *attrs = append(*attrs, i.attrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + + // Do not inefficiently make a copy of attrs by using WithAttributes + // instead of WithAttributeSet.
+ return metric.WithAttributeSet(attribute.NewSet(*attrs...)) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/concurrent_safe.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/concurrent_safe.go new file mode 100644 index 000000000..3be234a41 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/concurrent_safe.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package reservoir // import "go.opentelemetry.io/otel/sdk/metric/internal/reservoir" + +// ConcurrentSafe is an interface that can be embedded in an +// exemplar.Reservoir to indicate to the SDK that it is safe to invoke its +// methods concurrently. If this interface is not embedded, the SDK assumes it +// is not safe to call concurrently and locks around Reservoir methods. This +// is currently only used by the built-in reservoirs. +type ConcurrentSafe interface{ concurrentSafe() } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/doc.go new file mode 100644 index 000000000..6cd213b5f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package reservoir contains experimental features used by built-in exemplar +// reservoirs which require coordination with the metrics SDK. +package reservoir // import "go.opentelemetry.io/otel/sdk/metric/internal/reservoir" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go index 85d3dc207..5b0630207 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go @@ -10,10 +10,18 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric/internal/observ" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) +const ( + // manualReaderType uniquely identifies the OpenTelemetry Metric Reader component + // being instrumented. + manualReaderType = "go.opentelemetry.io/otel/sdk/metric/metric.ManualReader" +) + // ManualReader is a simple Reader that allows an application to // read metrics on demand. type ManualReader struct { @@ -26,6 +34,8 @@ type ManualReader struct { temporalitySelector TemporalitySelector aggregationSelector AggregationSelector + + inst *observ.Instrumentation } // Compile time check the manualReader implements Reader and is comparable. @@ -39,9 +49,24 @@ func NewManualReader(opts ...ManualReaderOption) *ManualReader { aggregationSelector: cfg.aggregationSelector, } r.externalProducers.Store(cfg.producers) + + var err error + r.inst, err = observ.NewInstrumentation(manualReaderType, nextManualReaderID()) + if err != nil { + otel.Handle(err) + } + return r } +var manualReaderIDCounter atomic.Int64 + +// nextManualReaderID returns an identifier for this manual reader, +// starting with 0 and incrementing by 1 each time it is called. +func nextManualReaderID() int64 { + return manualReaderIDCounter.Add(1) - 1 +} + // register stores the sdkProducer which enables the caller // to read metrics from the SDK on demand. func (mr *ManualReader) register(p sdkProducer) { @@ -93,12 +118,20 @@ func (mr *ManualReader) Shutdown(context.Context) error { // // This method is safe to call concurrently.
func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error { + var err error + if mr.inst != nil { + cp := mr.inst.CollectMetrics(ctx) + defer func() { cp.End(err) }() + } + if rm == nil { - return errors.New("manual reader: *metricdata.ResourceMetrics is nil") + err = errors.New("manual reader: *metricdata.ResourceMetrics is nil") + return err } p := mr.sdkProducer.Load() if p == nil { - return ErrReaderNotRegistered + err = ErrReaderNotRegistered + return err } ph, ok := p.(produceHolder) @@ -107,11 +140,11 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr // this should never happen. In the unforeseen case that this does // happen, return an error instead of panicking so a user's code does // not halt in the process. - err := fmt.Errorf("manual reader: invalid producer: %T", p) + err = fmt.Errorf("manual reader: invalid producer: %T", p) return err } - err := ph.produce(ctx, rm) + err = ph.produce(ctx, rm) if err != nil { return err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go index 4da833cdc..129cc6430 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go @@ -18,8 +18,9 @@ const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTempora var _Temporality_index = [...]uint8{0, 20, 41, 57} func (i Temporality) String() string { - if i >= Temporality(len(_Temporality_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Temporality_index)-1 { return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]] + return _Temporality_name[_Temporality_index[idx]:_Temporality_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go index f08c771a6..e78402afc 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -13,7 +13,9 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric/internal/observ" "go.opentelemetry.io/otel/sdk/metric/metricdata" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) // Default periodic reader timing. @@ -126,9 +128,26 @@ func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *Peri r.run(ctx, conf.interval) }() + var err error + r.inst, err = observ.NewInstrumentation( + semconv.OTelComponentTypePeriodicMetricReader.Value.AsString(), + nextPeriodicReaderID(), + ) + if err != nil { + otel.Handle(err) + } + return r } +var periodicReaderIDCounter atomic.Int64 + +// nextPeriodicReaderID returns an identifier for this periodic reader, +// starting with 0 and incrementing by 1 each time it is called. +func nextPeriodicReaderID() int64 { + return periodicReaderIDCounter.Add(1) - 1 +} + // PeriodicReader is a Reader that continuously collects and exports metric // data at a set interval. type PeriodicReader struct { @@ -148,6 +167,8 @@ type PeriodicReader struct { shutdownOnce sync.Once rmPool sync.Pool + + inst *observ.Instrumentation } // Compile time check the periodicReader implements Reader and is comparable.
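The collect instrumentation above follows one pattern in both readers: CollectMetrics captures a start time and returns a CollectOp, the error is carried in a named return, and a single deferred End records the duration and outcome on every exit path. A minimal, self-contained sketch of that pattern, assuming only the public metric API (the histogram wiring and all names here are illustrative, not the vendored observ API):

package sketch

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/metric"
)

// op captures the start of an operation to be recorded on End.
type op struct {
	ctx   context.Context
	start time.Time
	hist  metric.Float64Histogram
}

func begin(ctx context.Context, h metric.Float64Histogram) op {
	return op{ctx: ctx, start: time.Now(), hist: h}
}

// End records the elapsed duration exactly once; the error-type attribute
// handling the vendored recordOption performs is elided in this sketch.
func (o op) End(err error) {
	o.hist.Record(o.ctx, time.Since(o.start).Seconds())
}

func collect(ctx context.Context, h metric.Float64Histogram) (err error) {
	o := begin(ctx, h)
	defer func() { o.End(err) }() // named return: every path is measured
	// ... produce metrics, possibly assigning err ...
	return nil
}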
@@ -235,8 +256,15 @@ func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMet // collect unwraps p as a produceHolder and returns its produce results. func (r *PeriodicReader) collect(ctx context.Context, p any, rm *metricdata.ResourceMetrics) error { + var err error + if r.inst != nil { + cp := r.inst.CollectMetrics(ctx) + defer func() { cp.End(err) }() + } + if p == nil { - return ErrReaderNotRegistered + err = ErrReaderNotRegistered + return err } ph, ok := p.(produceHolder) @@ -245,11 +273,11 @@ func (r *PeriodicReader) collect(ctx context.Context, p any, rm *metricdata.Reso // this should never happen. In the unforeseen case that this does // happen, return an error instead of panicking so a user's code does // not halt in the process. - err := fmt.Errorf("periodic reader: invalid producer: %T", p) + err = fmt.Errorf("periodic reader: invalid producer: %T", p) return err } - err := ph.produce(ctx, rm) + err = ph.produce(ctx, rm) if err != nil { return err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go index 5c1cea825..7b205c736 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go @@ -127,10 +127,40 @@ type TemporalitySelector func(InstrumentKind) metricdata.Temporality // DefaultTemporalitySelector is the default TemporalitySelector used if // WithTemporalitySelector is not provided. CumulativeTemporality will be used // for all instrument kinds if this TemporalitySelector is used. -func DefaultTemporalitySelector(InstrumentKind) metricdata.Temporality { +func DefaultTemporalitySelector(k InstrumentKind) metricdata.Temporality { + return CumulativeTemporalitySelector(k) +} + +// CumulativeTemporalitySelector is the TemporalitySelector that uses +// a cumulative temporality for all instrument kinds. +func CumulativeTemporalitySelector(InstrumentKind) metricdata.Temporality { return metricdata.CumulativeTemporality } +// DeltaTemporalitySelector is the TemporalitySelector that uses +// a delta temporality for instrument kinds: counter, histogram, observable counter. +// All other instruments use cumulative temporality. +func DeltaTemporalitySelector(k InstrumentKind) metricdata.Temporality { + switch k { + case InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindObservableCounter: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +} + +// LowMemoryTemporalitySelector is the TemporalitySelector that uses +// delta temporality for counters and histograms. All other instruments use +// cumulative temporality. +func LowMemoryTemporalitySelector(k InstrumentKind) metricdata.Temporality { + switch k { + case InstrumentKindCounter, InstrumentKindHistogram: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +} + // AggregationSelector selects the aggregation and the parameters to use for // that aggregation based on the InstrumentKind. // diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index dd9051a76..ae5b57b19 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use.
func version() string { - return "1.38.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go index cc8b8938e..4c1c30f25 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build dragonfly || freebsd || netbsd || openbsd || solaris -// +build dragonfly freebsd netbsd openbsd solaris package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go index f84f17324..4a26096c8 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build linux -// +build linux package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go index df12c44c5..63ad2fa4e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go index 3677c83d7..2b8ca20b3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build windows -// +build windows package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go index 7252af79f..a1763267c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix dragonfly freebsd linux netbsd openbsd solaris zos package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go index a6ff26a4d..6c50ab686 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go index a77742b07..25f629532 100644 --- 
a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 9bc3e525d..7d15cbb9c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -6,20 +6,14 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "errors" - "fmt" "sync" "sync/atomic" "time" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/sdk" - "go.opentelemetry.io/otel/sdk/internal/env" - "go.opentelemetry.io/otel/sdk/trace/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.37.0" - "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + "go.opentelemetry.io/otel/sdk/trace/internal/env" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" "go.opentelemetry.io/otel/trace" ) @@ -33,8 +27,6 @@ const ( DefaultMaxExportBatchSize = 512 ) -var queueFull = otelconv.ErrorTypeAttr("queue_full") - // BatchSpanProcessorOption configures a BatchSpanProcessor. type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) @@ -78,10 +70,7 @@ type batchSpanProcessor struct { queue chan ReadOnlySpan dropped uint32 - selfObservabilityEnabled bool - callbackRegistration metric.Registration - spansProcessedCounter otelconv.SDKProcessorSpanProcessed - componentNameAttr attribute.KeyValue + inst *observ.BSP batch []ReadOnlySpan batchMutex sync.Mutex @@ -124,19 +113,14 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO stopCh: make(chan struct{}), } - if x.SelfObservability.Enabled() { - bsp.selfObservabilityEnabled = true - bsp.componentNameAttr = componentName() - - var err error - bsp.spansProcessedCounter, bsp.callbackRegistration, err = newBSPObs( - bsp.componentNameAttr, - func() int64 { return int64(len(bsp.queue)) }, - int64(bsp.o.MaxQueueSize), - ) - if err != nil { - otel.Handle(err) - } + var err error + bsp.inst, err = observ.NewBSP( + nextProcessorID(), + func() int64 { return int64(len(bsp.queue)) }, + int64(bsp.o.MaxQueueSize), + ) + if err != nil { + otel.Handle(err) } bsp.stopWait.Add(1) @@ -157,51 +141,6 @@ func nextProcessorID() int64 { return processorIDCounter.Add(1) - 1 } -func componentName() attribute.KeyValue { - id := nextProcessorID() - name := fmt.Sprintf("%s/%d", otelconv.ComponentTypeBatchingSpanProcessor, id) - return semconv.OTelComponentName(name) -} - -// newBSPObs creates and returns a new set of metrics instruments and a -// registration for a BatchSpanProcessor. It is the caller's responsibility -// to unregister the registration when it is no longer needed. 
-func newBSPObs( - cmpnt attribute.KeyValue, - qLen func() int64, - qMax int64, -) (otelconv.SDKProcessorSpanProcessed, metric.Registration, error) { - meter := otel.GetMeterProvider().Meter( - selfObsScopeName, - metric.WithInstrumentationVersion(sdk.Version()), - metric.WithSchemaURL(semconv.SchemaURL), - ) - - qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter) - - qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter) - err = errors.Join(err, e) - - spansProcessed, e := otelconv.NewSDKProcessorSpanProcessed(meter) - err = errors.Join(err, e) - - cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor - attrs := metric.WithAttributes(cmpnt, cmpntT) - - reg, e := meter.RegisterCallback( - func(_ context.Context, o metric.Observer) error { - o.ObserveInt64(qSize.Inst(), qLen(), attrs) - o.ObserveInt64(qCap.Inst(), qMax, attrs) - return nil - }, - qSize.Inst(), - qCap.Inst(), - ) - err = errors.Join(err, e) - - return spansProcessed, reg, err -} - // OnStart method does nothing. func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} @@ -242,8 +181,8 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { case <-ctx.Done(): err = ctx.Err() } - if bsp.selfObservabilityEnabled { - err = errors.Join(err, bsp.callbackRegistration.Unregister()) + if bsp.inst != nil { + err = errors.Join(err, bsp.inst.Shutdown()) } }) return err @@ -357,10 +296,8 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { if l := len(bsp.batch); l > 0 { global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) - if bsp.selfObservabilityEnabled { - bsp.spansProcessedCounter.Add(ctx, int64(l), - bsp.componentNameAttr, - bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor)) + if bsp.inst != nil { + bsp.inst.Processed(ctx, int64(l)) } err := bsp.e.ExportSpans(ctx, bsp.batch) @@ -470,11 +407,8 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R case bsp.queue <- sd: return true case <-ctx.Done(): - if bsp.selfObservabilityEnabled { - bsp.spansProcessedCounter.Add(ctx, 1, - bsp.componentNameAttr, - bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor), - bsp.spansProcessedCounter.AttrErrorType(queueFull)) + if bsp.inst != nil { + bsp.inst.ProcessedQueueFull(ctx, 1) } return false } @@ -490,11 +424,8 @@ func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) return true default: atomic.AddUint32(&bsp.dropped, 1) - if bsp.selfObservabilityEnabled { - bsp.spansProcessedCounter.Add(ctx, 1, - bsp.componentNameAttr, - bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor), - bsp.spansProcessedCounter.AttrErrorType(queueFull)) + if bsp.inst != nil { + bsp.inst.ProcessedQueueFull(ctx, 1) } } return false diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go index e58e7f6ed..b502c7d47 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go @@ -7,7 +7,7 @@ Package trace contains support for OpenTelemetry distributed tracing. The following assumes a basic familiarity with OpenTelemetry concepts. See https://opentelemetry.io. -See [go.opentelemetry.io/otel/sdk/trace/internal/x] for information about +See [go.opentelemetry.io/otel/sdk/internal/x] for information about the experimental features. 
*/ package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go similarity index 98% rename from vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go rename to vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go index e3309231d..58f68df44 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go @@ -3,7 +3,7 @@ // Package env provides types and functionality for environment variable support // in the OpenTelemetry SDK. -package env // import "go.opentelemetry.io/otel/sdk/internal/env" +package env // import "go.opentelemetry.io/otel/sdk/trace/internal/env" import ( "os" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go new file mode 100644 index 000000000..bd7fe2362 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +const ( + // ScopeName is the name of the instrumentation scope. + ScopeName = "go.opentelemetry.io/otel/sdk/trace/internal/observ" + + // SchemaURL is the schema URL of the instrumentation. + SchemaURL = semconv.SchemaURL +) + +// ErrQueueFull is the attribute value for the "queue_full" error type. +var ErrQueueFull = otelconv.SDKProcessorSpanProcessed{}.AttrErrorType( + otelconv.ErrorTypeAttr("queue_full"), +) + +// BSPComponentName returns the component name attribute for a +// BatchSpanProcessor with the given ID. +func BSPComponentName(id int64) attribute.KeyValue { + t := otelconv.ComponentTypeBatchingSpanProcessor + name := fmt.Sprintf("%s/%d", t, id) + return semconv.OTelComponentName(name) +} + +// BSP is the instrumentation for an OTel SDK BatchSpanProcessor. 
+type BSP struct { + reg metric.Registration + + processed metric.Int64Counter + processedOpts []metric.AddOption + processedQueueFullOpts []metric.AddOption +} + +func NewBSP(id int64, qLen func() int64, qMax int64) (*BSP, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + ScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), + ) + + qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter) + if err != nil { + err = fmt.Errorf("failed to create BSP queue capacity metric: %w", err) + } + qCapInst := qCap.Inst() + + qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter) + if e != nil { + e := fmt.Errorf("failed to create BSP queue size metric: %w", e) + err = errors.Join(err, e) + } + qSizeInst := qSize.Inst() + + cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor + cmpnt := BSPComponentName(id) + set := attribute.NewSet(cmpnt, cmpntT) + + obsOpts := []metric.ObserveOption{metric.WithAttributeSet(set)} + reg, e := meter.RegisterCallback( + func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(qSizeInst, qLen(), obsOpts...) + o.ObserveInt64(qCapInst, qMax, obsOpts...) + return nil + }, + qSizeInst, + qCapInst, + ) + if e != nil { + e := fmt.Errorf("failed to register BSP queue size/capacity callback: %w", e) + err = errors.Join(err, e) + } + + processed, e := otelconv.NewSDKProcessorSpanProcessed(meter) + if e != nil { + e := fmt.Errorf("failed to create BSP processed spans metric: %w", e) + err = errors.Join(err, e) + } + processedOpts := []metric.AddOption{metric.WithAttributeSet(set)} + + set = attribute.NewSet(cmpnt, cmpntT, ErrQueueFull) + processedQueueFullOpts := []metric.AddOption{metric.WithAttributeSet(set)} + + return &BSP{ + reg: reg, + processed: processed.Inst(), + processedOpts: processedOpts, + processedQueueFullOpts: processedQueueFullOpts, + }, err +} + +func (b *BSP) Shutdown() error { return b.reg.Unregister() } + +func (b *BSP) Processed(ctx context.Context, n int64) { + b.processed.Add(ctx, n, b.processedOpts...) +} + +func (b *BSP) ProcessedQueueFull(ctx context.Context, n int64) { + b.processed.Add(ctx, n, b.processedQueueFullOpts...) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go new file mode 100644 index 000000000..b542121e6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides observability instrumentation for the OTel trace SDK +// package. 
+package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go new file mode 100644 index 000000000..7d3387061 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go @@ -0,0 +1,97 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" + +import ( + "context" + "fmt" + "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +var measureAttrsPool = sync.Pool{ + New: func() any { + // "component.name" + "component.type" + "error.type" + const n = 1 + 1 + 1 + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, +} + +// SSP is the instrumentation for an OTel SDK SimpleSpanProcessor. +type SSP struct { + spansProcessedCounter metric.Int64Counter + addOpts []metric.AddOption + attrs []attribute.KeyValue +} + +// SSPComponentName returns the component name attribute for a +// SimpleSpanProcessor with the given ID. +func SSPComponentName(id int64) attribute.KeyValue { + t := otelconv.ComponentTypeSimpleSpanProcessor + name := fmt.Sprintf("%s/%d", t, id) + return semconv.OTelComponentName(name) +} + +// NewSSP returns instrumentation for an OTel SDK SimpleSpanProcessor with the +// provided ID. +// +// If the experimental observability is disabled, nil is returned. +func NewSSP(id int64) (*SSP, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + ScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), + ) + spansProcessedCounter, err := otelconv.NewSDKProcessorSpanProcessed(meter) + if err != nil { + err = fmt.Errorf("failed to create SSP processed spans metric: %w", err) + } + + componentName := SSPComponentName(id) + componentType := spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeSimpleSpanProcessor) + attrs := []attribute.KeyValue{componentName, componentType} + addOpts := []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(attrs...))} + + return &SSP{ + spansProcessedCounter: spansProcessedCounter.Inst(), + addOpts: addOpts, + attrs: attrs, + }, err +} + +// SpanProcessed records that a span has been processed by the SimpleSpanProcessor. +// If err is non-nil, it records the processing error as an attribute. +func (ssp *SSP) SpanProcessed(ctx context.Context, err error) { + ssp.spansProcessedCounter.Add(ctx, 1, ssp.addOption(err)...) +} + +func (ssp *SSP) addOption(err error) []metric.AddOption { + if err == nil { + return ssp.addOpts + } + attrs := measureAttrsPool.Get().(*[]attribute.KeyValue) + defer func() { + *attrs = (*attrs)[:0] // reset the slice for reuse + measureAttrsPool.Put(attrs) + }() + *attrs = append(*attrs, ssp.attrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + // Do not inefficiently make a copy of attrs by using + // WithAttributes instead of WithAttributeSet. 
+ return []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(*attrs...))} +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go new file mode 100644 index 000000000..a8a164589 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go @@ -0,0 +1,223 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + "go.opentelemetry.io/otel/trace" +) + +var meterOpts = []metric.MeterOption{ + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), +} + +// Tracer is instrumentation for an OTel SDK Tracer. +type Tracer struct { + enabled bool + + live metric.Int64UpDownCounter + started metric.Int64Counter +} + +func NewTracer() (Tracer, error) { + if !x.Observability.Enabled() { + return Tracer{}, nil + } + meter := otel.GetMeterProvider().Meter(ScopeName, meterOpts...) + + var err error + l, e := otelconv.NewSDKSpanLive(meter) + if e != nil { + e = fmt.Errorf("failed to create span live metric: %w", e) + err = errors.Join(err, e) + } + + s, e := otelconv.NewSDKSpanStarted(meter) + if e != nil { + e = fmt.Errorf("failed to create span started metric: %w", e) + err = errors.Join(err, e) + } + + return Tracer{enabled: true, live: l.Inst(), started: s.Inst()}, err +} + +func (t Tracer) Enabled() bool { return t.enabled } + +func (t Tracer) SpanStarted(ctx context.Context, psc trace.SpanContext, span trace.Span) { + key := spanStartedKey{ + parent: parentStateNoParent, + sampling: samplingStateDrop, + } + + if psc.IsValid() { + if psc.IsRemote() { + key.parent = parentStateRemoteParent + } else { + key.parent = parentStateLocalParent + } + } + + if span.IsRecording() { + if span.SpanContext().IsSampled() { + key.sampling = samplingStateRecordAndSample + } else { + key.sampling = samplingStateRecordOnly + } + } + + opts := spanStartedOpts[key] + t.started.Add(ctx, 1, opts...) +} + +func (t Tracer) SpanLive(ctx context.Context, span trace.Span) { + t.spanLive(ctx, 1, span) +} + +func (t Tracer) SpanEnded(ctx context.Context, span trace.Span) { + t.spanLive(ctx, -1, span) +} + +func (t Tracer) spanLive(ctx context.Context, value int64, span trace.Span) { + key := spanLiveKey{sampled: span.SpanContext().IsSampled()} + opts := spanLiveOpts[key] + t.live.Add(ctx, value, opts...) 
+} + +type parentState int + +const ( + parentStateNoParent parentState = iota + parentStateLocalParent + parentStateRemoteParent +) + +type samplingState int + +const ( + samplingStateDrop samplingState = iota + samplingStateRecordOnly + samplingStateRecordAndSample +) + +type spanStartedKey struct { + parent parentState + sampling samplingState +} + +var spanStartedOpts = map[spanStartedKey][]metric.AddOption{ + { + parentStateNoParent, + samplingStateDrop, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + )), + }, + { + parentStateLocalParent, + samplingStateDrop, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + )), + }, + { + parentStateRemoteParent, + samplingStateDrop, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + )), + }, + + { + parentStateNoParent, + samplingStateRecordOnly, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + )), + }, + { + parentStateLocalParent, + samplingStateRecordOnly, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + )), + }, + { + parentStateRemoteParent, + samplingStateRecordOnly, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + )), + }, + + { + parentStateNoParent, + samplingStateRecordAndSample, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + )), + }, + { + parentStateLocalParent, + samplingStateRecordAndSample, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + )), + }, + { + parentStateRemoteParent, + samplingStateRecordAndSample, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + )), + }, +} + +type spanLiveKey struct { + sampled bool +} + +var spanLiveOpts = map[spanLiveKey][]metric.AddOption{ + {true}: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + )), + }, + {false}: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordOnly, + ), + )), + }, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go 
b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 37ce2ac87..d2cf4ebd3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -5,29 +5,21 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" - "errors" "fmt" "sync" "sync/atomic" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - "go.opentelemetry.io/otel/sdk/trace/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.37.0" - "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" ) -const ( - defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" - selfObsScopeName = "go.opentelemetry.io/otel/sdk/trace" -) +const defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" // tracerProviderConfig. type tracerProviderConfig struct { @@ -163,19 +155,16 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T t, ok := p.namedTracer[is] if !ok { t = &tracer{ - provider: p, - instrumentationScope: is, - selfObservabilityEnabled: x.SelfObservability.Enabled(), + provider: p, + instrumentationScope: is, } - if t.selfObservabilityEnabled { - var err error - t.spanLiveMetric, t.spanStartedMetric, err = newInst() - if err != nil { - msg := "failed to create self-observability metrics for tracer: %w" - err := fmt.Errorf(msg, err) - otel.Handle(err) - } + + var err error + t.inst, err = observ.NewTracer() + if err != nil { + otel.Handle(err) } + p.namedTracer[is] = t } return t, ok @@ -201,23 +190,6 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -func newInst() (otelconv.SDKSpanLive, otelconv.SDKSpanStarted, error) { - m := otel.GetMeterProvider().Meter( - selfObsScopeName, - metric.WithInstrumentationVersion(sdk.Version()), - metric.WithSchemaURL(semconv.SchemaURL), - ) - - var err error - spanLiveMetric, e := otelconv.NewSDKSpanLive(m) - err = errors.Join(err, e) - - spanStartedMetric, e := otelconv.NewSDKSpanStarted(m) - err = errors.Join(err, e) - - return spanLiveMetric, spanStartedMetric, err -} - // RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { // This check prevents calls during a shutdown. 
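The tracer instrumentation above precomputes every attribute combination (the spanStartedOpts and spanLiveOpts tables) at package init, keyed by small comparable structs, so the span hot path does a map lookup instead of building an attribute.Set per measurement. A minimal sketch of the same pattern under illustrative names, assuming only the public attribute and metric APIs:

package sketch

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

// key must be comparable so it can index the precomputed table.
type key struct{ sampled bool }

// addOpts is built once; attribute.NewSet never runs on the hot path.
var addOpts = map[key][]metric.AddOption{
	{sampled: true}:  {metric.WithAttributeSet(attribute.NewSet(attribute.Bool("sampled", true)))},
	{sampled: false}: {metric.WithAttributeSet(attribute.NewSet(attribute.Bool("sampled", false)))},
}

// record adds to the counter without allocating attributes per call.
func record(ctx context.Context, c metric.Int64Counter, sampled bool) {
	c.Add(ctx, 1, addOpts[key{sampled: sampled}]...)
}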
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go index 411d9ccdd..771e427a4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -6,9 +6,12 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "sync" + "sync/atomic" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" + "go.opentelemetry.io/otel/trace" ) // simpleSpanProcessor is a SpanProcessor that synchronously sends all @@ -17,6 +20,8 @@ type simpleSpanProcessor struct { exporterMu sync.Mutex exporter SpanExporter stopOnce sync.Once + + inst *observ.SSP } var _ SpanProcessor = (*simpleSpanProcessor)(nil) @@ -33,11 +38,26 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { ssp := &simpleSpanProcessor{ exporter: exporter, } + + var err error + ssp.inst, err = observ.NewSSP(nextSimpleProcessorID()) + if err != nil { + otel.Handle(err) + } + global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.") return ssp } +var simpleProcessorIDCounter atomic.Int64 + +// nextSimpleProcessorID returns an identifier for this simple span processor, +// starting with 0 and incrementing by 1 each time it is called. +func nextSimpleProcessorID() int64 { + return simpleProcessorIDCounter.Add(1) - 1 +} + // OnStart does nothing. func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} @@ -46,11 +66,20 @@ func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { ssp.exporterMu.Lock() defer ssp.exporterMu.Unlock() + var err error if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() { - if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil { + err = ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}) + if err != nil { otel.Handle(err) } } + + if ssp.inst != nil { + // Add the span to the context to ensure the metric is recorded + // with the correct span context. + ctx := trace.ContextWithSpanContext(context.Background(), s.SpanContext()) + ssp.inst.SpanProcessed(ctx, err) + } } // Shutdown shuts down the exporter this SimpleSpanProcessor exports to. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index b376051fb..8cfd9f62e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -151,6 +151,12 @@ type recordingSpan struct { // tracer is the SDK tracer that created this span. tracer *tracer + + // origCtx is the context used when starting this span that has the + // recordingSpan instance set as the active span. If not nil, it is used + // when ending the span to ensure any metrics are recorded with a context + // containing this span without requiring an additional allocation. + origCtx context.Context } var ( @@ -158,6 +164,10 @@ var ( _ runtimeTracer = (*recordingSpan)(nil) ) +func (s *recordingSpan) setOrigCtx(ctx context.Context) { + s.origCtx = ctx +} + // SpanContext returns the SpanContext of this span. 
func (s *recordingSpan) SpanContext() trace.SpanContext { if s == nil { @@ -496,14 +506,15 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } s.mu.Unlock() - if s.tracer.selfObservabilityEnabled { - defer func() { - // Add the span to the context to ensure the metric is recorded - // with the correct span context. - ctx := trace.ContextWithSpan(context.Background(), s) - set := spanLiveSet(s.spanContext.IsSampled()) - s.tracer.spanLiveMetric.AddSet(ctx, -1, set) - }() + if s.tracer.inst.Enabled() { + ctx := s.origCtx + if ctx == nil { + // This should not happen as the origCtx should be set, but + // ensure trace information is propagated in the case of an + // error. + ctx = trace.ContextWithSpan(context.Background(), s) + } + defer s.tracer.inst.SpanEnded(ctx, s) } sps := s.tracer.provider.getSpanProcessors() diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go index bec5e2097..321d97430 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go @@ -3,7 +3,7 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" -import "go.opentelemetry.io/otel/sdk/internal/env" +import "go.opentelemetry.io/otel/sdk/trace/internal/env" const ( // DefaultAttributeValueLengthLimit is the default maximum allowed diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index e965c4cce..e1d08fd4d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -7,9 +7,8 @@ import ( "context" "time" - "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -20,9 +19,7 @@ type tracer struct { provider *TracerProvider instrumentationScope instrumentation.Scope - selfObservabilityEnabled bool - spanLiveMetric otelconv.SDKSpanLive - spanStartedMetric otelconv.SDKSpanStarted + inst observ.Tracer } var _ trace.Tracer = &tracer{} @@ -53,10 +50,17 @@ func (tr *tracer) Start( s := tr.newSpan(ctx, name, &config) newCtx := trace.ContextWithSpan(ctx, s) - if tr.selfObservabilityEnabled { + if tr.inst.Enabled() { + if o, ok := s.(interface{ setOrigCtx(context.Context) }); ok { + // If this is a recording span, store the original context. + // This allows later retrieval of baggage and other information + // that may have been stored in the context at span start time, and + // avoids the allocation of repeatedly calling + // trace.ContextWithSpan. + o.setOrigCtx(newCtx) + } psc := trace.SpanContextFromContext(ctx) - set := spanStartedSet(psc, s) - tr.spanStartedMetric.AddSet(newCtx, 1, set) + tr.inst.SpanStarted(newCtx, psc, s) } if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { @@ -168,12 +172,11 @@ func (tr *tracer) newRecordingSpan( s.SetAttributes(sr.Attributes...) s.SetAttributes(config.Attributes()...) - if tr.selfObservabilityEnabled { + if tr.inst.Enabled() { // Propagate any existing values from the context with the new span to // the measurement context.
ctx = trace.ContextWithSpan(ctx, s) - set := spanLiveSet(s.spanContext.IsSampled()) - tr.spanLiveMetric.AddSet(ctx, 1, set) + tr.inst.SpanLive(ctx, s) } return s @@ -183,112 +186,3 @@ func (tr *tracer) newRecordingSpan( func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan { return nonRecordingSpan{tracer: tr, sc: sc} } - -type parentState int - -const ( - parentStateNoParent parentState = iota - parentStateLocalParent - parentStateRemoteParent -) - -type samplingState int - -const ( - samplingStateDrop samplingState = iota - samplingStateRecordOnly - samplingStateRecordAndSample -) - -type spanStartedSetKey struct { - parent parentState - sampling samplingState -} - -var spanStartedSetCache = map[spanStartedSetKey]attribute.Set{ - {parentStateNoParent, samplingStateDrop}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), - ), - {parentStateLocalParent, samplingStateDrop}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), - ), - {parentStateRemoteParent, samplingStateDrop}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), - ), - - {parentStateNoParent, samplingStateRecordOnly}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), - ), - {parentStateLocalParent, samplingStateRecordOnly}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), - ), - {parentStateRemoteParent, samplingStateRecordOnly}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), - ), - - {parentStateNoParent, samplingStateRecordAndSample}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), - ), - {parentStateLocalParent, samplingStateRecordAndSample}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), - ), - {parentStateRemoteParent, samplingStateRecordAndSample}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), - ), -} - -func spanStartedSet(psc trace.SpanContext, span trace.Span) attribute.Set { - key := spanStartedSetKey{ - parent: parentStateNoParent, - sampling: samplingStateDrop, - } - - if psc.IsValid() { - if psc.IsRemote() { - key.parent = parentStateRemoteParent - } else { - key.parent = parentStateLocalParent - } - } - - if span.IsRecording() { - if span.SpanContext().IsSampled() { - key.sampling = samplingStateRecordAndSample - } else { - key.sampling = samplingStateRecordOnly - } - } - - return spanStartedSetCache[key] -} - -type spanLiveSetKey struct 
{ - sampled bool -} - -var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{ - {true}: attribute.NewSet( - otelconv.SDKSpanLive{}.AttrSpanSamplingResult( - otelconv.SpanSamplingResultRecordAndSample, - ), - ), - {false}: attribute.NewSet( - otelconv.SDKSpanLive{}.AttrSpanSamplingResult( - otelconv.SpanSamplingResultRecordOnly, - ), - ), -} - -func spanLiveSet(sampled bool) attribute.Set { - key := spanLiveSetKey{sampled: sampled} - return spanLiveSetCache[key] -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 7f97cc31e..0a3b36619 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.38.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md deleted file mode 100644 index 2de1fc3c6..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.26.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go deleted file mode 100644 index d8dc822b2..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go +++ /dev/null @@ -1,8996 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -import "go.opentelemetry.io/otel/attribute" - -// The Android platform on which the Android application is running. -const ( - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version - // (`os.version`) of the android operating system. More information can be - // found - // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '33', '32' - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found -// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// ASP.NET Core attributes -const ( - // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.result" semantic conventions. 
It represents - // the rate-limiting result, shows whether the lease was acquired or - // contains a rejection reason - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Examples: 'acquired', 'request_canceled' - AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") - - // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to - // the "aspnetcore.diagnostics.handler.type" semantic conventions. It - // represents the full type name of the - // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) - // implementation that handled the exception. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if and only if the exception - // was handled by this handler.) - // Stability: stable - // Examples: 'Contoso.MyHandler' - AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") - - // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming - // to the "aspnetcore.diagnostics.exception.result" semantic conventions. - // It represents the aSP.NET Core exception middleware handling result - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'handled', 'unhandled' - AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") - - // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.policy" semantic conventions. It represents - // the rate limiting policy name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'fixed', 'sliding', 'token' - AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") - - // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the - // "aspnetcore.request.is_unhandled" semantic conventions. It represents - // the flag indicating if request was handled by the application pipeline. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Examples: True - AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") - - // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the - // "aspnetcore.routing.is_fallback" semantic conventions. It represents a - // value that indicates whether the matched route is a fallback route. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Examples: True - AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") - - // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the - // "aspnetcore.routing.match_status" semantic conventions. 
It represents - // the match result - success or failure - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'success', 'failure' - AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") -) - -var ( - // Lease was acquired - AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") - // Lease request was rejected by the endpoint limiter - AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") - // Lease request was rejected by the global limiter - AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") - // Lease request was canceled - AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") -) - -var ( - // Exception was handled by the exception handling middleware - AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") - // Exception was not handled by the exception handling middleware - AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") - // Exception handling was skipped because the response had started - AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") - // Exception handling didn't run because the request was aborted - AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") -) - -var ( - // Match succeeded - AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") - // Match failed - AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") -) - -// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming -// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It -// represents the full type name of the -// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) -// implementation that handled the exception. -func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { - return AspnetcoreDiagnosticsHandlerTypeKey.String(val) -} - -// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to -// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents -// the rate limiting policy name. -func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { - return AspnetcoreRateLimitingPolicyKey.String(val) -} - -// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to -// the "aspnetcore.request.is_unhandled" semantic conventions. It represents -// the flag indicating if request was handled by the application pipeline. -func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { - return AspnetcoreRequestIsUnhandledKey.Bool(val) -} - -// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to -// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a -// value that indicates whether the matched route is a fallback route. -func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { - return AspnetcoreRoutingIsFallbackKey.Bool(val) -} - -// Generic attributes for AWS services. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. 
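The ASP.NET Core enum members and helper constructors above are plain attribute.KeyValue values, so instrumentation can pass them straight to span.SetAttributes. A hedged sketch, assuming the upstream semconv v1.26.0 module (the vendored copy is what this diff removes) and a caller-supplied span; the policy name is illustrative.

package main

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

// recordRateLimiting annotates an already-started span with the ASP.NET Core
// rate-limiting attributes defined above.
func recordRateLimiting(span trace.Span, acquired bool) {
	if acquired {
		span.SetAttributes(
			semconv.AspnetcoreRateLimitingResultAcquired,
			semconv.AspnetcoreRateLimitingPolicy("sliding"), // policy name is illustrative
		)
		return
	}
	span.SetAttributes(semconv.AspnetcoreRateLimitingResultRequestCanceled)
}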
It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes for AWS DynamoDB. -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") - - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the - // number of items in the `TableNames` response parameter. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the -// `GlobalSecondaryIndexUpdates` request field. 
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. 
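Taken together, the DynamoDB helpers above map request and response fields onto span attributes. A minimal sketch of annotating a Query span, assuming a caller-supplied span; the values follow the examples given in the attribute comments above.

package main

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateDynamoDBQuery records common Query request parameters on a span.
func annotateDynamoDBQuery(span trace.Span) {
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"), // variadic: keys of the RequestItems field
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBLimit(10),
	)
}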
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the number of -// items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// Attributes for AWS Elastic Container Service (ECS). -const ( - // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" - // semantic conventions. It represents the ID of a running ECS task. The ID - // MUST be extracted from `task.arn`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is - // populated.) - // Stability: experimental - // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', - // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a - // running [ECS - // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', - // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the family - // name of the [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) - // used to create the ECS task. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for the task definition used to create the ECS task. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSTaskID returns an attribute KeyValue conforming to the -// "aws.ecs.task.id" semantic conventions. It represents the ID of a running -// ECS task. The ID MUST be extracted from `task.arn`. -func AWSECSTaskID(val string) attribute.KeyValue { - return AWSECSTaskIDKey.String(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running -// [ECS -// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the family name of -// the [ECS task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) -// used to create the ECS task. 
-func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// the task definition used to create the ECS task. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Attributes for AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Attributes for AWS Logs. -const ( - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) - // of the AWS log stream(s) an application is writing to. 
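Because the ECS and EKS attributes above describe the runtime environment rather than a single operation, they would typically land on an OTel Resource instead of a span. A sketch assuming the SDK's resource package and this package's SchemaURL constant (defined elsewhere in the package, outside this hunk); the values come from the attribute comments above.

package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// ecsResource builds a Resource describing a Fargate task.
func ecsResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
		semconv.AWSECSLaunchtypeFargate,
		semconv.AWSECSTaskFamily("opentelemetry-family"),
		semconv.AWSECSTaskRevision("8"),
	)
}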
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") -) - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// Attributes for AWS Lambda. -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for AWS S3. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. that require the object key as a mandatory parameter. 
- // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// The web browser attributes -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. 
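The S3 helpers above compose the same way; per the notes, bucket and key apply to most object operations, while copy_source is specific to copies. A sketch for a copy-object span, with values taken from the examples above and a caller-supplied span.

package main

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateS3Copy records the attributes for a copy-object call on a span.
func annotateS3Copy(span trace.Span) {
	span.SetAttributes(
		semconv.AWSS3Bucket("some-bucket-name"),
		semconv.AWSS3Key("someFile.yml"),
		semconv.AWSS3CopySource("source-bucket/someFile.yml"), // bucket/key form; value illustrative
	)
}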
It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). 
This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent the client address - // behind any intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.port` SHOULD represent the client port behind - // any intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the -// "client.address" semantic conventions. It represents the client address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// A cloud environment (e.g. GCP, Azure, AWS). -const ( - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
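Both client attributes above are stable, and their helpers return ordinary KeyValues. A minimal sketch of annotating a server-side span with the caller's address and port, using the example values from the comments above.

package main

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateClient records the connecting client's address and port on a
// server span, as observed behind any intermediaries.
func annotateClient(span trace.Span) {
	span.SetAttributes(
		semconv.ClientAddress("10.1.2.80"),
		semconv.ClientPort(65123),
	)
}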
- CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. 
- CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Apps - CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - 
// IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on -// Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// Attributes for CloudEvents. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. 
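Note the guidance above: when the full resource ID is not known at SDK startup (the Azure Functions case), cloud.resource_id has to be recorded as a span attribute rather than on the Resource. A hedged sketch of that per-span path, using the placeholder ARN format from the documented examples.

package main

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

// setCloudResourceID records the provider-native resource ID on a span for
// platforms where it cannot be resolved when the SDK starts.
func setCloudResourceID(span trace.Span) {
	span.SetAttributes(
		semconv.CloudResourceID("arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function"),
	)
}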
It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// uniquely identifies the event. -func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// These attributes allow to report this unit of code and therefore to provide -// more context about the span. -const ( - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. 
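// Sketch: the Cloudevents* constructors above are typically applied to the
// span that processes an event. The tracer wiring is an assumption; the
// example values come from the doc comments above.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

func handleEvent(ctx context.Context) {
	_, span := otel.Tracer("events").Start(ctx, "process event")
	defer span.End()
	span.SetAttributes(
		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
		semconv.CloudeventsEventSource("/cloudevents/spec/pull/123"),
		semconv.CloudeventsEventSpecVersion("1.0"),
		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
	)
}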
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeStacktraceKey is the attribute Key conforming to the - // "code.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'at - // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). 
-func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// A container instance. -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used - // to run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol' - // Note: If using embedded credentials or sensitive data, it is recommended - // to remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. [2] - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol, --config, config.yaml' - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full - // command run by the container as a single string representing the full - // command. [2] - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol --config config.yaml' - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerCPUStateKey is the attribute Key conforming to the - // "container.cpu.state" semantic conventions. It represents the CPU state - // for this data point. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'user', 'kernel' - ContainerCPUStateKey = attribute.Key("container.cpu.state") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. 
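// Sketch: annotating a span with the code.* attributes defined above so a
// backend can point at the emitting code unit. The package path and values
// are illustrative assumptions, not taken from this repository.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
	"go.opentelemetry.io/otel/trace"
)

func annotateCodeUnit(span trace.Span) {
	span.SetAttributes(
		semconv.CodeNamespace("example.com/mypkg"), // hypothetical namespace
		semconv.CodeFunction("serveRequest"),
		semconv.CodeFilepath("/src/mypkg/server.go"),
		semconv.CodeLineNumber(42),
	)
}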
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime - // specific image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect - // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) - // endpoint. - // K8S defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io - // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. - // The ID is assigned by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the - // repo digests of the container image as provided by the container - // runtime. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', - // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' - // Note: - // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) - // and - // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) - // report those under the `RepoDigests` field. - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image - // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). - // Should be only the `` section of the full name for example from - // `registry.example.com/my-org/my-image:`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'v1.27.1', '3.5.7-0' - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") -) - -var ( - // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) - ContainerCPUStateUser = ContainerCPUStateKey.String("user") - // When CPU is used by the system (host OS) - ContainerCPUStateSystem = ContainerCPUStateKey.String("system") - // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) - ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. [2] -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full -// command run by the container as a single string representing the full -// command. [2] -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime -// specific image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. 
It represents the container -// image tags. An example can be found in [Docker Image -// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). -// Should be only the `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. -func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// This group defines the attributes used to describe telemetry in the context -// of databases. -const ( - // DBClientConnectionsPoolNameKey is the attribute Key conforming to the - // "db.client.connections.pool.name" semantic conventions. It represents - // the name of the connection pool; unique within the instrumented - // application. In case the connection pool implementation doesn't provide - // a name, instrumentation should use a combination of `server.address` and - // `server.port` attributes formatted as `server.address:server.port`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myDataSource' - DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name") - - // DBClientConnectionsStateKey is the attribute Key conforming to the - // "db.client.connections.state" semantic conventions. It represents the - // state of a connection in the pool - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle' - DBClientConnectionsStateKey = attribute.Key("db.client.connections.state") - - // DBCollectionNameKey is the attribute Key conforming to the - // "db.collection.name" semantic conventions. It represents the name of a - // collection (table, container) within the database. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'public.users', 'customers' - // Note: If the collection name is parsed from the query, it SHOULD match - // the value provided in the query and may be qualified with the schema and - // database name. - // It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - DBCollectionNameKey = attribute.Key("db.collection.name") - - // DBNamespaceKey is the attribute Key conforming to the "db.namespace" - // semantic conventions. It represents the name of the database, fully - // qualified within the server address and port. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'test.users' - // Note: If a database system has multiple namespace components, they - // SHOULD be concatenated (potentially using database system specific - // conventions) from most general to most specific namespace component, and - // more specific namespaces SHOULD NOT be captured without the more general - // namespaces, to ensure that "startswith" queries for the more general - // namespaces will be valid. 
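// Sketch: the container.* constructors above are resource-level attributes;
// here they describe the container an SDK runs in. Values are the Examples
// from the doc comments above; the import path/version is assumed.
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

func containerResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ContainerID("a3bf90e006b2"),
		semconv.ContainerName("opentelemetry-autoconf"),
		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
		semconv.ContainerImageTags("v1.27.1"), // variadic: takes ...string
		semconv.ContainerRuntime("docker"),
	)
}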
- // Semantic conventions for individual database systems SHOULD document - // what `db.namespace` means in the context of that system. - // It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - DBNamespaceKey = attribute.Key("db.namespace") - - // DBOperationNameKey is the attribute Key conforming to the - // "db.operation.name" semantic conventions. It represents the name of the - // operation or command being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: It is RECOMMENDED to capture the value as provided by the - // application without attempting to do any case normalization. - DBOperationNameKey = attribute.Key("db.operation.name") - - // DBQueryTextKey is the attribute Key conforming to the "db.query.text" - // semantic conventions. It represents the database query being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey - // "WuValue"' - DBQueryTextKey = attribute.Key("db.query.text") - - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents the database management system (DBMS) product - // as identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The actual DBMS may differ from the one identified by the client. - // For example, when using PostgreSQL client libraries to connect to a - // CockroachDB, the `db.system` is set to `postgresql` based on the - // instrumentation's best knowledge. - DBSystemKey = attribute.Key("db.system") -) - -var ( - // idle - DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") - // used - DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") -) - -var ( - // Some other SQL database. Fallback only. 
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") - // Cloud Spanner - DBSystemSpanner = DBSystemKey.String("spanner") - // Trino - DBSystemTrino = DBSystemKey.String("trino") -) - -// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to -// the "db.client.connections.pool.name" semantic conventions. It represents -// the name of the connection pool; unique within the instrumented application. -// In case the connection pool implementation doesn't provide a name, -// instrumentation should use a combination of `server.address` and -// `server.port` attributes formatted as `server.address:server.port`. -func DBClientConnectionsPoolName(val string) attribute.KeyValue { - return DBClientConnectionsPoolNameKey.String(val) -} - -// DBCollectionName returns an attribute KeyValue conforming to the -// "db.collection.name" semantic conventions. It represents the name of a -// collection (table, container) within the database. -func DBCollectionName(val string) attribute.KeyValue { - return DBCollectionNameKey.String(val) -} - -// DBNamespace returns an attribute KeyValue conforming to the -// "db.namespace" semantic conventions. It represents the name of the database, -// fully qualified within the server address and port. -func DBNamespace(val string) attribute.KeyValue { - return DBNamespaceKey.String(val) -} - -// DBOperationName returns an attribute KeyValue conforming to the -// "db.operation.name" semantic conventions. It represents the name of the -// operation or command being executed. -func DBOperationName(val string) attribute.KeyValue { - return DBOperationNameKey.String(val) -} - -// DBQueryText returns an attribute KeyValue conforming to the -// "db.query.text" semantic conventions. It represents the database query being -// executed. -func DBQueryText(val string) attribute.KeyValue { - return DBQueryTextKey.String(val) -} - -// This group defines attributes for Cassandra. -const ( - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. 
how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} - -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} - -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents the whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) -} - -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) -} - -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. 
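// Sketch: a typical database client span combining the db.system enum value
// with the general db.* constructors defined above. Tracer wiring, span name,
// and query are illustrative assumptions following the doc-comment examples.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

func queryUsers(ctx context.Context) {
	_, span := otel.Tracer("db").Start(ctx, "SELECT test.users")
	defer span.End()
	span.SetAttributes(
		semconv.DBSystemPostgreSQL, // prepared enum value from the db.system list
		semconv.DBNamespace("test"),
		semconv.DBCollectionName("users"),
		semconv.DBOperationName("SELECT"), // captured as provided, no case normalization
		semconv.DBQueryText("SELECT * FROM wuser_table where username = ?"),
	)
}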
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) -} - -// This group defines attributes for Azure Cosmos DB. -const ( - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. It represents the rU - // consumed for that operation - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") -) - -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") -) - -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) -} - -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. It represents the rU -// consumed for that operation -func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { - return DBCosmosDBRequestChargeKey.Float64(val) -} - -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions. It -// represents the request payload size in bytes -func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { - return DBCosmosDBRequestContentLengthKey.Int(val) -} - -// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB -// status code. -func DBCosmosDBStatusCode(val int) attribute.KeyValue { - return DBCosmosDBStatusCodeKey.Int(val) -} - -// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos -// DB sub status code. -func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { - return DBCosmosDBSubStatusCodeKey.Int(val) -} - -// This group defines attributes for Elasticsearch. -const ( - // DBElasticsearchClusterNameKey is the attribute Key conforming to the - // "db.elasticsearch.cluster.name" semantic conventions. 
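// Sketch of the pattern this file uses throughout: Enum-typed attributes
// (connection mode, operation type) are exposed as prepared package-level
// values, while open-ended attributes get constructor functions. Values here
// are the Examples from the Cosmos DB doc comments above.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
	"go.opentelemetry.io/otel/trace"
)

func annotateCosmosCall(span trace.Span) {
	span.SetAttributes(
		semconv.DBSystemCosmosDB,                // enum value, no constructor needed
		semconv.DBCosmosDBConnectionModeGateway, // enum value, no constructor needed
		semconv.DBCosmosDBClientID("3ba4827d-4422-483f-b59f-85b74211c11d"),
		semconv.DBCosmosDBRequestCharge(46.18), // double-typed, takes float64
		semconv.DBCosmosDBStatusCode(200),
	)
}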
It represents the - // represents the identifier of an Elasticsearch cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' - DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") - - // DBElasticsearchNodeNameKey is the attribute Key conforming to the - // "db.elasticsearch.node.name" semantic conventions. It represents the - // represents the human-readable identifier of the node/instance to which a - // request was routed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-0000000001' - DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") -) - -// DBElasticsearchClusterName returns an attribute KeyValue conforming to -// the "db.elasticsearch.cluster.name" semantic conventions. It represents the -// represents the identifier of an Elasticsearch cluster. -func DBElasticsearchClusterName(val string) attribute.KeyValue { - return DBElasticsearchClusterNameKey.String(val) -} - -// DBElasticsearchNodeName returns an attribute KeyValue conforming to the -// "db.elasticsearch.node.name" semantic conventions. It represents the -// represents the human-readable identifier of the node/instance to which a -// request was routed. -func DBElasticsearchNodeName(val string) attribute.KeyValue { - return DBElasticsearchNodeNameKey.String(val) -} - -// Attributes for software deployments. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'staging', 'production' - // Note: `deployment.environment` does not affect the uniqueness - // constraints defined through - // the `service.namespace`, `service.name` and `service.instance.id` - // resource attributes. - // This implies that resources carrying the following attribute - // combinations MUST be - // considered to be identifying the same service: - // - // * `service.name=frontend`, `deployment.environment=production` - // * `service.name=frontend`, `deployment.environment=staging`. - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. It represents the name of the -// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) -// (aka deployment tier). -func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// Attributes that represents an occurrence of a lifecycle transition on the -// Android platform. -const ( - // AndroidStateKey is the attribute Key conforming to the "android.state" - // semantic conventions. It represents the deprecated use the - // `device.app.lifecycle` event definition including `android.state` as a - // payload field instead. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The Android lifecycle states are defined in [Activity lifecycle - // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), - // and from which the `OS identifiers` are derived. 
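// Sketch: per the note above, deployment.environment does not affect service
// identity, so it is normally set alongside the service.* attributes rather
// than instead of them. The resource wiring is an assumption.
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

func deploymentResource() *resource.Resource {
	// "staging" is one of the documented example values above.
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.DeploymentEnvironment("staging"),
	)
}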
- AndroidStateKey = attribute.Key("android.state") -) - -var ( - // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time - AndroidStateCreated = AndroidStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state - AndroidStateBackground = AndroidStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states - AndroidStateForeground = AndroidStateKey.String("foreground") -) - -// These attributes may be used to describe the receiver of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the - // destination address - domain name if available without reverse DNS - // lookup; otherwise, IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the source side, and when communicating through - // an intermediary, `destination.address` SHOULD represent the destination - // address behind any intermediaries, for example proxies, if it's - // available. - DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the - // "destination.port" semantic conventions. It represents the destination - // port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// Describes device attributes. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. 
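// Sketch: destination.* is for exchanges with no clear client/server roles,
// per the section comment above, e.g. a packet-level span. Values are the
// documented Examples; the trace import is an assumption.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
	"go.opentelemetry.io/otel/trace"
)

func annotatePacketFlow(span trace.Span) {
	span.SetAttributes(
		semconv.DestinationAddress("destination.example.com"),
		semconv.DestinationPort(3389),
	)
}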
On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine-readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human-readable version of - // the device model rather than a machine-readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// These attributes may be used for any disk related operation. 
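// Sketch: the constructors above are thin wrappers over the attribute API;
// DeviceID("…") is equivalent to attribute.Key("device.id").String("…"), as
// the long form below shows. Values are the documented Examples.
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

func deviceAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.DeviceID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"),
		attribute.Key("device.manufacturer").String("Apple"), // equivalent long form
		semconv.DeviceModelIdentifier("SM-G920F"),
	}
}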
-const ( - // DiskIoDirectionKey is the attribute Key conforming to the - // "disk.io.direction" semantic conventions. It represents the disk IO - // operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read' - DiskIoDirectionKey = attribute.Key("disk.io.direction") -) - -var ( - // read - DiskIoDirectionRead = DiskIoDirectionKey.String("read") - // write - DiskIoDirectionWrite = DiskIoDirectionKey.String("write") -) - -// The shared attributes used to report a DNS query. -const ( - // DNSQuestionNameKey is the attribute Key conforming to the - // "dns.question.name" semantic conventions. It represents the name being - // queried. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'www.example.com', 'opentelemetry.io' - // Note: If the name field contains non-printable characters (below 32 or - // above 126), those characters should be represented as escaped base 10 - // integers (\DDD). Back slashes and quotes should be escaped. Tabs, - // carriage returns, and line feeds should be converted to \t, \r, and \n - // respectively. - DNSQuestionNameKey = attribute.Key("dns.question.name") -) - -// DNSQuestionName returns an attribute KeyValue conforming to the -// "dns.question.name" semantic conventions. It represents the name being -// queried. -func DNSQuestionName(val string) attribute.KeyValue { - return DNSQuestionNameKey.String(val) -} - -// Attributes for operations with an authenticated and/or authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. 
-func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). -func EnduserScope(val string) attribute.KeyValue { - return EnduserScopeKey.String(val) -} - -// The shared attributes used to report an error. -const ( - // ErrorTypeKey is the attribute Key conforming to the "error.type" - // semantic conventions. It represents the describes a class of error the - // operation ended with. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'timeout', 'java.net.UnknownHostException', - // 'server_certificate_invalid', '500' - // Note: The `error.type` SHOULD be predictable, and SHOULD have low - // cardinality. - // - // When `error.type` is set to a type (e.g., an exception type), its - // canonical class name identifying the type within the artifact SHOULD be - // used. - // - // Instrumentations SHOULD document the list of errors they report. - // - // The cardinality of `error.type` within one instrumentation library - // SHOULD be low. - // Telemetry consumers that aggregate data from multiple instrumentation - // libraries and applications - // should be prepared for `error.type` to have high cardinality at query - // time when no - // additional filters are applied. - // - // If the operation has completed successfully, instrumentations SHOULD NOT - // set `error.type`. - // - // If a specific domain defines its own set of error identifiers (such as - // HTTP or gRPC status codes), - // it's RECOMMENDED to: - // - // * Use a domain-specific attribute - // * Set `error.type` to capture all errors, regardless of whether they are - // defined within the domain-specific set or not. - ErrorTypeKey = attribute.Key("error.type") -) - -var ( - // A fallback error value to be used when the instrumentation doesn't define a custom value - ErrorTypeOther = ErrorTypeKey.String("_OTHER") -) - -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It represents the identifies the class / type of - // event. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'browser.mouse.click', 'device.app.lifecycle' - // Note: Event names are subject to the same rules as [attribute - // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md). - // Notably, event names are namespaced to avoid collisions and provide a - // clean separation of semantics for events in separate domains like - // browser, mobile, and kubernetes. 
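// Sketch of the error.type guidance above: report a predictable,
// low-cardinality type where one is known, and fall back to the prepared
// "_OTHER" value otherwise. The DNS-error branch is an illustrative assumption.
package example

import (
	"errors"
	"net"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

func errorType(err error) attribute.KeyValue {
	var dnsErr *net.DNSError
	if errors.As(err, &dnsErr) {
		// Canonical class/type name identifying the error within the artifact.
		return semconv.ErrorTypeKey.String("net.DNSError")
	}
	return semconv.ErrorTypeOther // fallback enum value defined above
}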
- EventNameKey = attribute.Key("event.name") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It represents the identifies the class / type of -// event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// The shared attributes used to report a single exception associated with a -// span or log. -const ( - // ExceptionEscapedKey is the attribute Key conforming to the - // "exception.escaped" semantic conventions. It represents the sHOULD be - // set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of - // a span, - // if that span is ended while the exception is still logically "in - // flight". - // This may be actually "in flight" in some languages (e.g. if the - // exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most - // languages. - // - // It is usually not possible to determine at the point where an exception - // is thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending - // the span, - // as done in the [example for recording span - // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception). - // - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") - - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Division by zero', "Can't convert 'int' object to str - // implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace - // as a string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). The dynamic type of the - // exception should be preferred over the static type in languages that - // support it. 
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It SHOULD be set to
-// true if the exception event is recorded at a point where it is known that
-// the exception is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
- return ExceptionEscapedKey.Bool(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
- return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
- return ExceptionStacktraceKey.String(val)
-}
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
- return ExceptionTypeKey.String(val)
-}
-
-// FaaS attributes
-const (
- // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
- // semantic conventions. It represents a boolean that is true if the
- // serverless function is executed for the first time (aka cold-start).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-
- // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
- // conventions. It represents a string containing the schedule period as
- // [Cron
- // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in
- // Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
- // "faas.document.operation" semantic conventions. It describes the type
- // of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
- // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
- // semantic conventions. It represents the execution environment ID as a
- // string, that will potentially be reused for other invocations to the
- // same function/function version.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
-
- // FaaSInvocationIDKey is the attribute Key conforming to the
- // "faas.invocation_id" semantic conventions. It represents the invocation
- // ID of the current function invocation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-
- // FaaSInvokedNameKey is the attribute Key conforming to the
- // "faas.invoked_name" semantic conventions. It represents the name of the
- // invoked function.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the
- // invoked function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
- // FaaSInvokedProviderKey is the attribute Key conforming to the
- // "faas.invoked_provider" semantic conventions. It represents the cloud
- // provider of the invoked function.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
- // invoked function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
- // FaaSInvokedRegionKey is the attribute Key conforming to the
- // "faas.invoked_region" semantic conventions. It represents the cloud
- // region of the invoked function.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the
- // invoked function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-
- // FaaSMaxMemoryKey is the attribute Key conforming to the
- // "faas.max_memory" semantic conventions. It represents the amount of
- // memory available to the serverless function converted to Bytes.
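
Taken together, the `faas.document.*` attributes describe one storage-triggered invocation. A minimal sketch, assuming a `trace.Span` named `span` and this package imported as `semconv`; the bucket and object names are the documented examples:

// assumes: "time" and this package imported as semconv
span.SetAttributes(
	semconv.FaaSTriggerDatasource,                    // trigger enum, defined below
	semconv.FaaSDocumentCollection("myBucketName"),   // bucket (or database) name
	semconv.FaaSDocumentName("myFile.txt"),           // file (or table) name
	semconv.FaaSDocumentOperationInsert,              // operation enum, defined below
	semconv.FaaSDocumentTime(time.Now().UTC().Format(time.RFC3339)),
)
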
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 134217728
- // Note: It's recommended to set this attribute since e.g. too little
- // memory can easily stop a Java AWS Lambda function from working
- // correctly. On AWS Lambda, the environment variable
- // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
- // be multiplied by 1,048,576).
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-
- // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
- // conventions. It represents the name of the single function that this
- // runtime instance executes.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
- // Note: This is the name of the function as configured/deployed on the
- // FaaS
- // platform and is usually different from the name of the callback
- // function (which may be stored in the
- // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
- // span attributes).
- //
- // For some cloud providers, the above definition is ambiguous. The
- // following
- // definition of function name MUST be used for this attribute
- // (and consequently the span name) for the listed cloud
- // providers/products:
- //
- // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
- // followed by a forward slash followed by the function name (this form
- // can also be seen in the resource JSON for the function).
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider (see also the `cloud.resource_id` attribute).
- FaaSNameKey = attribute.Key("faas.name")
-
- // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
- // conventions. It represents a string containing the function invocation
- // time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
-
- // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
- // semantic conventions. It represents the type of the trigger which caused
- // this function invocation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSTriggerKey = attribute.Key("faas.trigger")
-
- // FaaSVersionKey is the attribute Key conforming to the "faas.version"
- // semantic conventions. It represents the immutable version of the
- // function being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
- //
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run (Services):** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable.
Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. 
It represents the execution
-// environment ID as a string, that will potentially be reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
- return FaaSInstanceKey.String(val)
-}
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
- return FaaSInvocationIDKey.String(val)
-}
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
- return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
- return FaaSInvokedRegionKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
- return FaaSMaxMemoryKey.Int(val)
-}
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
- return FaaSNameKey.String(val)
-}
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
- return FaaSTimeKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
- return FaaSVersionKey.String(val)
-}
-
-// Attributes for Feature Flags.
-const (
- // FeatureFlagKeyKey is the attribute Key conforming to the
- // "feature_flag.key" semantic conventions. It represents the unique
- // identifier of the feature flag.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'logo-color'
- FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
- // FeatureFlagProviderNameKey is the attribute Key conforming to the
- // "feature_flag.provider_name" semantic conventions. It represents the
- // name of the service provider that performs the flag evaluation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Flag Manager'
- FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
- // FeatureFlagVariantKey is the attribute Key conforming to the
- // "feature_flag.variant" semantic conventions. It SHOULD be
- // a semantic identifier for a value. If one is unavailable, a stringified
- // version of the value can be used.
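
A flag evaluation is typically recorded as a span event carrying these three attributes. A minimal sketch, assuming a `trace.Span` named `span`, this package imported as `semconv`, and that `"feature_flag"` is the event name used for such evaluations; the values are the documented examples:

// assumes: trace "go.opentelemetry.io/otel/trace" and this package as semconv
span.AddEvent("feature_flag", trace.WithAttributes(
	semconv.FeatureFlagKey("logo-color"),
	semconv.FeatureFlagProviderName("Flag Manager"),
	// the variant "red" stands in for the raw value "#c05543"
	semconv.FeatureFlagVariant("red"),
))
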
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'red', 'true', 'on'
- // Note: A semantic identifier, commonly referred to as a variant, provides
- // a means
- // for referring to a value without including the value itself. This can
- // provide additional context for understanding the meaning behind a value.
- // For example, the variant `red` may be used for the value `#c05543`.
- //
- // A stringified version of the value can be used in situations where a
- // semantic identifier is unavailable. String representation of the value
- // should be determined by the implementer.
- FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
- return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
- return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It SHOULD be a
-// semantic identifier for a value. If one is unavailable, a stringified
-// version of the value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
- return FeatureFlagVariantKey.String(val)
-}
-
-// Describes file attributes.
-const (
- // FileDirectoryKey is the attribute Key conforming to the "file.directory"
- // semantic conventions. It represents the directory where the file is
- // located. It should include the drive letter, when appropriate.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/home/user', 'C:\\Program Files\\MyApp'
- FileDirectoryKey = attribute.Key("file.directory")
-
- // FileExtensionKey is the attribute Key conforming to the "file.extension"
- // semantic conventions. It represents the file extension, excluding the
- // leading dot.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'png', 'gz'
- // Note: When the file name has multiple extensions (example.tar.gz), only
- // the last one should be captured ("gz", not "tar.gz").
- FileExtensionKey = attribute.Key("file.extension")
-
- // FileNameKey is the attribute Key conforming to the "file.name" semantic
- // conventions. It represents the name of the file including the extension,
- // without the directory.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'example.png'
- FileNameKey = attribute.Key("file.name")
-
- // FilePathKey is the attribute Key conforming to the "file.path" semantic
- // conventions. It represents the full path to the file, including the file
- // name. It should include the drive letter, when appropriate.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/home/alice/example.png', 'C:\\Program
- // Files\\MyApp\\myapp.exe'
- FilePathKey = attribute.Key("file.path")
-
- // FileSizeKey is the attribute Key conforming to the "file.size" semantic
- // conventions. It represents the file size in bytes.
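
The `file.*` attributes decompose a single path, and the standard library already matches the conventions: `filepath.Ext` keeps only the last extension, as the note above requires. A minimal sketch, assuming this package is imported as `semconv` and the size is obtained elsewhere (e.g. from `os.Stat`):

// assumes: "path/filepath", "strings",
// "go.opentelemetry.io/otel/attribute", and this package as semconv
func fileAttrs(path string, size int64) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.FilePath(path),
		semconv.FileDirectory(filepath.Dir(path)),
		semconv.FileName(filepath.Base(path)),
		// filepath.Ext returns ".gz" for "example.tar.gz"; trim the dot
		semconv.FileExtension(strings.TrimPrefix(filepath.Ext(path), ".")),
		semconv.FileSize(int(size)),
	}
}
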
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - FileSizeKey = attribute.Key("file.size") -) - -// FileDirectory returns an attribute KeyValue conforming to the -// "file.directory" semantic conventions. It represents the directory where the -// file is located. It should include the drive letter, when appropriate. -func FileDirectory(val string) attribute.KeyValue { - return FileDirectoryKey.String(val) -} - -// FileExtension returns an attribute KeyValue conforming to the -// "file.extension" semantic conventions. It represents the file extension, -// excluding the leading dot. -func FileExtension(val string) attribute.KeyValue { - return FileExtensionKey.String(val) -} - -// FileName returns an attribute KeyValue conforming to the "file.name" -// semantic conventions. It represents the name of the file including the -// extension, without the directory. -func FileName(val string) attribute.KeyValue { - return FileNameKey.String(val) -} - -// FilePath returns an attribute KeyValue conforming to the "file.path" -// semantic conventions. It represents the full path to the file, including the -// file name. It should include the drive letter, when appropriate. -func FilePath(val string) attribute.KeyValue { - return FilePathKey.String(val) -} - -// FileSize returns an attribute KeyValue conforming to the "file.size" -// semantic conventions. It represents the file size in bytes. -func FileSize(val int) attribute.KeyValue { - return FileSizeKey.Int(val) -} - -// Attributes for Google Cloud Run. -const ( - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the - // name of the Cloud Run - // [execution](https://cloud.google.com/run/docs/managing/job-executions) - // being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'job-name-xxxx', 'sample-job-mdw84' - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the - // index for a task within an execution as provided by the - // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1 - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") -) - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name -// of the Cloud Run -// [execution](https://cloud.google.com/run/docs/managing/job-executions) being -// run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index -// for a task within an execution as provided by the -// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// Attributes for Google Compute Engine (GCE). -const ( - // GCPGceInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the - // hostname of a GCE instance. This is the full value of the default or - // [custom - // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-host1234.example.com', - // 'sample-vm.us-west1-b.c.my-project.internal' - GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGceInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance - // name of a GCE instance. This is the value provided by `host.name`, the - // visible name of the instance in the Cloud Console UI, and the prefix for - // the default hostname of the instance as defined by the [default internal - // DNS - // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-1', 'my-vm-name' - GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPGceInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom -// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). -func GCPGceInstanceHostname(val string) attribute.KeyValue { - return GCPGceInstanceHostnameKey.String(val) -} - -// GCPGceInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance -// name of a GCE instance. This is the value provided by `host.name`, the -// visible name of the instance in the Cloud Console UI, and the prefix for the -// default hostname of the instance as defined by the [default internal DNS -// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). -func GCPGceInstanceName(val string) attribute.KeyValue { - return GCPGceInstanceNameKey.String(val) -} - -// The attributes used to describe telemetry in the context of LLM (Large -// Language Models) requests and responses. -const ( - // GenAiCompletionKey is the attribute Key conforming to the - // "gen_ai.completion" semantic conventions. It represents the full - // response received from the LLM. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: "[{'role': 'assistant', 'content': 'The capital of France is - // Paris.'}]" - // Note: It's RECOMMENDED to format completions as JSON string matching - // [OpenAI messages - // format](https://platform.openai.com/docs/guides/text-generation) - GenAiCompletionKey = attribute.Key("gen_ai.completion") - - // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" - // semantic conventions. It represents the full prompt sent to an LLM. 
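
Both Cloud Run job attributes come straight from the environment variables named in the comments above. A minimal sketch, assuming this package is imported as `semconv`:

// assumes: "os", "strconv",
// "go.opentelemetry.io/otel/attribute", and this package as semconv
func cloudRunJobAttrs() []attribute.KeyValue {
	var attrs []attribute.KeyValue
	if exec := os.Getenv("CLOUD_RUN_EXECUTION"); exec != "" {
		attrs = append(attrs, semconv.GCPCloudRunJobExecution(exec))
	}
	if idx, err := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX")); err == nil {
		attrs = append(attrs, semconv.GCPCloudRunJobTaskIndex(idx))
	}
	return attrs
}
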
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: "[{'role': 'user', 'content': 'What is the capital of - // France?'}]" - // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI - // messages - // format](https://platform.openai.com/docs/guides/text-generation) - GenAiPromptKey = attribute.Key("gen_ai.prompt") - - // GenAiRequestMaxTokensKey is the attribute Key conforming to the - // "gen_ai.request.max_tokens" semantic conventions. It represents the - // maximum number of tokens the LLM generates for a request. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") - - // GenAiRequestModelKey is the attribute Key conforming to the - // "gen_ai.request.model" semantic conventions. It represents the name of - // the LLM a request is being made to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gpt-4' - GenAiRequestModelKey = attribute.Key("gen_ai.request.model") - - // GenAiRequestTemperatureKey is the attribute Key conforming to the - // "gen_ai.request.temperature" semantic conventions. It represents the - // temperature setting for the LLM request. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0.0 - GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") - - // GenAiRequestTopPKey is the attribute Key conforming to the - // "gen_ai.request.top_p" semantic conventions. It represents the top_p - // sampling setting for the LLM request. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0 - GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") - - // GenAiResponseFinishReasonsKey is the attribute Key conforming to the - // "gen_ai.response.finish_reasons" semantic conventions. It represents the - // array of reasons the model stopped generating tokens, corresponding to - // each generation received. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'stop' - GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") - - // GenAiResponseIDKey is the attribute Key conforming to the - // "gen_ai.response.id" semantic conventions. It represents the unique - // identifier for the completion. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'chatcmpl-123' - GenAiResponseIDKey = attribute.Key("gen_ai.response.id") - - // GenAiResponseModelKey is the attribute Key conforming to the - // "gen_ai.response.model" semantic conventions. It represents the name of - // the LLM a response was generated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gpt-4-0613' - GenAiResponseModelKey = attribute.Key("gen_ai.response.model") - - // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" - // semantic conventions. It represents the Generative AI product as - // identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'openai' - // Note: The actual GenAI product may differ from the one identified by the - // client. 
For example, when using OpenAI client libraries to communicate - // with Mistral, the `gen_ai.system` is set to `openai` based on the - // instrumentation's best knowledge. - GenAiSystemKey = attribute.Key("gen_ai.system") - - // GenAiUsageCompletionTokensKey is the attribute Key conforming to the - // "gen_ai.usage.completion_tokens" semantic conventions. It represents the - // number of tokens used in the LLM response (completion). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 180 - GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") - - // GenAiUsagePromptTokensKey is the attribute Key conforming to the - // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the - // number of tokens used in the LLM prompt. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") -) - -var ( - // OpenAI - GenAiSystemOpenai = GenAiSystemKey.String("openai") -) - -// GenAiCompletion returns an attribute KeyValue conforming to the -// "gen_ai.completion" semantic conventions. It represents the full response -// received from the LLM. -func GenAiCompletion(val string) attribute.KeyValue { - return GenAiCompletionKey.String(val) -} - -// GenAiPrompt returns an attribute KeyValue conforming to the -// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to -// an LLM. -func GenAiPrompt(val string) attribute.KeyValue { - return GenAiPromptKey.String(val) -} - -// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the -// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum -// number of tokens the LLM generates for a request. -func GenAiRequestMaxTokens(val int) attribute.KeyValue { - return GenAiRequestMaxTokensKey.Int(val) -} - -// GenAiRequestModel returns an attribute KeyValue conforming to the -// "gen_ai.request.model" semantic conventions. It represents the name of the -// LLM a request is being made to. -func GenAiRequestModel(val string) attribute.KeyValue { - return GenAiRequestModelKey.String(val) -} - -// GenAiRequestTemperature returns an attribute KeyValue conforming to the -// "gen_ai.request.temperature" semantic conventions. It represents the -// temperature setting for the LLM request. -func GenAiRequestTemperature(val float64) attribute.KeyValue { - return GenAiRequestTemperatureKey.Float64(val) -} - -// GenAiRequestTopP returns an attribute KeyValue conforming to the -// "gen_ai.request.top_p" semantic conventions. It represents the top_p -// sampling setting for the LLM request. -func GenAiRequestTopP(val float64) attribute.KeyValue { - return GenAiRequestTopPKey.Float64(val) -} - -// GenAiResponseFinishReasons returns an attribute KeyValue conforming to -// the "gen_ai.response.finish_reasons" semantic conventions. It represents the -// array of reasons the model stopped generating tokens, corresponding to each -// generation received. -func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { - return GenAiResponseFinishReasonsKey.StringSlice(val) -} - -// GenAiResponseID returns an attribute KeyValue conforming to the -// "gen_ai.response.id" semantic conventions. It represents the unique -// identifier for the completion. 
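
An LLM-call span would typically carry the request settings, the response identity, and the token usage together. A minimal sketch, assuming a `trace.Span` named `span` and this package imported as `semconv`; every value is one of the documented examples:

// assumes: this package imported as semconv
span.SetAttributes(
	semconv.GenAiSystemOpenai,                  // gen_ai.system enum value
	semconv.GenAiRequestModel("gpt-4"),
	semconv.GenAiRequestTemperature(0.0),
	semconv.GenAiRequestMaxTokens(100),
	semconv.GenAiResponseModel("gpt-4-0613"),
	semconv.GenAiResponseFinishReasons("stop"), // variadic: one per generation
	semconv.GenAiUsagePromptTokens(100),
	semconv.GenAiUsageCompletionTokens(180),
)
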
-func GenAiResponseID(val string) attribute.KeyValue {
- return GenAiResponseIDKey.String(val)
-}
-
-// GenAiResponseModel returns an attribute KeyValue conforming to the
-// "gen_ai.response.model" semantic conventions. It represents the name of the
-// LLM a response was generated from.
-func GenAiResponseModel(val string) attribute.KeyValue {
- return GenAiResponseModelKey.String(val)
-}
-
-// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
-// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
-// number of tokens used in the LLM response (completion).
-func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
- return GenAiUsageCompletionTokensKey.Int(val)
-}
-
-// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
-// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
-// of tokens used in the LLM prompt.
-func GenAiUsagePromptTokens(val int) attribute.KeyValue {
- return GenAiUsagePromptTokensKey.Int(val)
-}
-
-// Attributes for GraphQL.
-const (
- // GraphqlDocumentKey is the attribute Key conforming to the
- // "graphql.document" semantic conventions. It represents the GraphQL
- // document being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
- // Note: The value may be sanitized to exclude sensitive information.
- GraphqlDocumentKey = attribute.Key("graphql.document")
-
- // GraphqlOperationNameKey is the attribute Key conforming to the
- // "graphql.operation.name" semantic conventions. It represents the name of
- // the operation being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'findBookByID'
- GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
- // GraphqlOperationTypeKey is the attribute Key conforming to the
- // "graphql.operation.type" semantic conventions. It represents the type of
- // the operation being executed.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query', 'mutation', 'subscription'
- GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-)
-
-var (
- // GraphQL query
- GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
- // GraphQL mutation
- GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
- // GraphQL subscription
- GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
- return GraphqlDocumentKey.String(val)
-}
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
- return GraphqlOperationNameKey.String(val)
-}
-
-// Attributes for the Heroku platform on which the application is running.
-const (
- // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
- // semantic conventions.
It represents the unique identifier for the
- // application
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
- HerokuAppIDKey = attribute.Key("heroku.app.id")
-
- // HerokuReleaseCommitKey is the attribute Key conforming to the
- // "heroku.release.commit" semantic conventions. It represents the commit
- // hash for the current release
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
- HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
-
- // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
- // "heroku.release.creation_timestamp" semantic conventions. It represents
- // the time and date the release was created
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2022-10-23T18:00:42Z'
- HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
-)
-
-// HerokuAppID returns an attribute KeyValue conforming to the
-// "heroku.app.id" semantic conventions. It represents the unique identifier
-// for the application
-func HerokuAppID(val string) attribute.KeyValue {
- return HerokuAppIDKey.String(val)
-}
-
-// HerokuReleaseCommit returns an attribute KeyValue conforming to the
-// "heroku.release.commit" semantic conventions. It represents the commit hash
-// for the current release
-func HerokuReleaseCommit(val string) attribute.KeyValue {
- return HerokuReleaseCommitKey.String(val)
-}
-
-// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
-// to the "heroku.release.creation_timestamp" semantic conventions. It
-// represents the time and date the release was created
-func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
- return HerokuReleaseCreationTimestampKey.String(val)
-}
-
-// A host is defined as a computing instance. For example, physical servers,
-// virtual machines, switches, or disk arrays.
-const (
- // HostArchKey is the attribute Key conforming to the "host.arch" semantic
- // conventions. It represents the CPU architecture the host system is
- // running on.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- HostArchKey = attribute.Key("host.arch")
-
- // HostCPUCacheL2SizeKey is the attribute Key conforming to the
- // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
- // of level 2 memory cache available to the processor (in Bytes).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 12288000
- HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
-
- // HostCPUFamilyKey is the attribute Key conforming to the
- // "host.cpu.family" semantic conventions. It represents the family or
- // generation of the CPU.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', 'PA-RISC 1.1e'
- HostCPUFamilyKey = attribute.Key("host.cpu.family")
-
- // HostCPUModelIDKey is the attribute Key conforming to the
- // "host.cpu.model.id" semantic conventions. It represents the model
- // identifier. It provides more granular information about the CPU,
- // distinguishing it from other CPUs within the same family.
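
The `host.arch` enum values (listed further down) largely mirror Go's `runtime.GOARCH` names, so a detector can map between them. A minimal sketch, assuming this package is imported as `semconv`; the fallback branch relies on the enum being open to custom strings:

// assumes: "runtime", "go.opentelemetry.io/otel/attribute",
// and this package as semconv
func hostArch() attribute.KeyValue {
	switch runtime.GOARCH {
	case "amd64":
		return semconv.HostArchAMD64
	case "arm64":
		return semconv.HostArchARM64
	case "386":
		return semconv.HostArchX86
	case "ppc64":
		return semconv.HostArchPPC64
	case "s390x":
		return semconv.HostArchS390x
	default:
		// assumed open enum: fall back to the raw GOARCH value
		return semconv.HostArchKey.String(runtime.GOARCH)
	}
}
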
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', '9000/778/B180L'
- HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
-
- // HostCPUModelNameKey is the attribute Key conforming to the
- // "host.cpu.model.name" semantic conventions. It represents the model
- // designation of the processor.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
- HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
-
- // HostCPUSteppingKey is the attribute Key conforming to the
- // "host.cpu.stepping" semantic conventions. It represents the stepping or
- // core revisions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1', 'r1p1'
- HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
-
- // HostCPUVendorIDKey is the attribute Key conforming to the
- // "host.cpu.vendor.id" semantic conventions. It represents the processor
- // manufacturer identifier. A maximum 12-character string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'GenuineIntel'
- // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
- // ID string in EBX, EDX and ECX registers. Writing these to memory in this
- // order results in a 12-character string.
- HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
-
- // HostIDKey is the attribute Key conforming to the "host.id" semantic
- // conventions. It represents the unique host ID. For Cloud, this must be
- // the instance_id assigned by the cloud provider. For non-containerized
- // systems, this should be the `machine-id`. See the table below for the
- // sources to use to determine the `machine-id` based on operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
- HostIDKey = attribute.Key("host.id")
-
- // HostImageIDKey is the attribute Key conforming to the "host.image.id"
- // semantic conventions. It represents the VM image ID or host OS image ID.
- // For Cloud, this value is from the provider.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
-
- // HostImageNameKey is the attribute Key conforming to the
- // "host.image.name" semantic conventions. It represents the name of the VM
- // image or OS install the host was instantiated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
-
- // HostImageVersionKey is the attribute Key conforming to the
- // "host.image.version" semantic conventions. It represents the version
- // string of the VM image or host OS as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-
- // HostIPKey is the attribute Key conforming to the "host.ip" semantic
- // conventions. It represents the available IP addresses of the host,
- // excluding loopback interfaces.
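
Collecting `host.ip` values with the standard library is straightforward, and `net.IP.String()` already prints IPv6 addresses in the canonical RFC 5952 form the note below requires. A minimal sketch, assuming this package is imported as `semconv`:

// assumes: "net" and this package as semconv
func hostIPs() []string {
	var ips []string
	ifaces, err := net.Interfaces()
	if err != nil {
		return nil
	}
	for _, ifc := range ifaces {
		if ifc.Flags&net.FlagLoopback != 0 {
			continue // the convention excludes loopback interfaces
		}
		addrs, err := ifc.Addrs()
		if err != nil {
			continue
		}
		for _, a := range addrs {
			if ipn, ok := a.(*net.IPNet); ok {
				ips = append(ips, ipn.IP.String())
			}
		}
	}
	return ips // then: semconv.HostIP(ips...)
}
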
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' - // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 - // addresses MUST be specified in the [RFC - // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. - HostIPKey = attribute.Key("host.ip") - - // HostMacKey is the attribute Key conforming to the "host.mac" semantic - // conventions. It represents the available MAC addresses of the host, - // excluding loopback interfaces. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal - // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): - // as hyphen-separated octets in uppercase hexadecimal form from most to - // least significant. - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). -func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. It represents the family or -// generation of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model -// identifier. It provides more granular information about the CPU, -// distinguishing it from other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. 
-func HostCPUModelName(val string) attribute.KeyValue {
- return HostCPUModelNameKey.String(val)
-}
-
-// HostCPUStepping returns an attribute KeyValue conforming to the
-// "host.cpu.stepping" semantic conventions. It represents the stepping or core
-// revisions.
-func HostCPUStepping(val string) attribute.KeyValue {
- return HostCPUSteppingKey.String(val)
-}
-
-// HostCPUVendorID returns an attribute KeyValue conforming to the
-// "host.cpu.vendor.id" semantic conventions. It represents the processor
-// manufacturer identifier. A maximum 12-character string.
-func HostCPUVendorID(val string) attribute.KeyValue {
- return HostCPUVendorIDKey.String(val)
-}
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
- return HostIDKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the VM image ID or host
-// OS image ID. For Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
- return HostImageIDKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
- return HostImageNameKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image or host OS as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
- return HostImageVersionKey.String(val)
-}
-
-// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
-// conventions. It represents the available IP addresses of the host, excluding
-// loopback interfaces.
-func HostIP(val ...string) attribute.KeyValue {
- return HostIPKey.StringSlice(val)
-}
-
-// HostMac returns an attribute KeyValue conforming to the "host.mac"
-// semantic conventions. It represents the available MAC addresses of the host,
-// excluding loopback interfaces.
-func HostMac(val ...string) attribute.KeyValue {
- return HostMacKey.StringSlice(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
- return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
- return HostTypeKey.String(val)
-}
-
-// Semantic convention attributes in the HTTP namespace.
-const (
- // HTTPConnectionStateKey is the attribute Key conforming to the
- // "http.connection.state" semantic conventions. It represents the state of
- // the HTTP connection in the HTTP connection pool.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'active', 'idle'
- HTTPConnectionStateKey = attribute.Key("http.connection.state")
-
- // HTTPRequestBodySizeKey is the attribute Key conforming to the
- // "http.request.body.size" semantic conventions. It represents the size of
- // the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
-
- // HTTPRequestMethodKey is the attribute Key conforming to the
- // "http.request.method" semantic conventions. It represents the HTTP
- // request method.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- // Note: HTTP request method value SHOULD be "known" to the
- // instrumentation.
- // By default, this convention defines "known" methods as the ones listed
- // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
- // and the PATCH method defined in
- // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
- //
- // If the HTTP request method is not known to instrumentation, it MUST set
- // the `http.request.method` attribute to `_OTHER`.
- //
- // If the HTTP instrumentation could end up converting valid HTTP request
- // methods to `_OTHER`, then it MUST provide a way to override
- // the list of known HTTP methods. If this override is done via environment
- // variable, then the environment variable MUST be named
- // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
- // list of case-sensitive known HTTP methods
- // (this list MUST be a full override of the default known methods; it is
- // not a list of known methods in addition to the defaults).
- //
- // HTTP method names are case-sensitive and `http.request.method` attribute
- // value MUST match a known HTTP method name exactly.
- // Instrumentations for specific web frameworks that consider HTTP methods
- // to be case insensitive SHOULD populate a canonical equivalent.
- // Tracing instrumentations that do so MUST also set
- // `http.request.method_original` to the original value.
- HTTPRequestMethodKey = attribute.Key("http.request.method")
-
- // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
- // "http.request.method_original" semantic conventions. It represents the
- // original HTTP method sent by the client in the request line.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GeT', 'ACL', 'foo'
- HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
-
- // HTTPRequestResendCountKey is the attribute Key conforming to the
- // "http.request.resend_count" semantic conventions. It represents the
- // ordinal number of request resending attempt (for any reason, including
- // redirects).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3
- // Note: The resend count SHOULD be updated each time an HTTP request gets
- // resent by the client, regardless of what was the cause of the resending
- // (e.g.
redirection, authorization failure, 503 Service Unavailable,
- // network issues, or any other).
- HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
-
- // HTTPRequestSizeKey is the attribute Key conforming to the
- // "http.request.size" semantic conventions. It represents the total size
- // of the request in bytes. This should be the total number of bytes sent
- // over the wire, including the request line (HTTP/1.1), framing (HTTP/2
- // and HTTP/3), headers, and request body if any.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1437
- HTTPRequestSizeKey = attribute.Key("http.request.size")
-
- // HTTPResponseBodySizeKey is the attribute Key conforming to the
- // "http.response.body.size" semantic conventions. It represents the size
- // of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
-
- // HTTPResponseSizeKey is the attribute Key conforming to the
- // "http.response.size" semantic conventions. It represents the total size
- // of the response in bytes. This should be the total number of bytes sent
- // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
- // HTTP/3), headers, and response body and trailers if any.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1437
- HTTPResponseSizeKey = attribute.Key("http.response.size")
-
- // HTTPResponseStatusCodeKey is the attribute Key conforming to the
- // "http.response.status_code" semantic conventions. It represents the
- // [HTTP response status
- // code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 200
- HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
-
- // HTTPRouteKey is the attribute Key conforming to the "http.route"
- // semantic conventions. It represents the matched route, that is, the path
- // template in the format used by the respective server framework.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
- // Note: MUST NOT be populated when this is not supported by the HTTP
- // server framework as the route attribute should have low-cardinality and
- // the URI path can NOT substitute it.
- // SHOULD include the [application
- // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
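
The method enum, `_OTHER` fallback, `http.request.method_original`, and the low-cardinality route come together on a server span. A minimal sketch, assuming this package is imported as `semconv` and that `route` is the matched template (e.g. "/users/{id}") supplied by the framework:

// assumes: "net/http", "go.opentelemetry.io/otel/attribute",
// and this package as semconv
func httpServerAttrs(r *http.Request, route string, status int) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.HTTPRoute(route), // template, never the raw URI path
		semconv.HTTPResponseStatusCode(status),
	}
	switch r.Method {
	case http.MethodGet:
		attrs = append(attrs, semconv.HTTPRequestMethodGet)
	case http.MethodPost:
		attrs = append(attrs, semconv.HTTPRequestMethodPost)
	// ... remaining known methods elided ...
	default:
		// unknown method: MUST use _OTHER; keep the original value alongside
		attrs = append(attrs,
			semconv.HTTPRequestMethodOther,
			semconv.HTTPRequestMethodOriginal(r.Method))
	}
	return attrs
}
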
- HTTPRouteKey = attribute.Key("http.route") -) - -var ( - // active state - HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") - // idle state - HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") -) - -var ( - // CONNECT method - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // Any HTTP method that the instrumentation has no prior knowledge of - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) -} - -// HTTPRequestSize returns an attribute KeyValue conforming to the -// "http.request.size" semantic conventions. It represents the total size of -// the request in bytes. This should be the total number of bytes sent over the -// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and request body if any. -func HTTPRequestSize(val int) attribute.KeyValue { - return HTTPRequestSizeKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of -// the response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// HTTPResponseSize returns an attribute KeyValue conforming to the -// "http.response.size" semantic conventions. 
It represents the total size of -// the response in bytes. This should be the total number of bytes sent over -// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and response body and trailers if any. -func HTTPResponseSize(val int) attribute.KeyValue { - return HTTPResponseSizeKey.Int(val) -} - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the [HTTP -// response status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route, that is, the path -// template in the format used by the respective server framework. -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Java Virtual machine related attributes. -const ( - // JvmBufferPoolNameKey is the attribute Key conforming to the - // "jvm.buffer.pool.name" semantic conventions. It represents the name of - // the buffer pool. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mapped', 'direct' - // Note: Pool names are generally obtained via - // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). - JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") - - // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action" - // semantic conventions. It represents the name of the garbage collector - // action. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'end of minor GC', 'end of major GC' - // Note: Garbage collector action is generally obtained via - // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()). - JvmGcActionKey = attribute.Key("jvm.gc.action") - - // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name" - // semantic conventions. It represents the name of the garbage collector. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'G1 Young Generation', 'G1 Old Generation' - // Note: Garbage collector name is generally obtained via - // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()). - JvmGcNameKey = attribute.Key("jvm.gc.name") - - // JvmMemoryPoolNameKey is the attribute Key conforming to the - // "jvm.memory.pool.name" semantic conventions. It represents the name of - // the memory pool. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' - // Note: Pool names are generally obtained via - // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). - JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") - - // JvmMemoryTypeKey is the attribute Key conforming to the - // "jvm.memory.type" semantic conventions. It represents the type of - // memory. 
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'heap', 'non_heap'
- JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
-
- // JvmThreadDaemonKey is the attribute Key conforming to the
- // "jvm.thread.daemon" semantic conventions. It represents whether the
- // thread is a daemon or not.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
-
- // JvmThreadStateKey is the attribute Key conforming to the
- // "jvm.thread.state" semantic conventions. It represents the state of the
- // thread.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'runnable', 'blocked'
- JvmThreadStateKey = attribute.Key("jvm.thread.state")
-)
-
-var (
- // Heap memory
- JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
- // Non-heap memory
- JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
-)
-
-var (
- // A thread that has not yet started is in this state
- JvmThreadStateNew = JvmThreadStateKey.String("new")
- // A thread executing in the Java virtual machine is in this state
- JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
- // A thread that is blocked waiting for a monitor lock is in this state
- JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
- // A thread that is waiting indefinitely for another thread to perform a particular action is in this state
- JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
- // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
- JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
- // A thread that has exited is in this state
- JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
-)
-
-// JvmBufferPoolName returns an attribute KeyValue conforming to the
-// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
-// buffer pool.
-func JvmBufferPoolName(val string) attribute.KeyValue {
- return JvmBufferPoolNameKey.String(val)
-}
-
-// JvmGcAction returns an attribute KeyValue conforming to the
-// "jvm.gc.action" semantic conventions. It represents the name of the garbage
-// collector action.
-func JvmGcAction(val string) attribute.KeyValue {
- return JvmGcActionKey.String(val)
-}
-
-// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
-// semantic conventions. It represents the name of the garbage collector.
-func JvmGcName(val string) attribute.KeyValue {
- return JvmGcNameKey.String(val)
-}
-
-// JvmMemoryPoolName returns an attribute KeyValue conforming to the
-// "jvm.memory.pool.name" semantic conventions. It represents the name of the
-// memory pool.
-func JvmMemoryPoolName(val string) attribute.KeyValue {
- return JvmMemoryPoolNameKey.String(val)
-}
-
-// JvmThreadDaemon returns an attribute KeyValue conforming to the
-// "jvm.thread.daemon" semantic conventions. It represents whether the
-// thread is a daemon or not.
-func JvmThreadDaemon(val bool) attribute.KeyValue {
- return JvmThreadDaemonKey.Bool(val)
-}
-
-// Kubernetes resource attributes.
-const (
- // K8SClusterNameKey is the attribute Key conforming to the
- // "k8s.cluster.name" semantic conventions. It represents the name of the
- // cluster.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") - - // K8SClusterUIDKey is the attribute Key conforming to the - // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for - // the cluster, set to the UID of the `kube-system` namespace. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' - // Note: K8S doesn't have support for obtaining a cluster ID. If this is - // ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. Read on for the - // rationale. - // - // Every object created in a K8S cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8S ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T - // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). - // Which states: - // - // > If generated according to one of the mechanisms defined in Rec. - // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // different from all other UUIDs generated before 3603 A.D., or is - // extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") - - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") - - // K8SContainerStatusLastTerminatedReasonKey is the attribute Key - // conforming to the "k8s.container.status.last_terminated_reason" semantic - // conventions. It represents the last terminated reason of the Container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Evicted', 'Error' - K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") - - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") - - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") - - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") - - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") - - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") - - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") - - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") - - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. 
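-//
-// A hedged usage sketch (assuming this package is imported as `semconv`):
-// the restart count is typically paired with `k8s.container.name` to pin
-// down one container instance, e.g.
-//
-//     attrs := []attribute.KeyValue{
-//         semconv.K8SContainerName("redis"),
-//         semconv.K8SContainerRestartCount(2),
-//     }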
-func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue -// conforming to the "k8s.container.status.last_terminated_reason" semantic -// conventions. It represents the last terminated reason of the Container. -func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { - return K8SContainerStatusLastTerminatedReasonKey.String(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. -func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. 
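-//
-// Illustrative only: pod identity is usually recorded on the SDK resource,
-// e.g. with go.opentelemetry.io/otel/sdk/resource (an assumption, not part
-// of this file):
-//
-//     res, err := resource.New(ctx, resource.WithAttributes(
-//         semconv.K8SNamespaceName("default"),
-//         semconv.K8SPodName("opentelemetry-pod-autoconf"),
-//     ))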
-func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// Log attributes -const ( - // LogIostreamKey is the attribute Key conforming to the "log.iostream" - // semantic conventions. It represents the stream associated with the log. - // See below for a list of well-known values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - LogIostreamKey = attribute.Key("log.iostream") -) - -var ( - // Logs from stdout stream - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// Attributes for a file to which log was emitted. -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'audit.log' - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the - // basename of the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'uuid.log' - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. It represents the full path to the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/log/mysql/audit.log' - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full - // path to the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/lib/docker/uuid.log' - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") -) - -// LogFileName returns an attribute KeyValue conforming to the -// "log.file.name" semantic conventions. It represents the basename of the -// file. 
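-//
-// A small sketch (assuming this package is imported as `semconv`): the
-// plain and symlink-resolved variants can be recorded side by side, e.g.
-//
-//     attrs := []attribute.KeyValue{
-//         semconv.LogFileName("audit.log"),
-//         semconv.LogFilePath("/var/log/mysql/audit.log"),
-//     }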
-func LogFileName(val string) attribute.KeyValue {
- return LogFileNameKey.String(val)
-}
-
-// LogFileNameResolved returns an attribute KeyValue conforming to the
-// "log.file.name_resolved" semantic conventions. It represents the basename of
-// the file, with symlinks resolved.
-func LogFileNameResolved(val string) attribute.KeyValue {
- return LogFileNameResolvedKey.String(val)
-}
-
-// LogFilePath returns an attribute KeyValue conforming to the
-// "log.file.path" semantic conventions. It represents the full path to the
-// file.
-func LogFilePath(val string) attribute.KeyValue {
- return LogFilePathKey.String(val)
-}
-
-// LogFilePathResolved returns an attribute KeyValue conforming to the
-// "log.file.path_resolved" semantic conventions. It represents the full path
-// to the file, with symlinks resolved.
-func LogFilePathResolved(val string) attribute.KeyValue {
- return LogFilePathResolvedKey.String(val)
-}
-
-// The generic attributes that may be used in any Log Record.
-const (
- // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
- // semantic conventions. It represents a unique identifier for the Log
- // Record.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
- // Note: If an id is provided, other log records with the same id will be
- // considered duplicates and can be removed safely. This means that two
- // distinguishable log records MUST have different values.
- // The id MAY be a [Universally Unique Lexicographically Sortable
- // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
- // (e.g. UUID) may be used as needed.
- LogRecordUIDKey = attribute.Key("log.record.uid")
-)
-
-// LogRecordUID returns an attribute KeyValue conforming to the
-// "log.record.uid" semantic conventions. It represents a unique identifier for
-// the Log Record.
-func LogRecordUID(val string) attribute.KeyValue {
- return LogRecordUIDKey.String(val)
-}
-
-// Attributes describing telemetry around messaging systems and messaging
-// activities.
-const (
- // MessagingBatchMessageCountKey is the attribute Key conforming to the
- // "messaging.batch.message_count" semantic conventions. It represents the
- // number of messages sent, received, or processed in the scope of the
- // batching operation.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1, 2
- // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
- // spans that operate with a single message. When a messaging client
- // library supports both batch and single-message APIs for the same
- // operation, instrumentations SHOULD use `messaging.batch.message_count`
- // for batching APIs and SHOULD NOT use it for single-message APIs.
- MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
-
- // MessagingClientIDKey is the attribute Key conforming to the
- // "messaging.client.id" semantic conventions. It represents a unique
- // identifier for the client that consumes or produces a message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'client-5', 'myhost@8742@s8083jm'
- MessagingClientIDKey = attribute.Key("messaging.client.id")
-
- // MessagingDestinationAnonymousKey is the attribute Key conforming to the
- // "messaging.destination.anonymous" semantic conventions. 
It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") - - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker doesn't have such notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationPartitionIDKey is the attribute Key conforming to - // the "messaging.destination.partition.id" semantic conventions. It - // represents the identifier of the partition messages are sent to or - // received from, unique within the `messaging.destination.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1' - MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationPublishAnonymousKey is the attribute Key conforming - // to the "messaging.destination_publish.anonymous" semantic conventions. - // It represents a boolean that is true if the publish message destination - // is anonymous (could be unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") - - // MessagingDestinationPublishNameKey is the attribute Key conforming to - // the "messaging.destination_publish.name" semantic conventions. 
It - // represents the name of the original destination the message was - // published to - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: The name SHOULD uniquely identify a specific queue, topic, or - // other entity within the broker. If - // the broker doesn't have such notion, the original destination name - // SHOULD uniquely identify the broker. - MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") - - // MessagingMessageBodySizeKey is the attribute Key conforming to the - // "messaging.message.body.size" semantic conventions. It represents the - // size of the message body in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1439 - // Note: This can refer to both the compressed or uncompressed body size. - // If both sizes are known, the uncompressed - // body size should be used. - MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the conversation ID identifying the conversation to which the message - // belongs, represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the - // "messaging.message.envelope.size" semantic conventions. It represents - // the size of the message body and metadata in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2738 - // Note: This can refer to both the compressed or uncompressed size. If - // both sizes are known, the uncompressed - // size should be used. - MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") - - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingOperationNameKey is the attribute Key conforming to the - // "messaging.operation.name" semantic conventions. It represents the - // system-specific name of the messaging operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ack', 'nack', 'send' - MessagingOperationNameKey = attribute.Key("messaging.operation.name") - - // MessagingOperationTypeKey is the attribute Key conforming to the - // "messaging.operation.type" semantic conventions. It represents a string - // identifying the type of the messaging operation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationTypeKey = attribute.Key("messaging.operation.type") - - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents the messaging - // system as identified by the client instrumentation. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The actual messaging system may differ from the one known by the - // client. For example, when using Kafka client libraries to communicate - // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on - // the instrumentation's best knowledge. - MessagingSystemKey = attribute.Key("messaging.system") -) - -var ( - // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created - MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") - // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios - MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") - // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages - MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") - // One or more messages are delivered to or processed by a consumer - MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") - // One or more messages are settled - MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") -) - -var ( - // Apache ActiveMQ - MessagingSystemActivemq = MessagingSystemKey.String("activemq") - // Amazon Simple Queue Service (SQS) - MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") - // Azure Event Hubs - MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") - // Azure Service Bus - MessagingSystemServicebus = MessagingSystemKey.String("servicebus") - // Google Cloud Pub/Sub - MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - MessagingSystemJms = MessagingSystemKey.String("jms") - // Apache Kafka - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client.id" semantic conventions. It represents a unique -// identifier for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). 
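-//
-// As a hedged sketch (assuming `span` is an existing trace.Span and this
-// package is imported as `semconv`), a Kafka publish span might carry:
-//
-//     span.SetAttributes(
-//         semconv.MessagingSystemKafka,
-//         semconv.MessagingOperationTypePublish,
-//         semconv.MessagingDestinationName("MyTopic"),
-//     )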
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationPartitionID returns an attribute KeyValue conforming -// to the "messaging.destination.partition.id" semantic conventions. It -// represents the identifier of the partition messages are sent to or received -// from, unique within the `messaging.destination.name`. -func MessagingDestinationPartitionID(val string) attribute.KeyValue { - return MessagingDestinationPartitionIDKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. It represents the -// low cardinality representation of the messaging destination name -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationPublishAnonymous returns an attribute KeyValue -// conforming to the "messaging.destination_publish.anonymous" semantic -// conventions. It represents a boolean that is true if the publish message -// destination is anonymous (could be unnamed or have auto-generated name). -func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationPublishAnonymousKey.Bool(val) -} - -// MessagingDestinationPublishName returns an attribute KeyValue conforming -// to the "messaging.destination_publish.name" semantic conventions. It -// represents the name of the original destination the message was published to -func MessagingDestinationPublishName(val string) attribute.KeyValue { - return MessagingDestinationPublishNameKey.String(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size -// of the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the conversation ID identifying the conversation to which the -// message belongs, represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to -// the "messaging.message.envelope.size" semantic conventions. It represents -// the size of the message body and metadata in bytes. 
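-//
-// Illustrative only (names as defined in this file, `span` assumed): body
-// size excludes metadata while envelope size includes it, so both can be
-// recorded together:
-//
-//     span.SetAttributes(
-//         semconv.MessagingMessageBodySize(1439),
-//         semconv.MessagingMessageEnvelopeSize(2738),
-//     )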
-func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
- return MessagingMessageEnvelopeSizeKey.Int(val)
-}
-
-// MessagingMessageID returns an attribute KeyValue conforming to the
-// "messaging.message.id" semantic conventions. It represents a value used by
-// the messaging system as an identifier for the message, represented as a
-// string.
-func MessagingMessageID(val string) attribute.KeyValue {
- return MessagingMessageIDKey.String(val)
-}
-
-// MessagingOperationName returns an attribute KeyValue conforming to the
-// "messaging.operation.name" semantic conventions. It represents the
-// system-specific name of the messaging operation.
-func MessagingOperationName(val string) attribute.KeyValue {
- return MessagingOperationNameKey.String(val)
-}
-
-// This group describes attributes specific to Apache Kafka.
-const (
- // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
- // "messaging.kafka.consumer.group" semantic conventions. It represents the
- // name of the Kafka Consumer Group that is handling the message. Only
- // applies to consumers, not producers.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
-
- // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
- // "messaging.kafka.message.key" semantic conventions. It represents the
- // message keys in Kafka that are used for grouping alike messages to
- // ensure they're processed on the same partition. They differ from
- // `messaging.message.id` in that they're not unique. If the key is `null`,
- // the attribute MUST NOT be set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
- // Note: If the key type is not string, its string representation has to
- // be supplied for the attribute. If the key has no unambiguous, canonical
- // string form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
-
- // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
- // "messaging.kafka.message.offset" semantic conventions. It represents the
- // offset of a record in the corresponding Kafka partition.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
-
- // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
- // "messaging.kafka.message.tombstone" semantic conventions. It represents
- // a boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
-)
-
-// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
-// the "messaging.kafka.consumer.group" semantic conventions. It represents the
-// name of the Kafka Consumer Group that is handling the message. Only applies
-// to consumers, not producers.
-func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
- return MessagingKafkaConsumerGroupKey.String(val)
-}
-
-// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
-// "messaging.kafka.message.key" semantic conventions. It represents the
-// message keys in Kafka that are used for grouping alike messages to ensure
-// they're processed on the same partition. They differ from
-// `messaging.message.id` in that they're not unique. If the key is `null`,
-// the attribute MUST NOT be set.
-func MessagingKafkaMessageKey(val string) attribute.KeyValue {
- return MessagingKafkaMessageKeyKey.String(val)
-}
-
-// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
-// the "messaging.kafka.message.offset" semantic conventions. It represents the
-// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
- return MessagingKafkaMessageOffsetKey.Int(val)
-}
-
-// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
-// to the "messaging.kafka.message.tombstone" semantic conventions. It
-// represents a boolean that is true if the message is a tombstone.
-func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
- return MessagingKafkaMessageTombstoneKey.Bool(val)
-}
-
-// This group describes attributes specific to RabbitMQ.
-const (
- // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
- // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
- // conventions. It represents the RabbitMQ message routing key.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
- MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
-
- // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming
- // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions.
- // It represents the RabbitMQ message delivery tag.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 123
- MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
-)
-
-// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-// conventions. It represents the RabbitMQ message routing key.
-func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
- return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
-}
-
-// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic
-// conventions. It represents the RabbitMQ message delivery tag.
-func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue {
- return MessagingRabbitmqMessageDeliveryTagKey.Int(val)
-}
-
-// This group describes attributes specific to RocketMQ.
-const (
- // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_group" semantic conventions. It represents
- // the name of the RocketMQ producer/consumer group that is handling the
- // message. The client type is identified by the SpanKind.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myConsumerGroup'
- MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
-
- // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
- // the "messaging.rocketmq.consumption_model" semantic conventions. It
- // represents the model of message consumption. This only applies to
- // consumer spans.
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the it is essential for FIFO message. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. -func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the it is essential for FIFO message. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of message, another way to mark message besides message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions. 
- -// This group describes attributes specific to GCP Pub/Sub. -const ( - // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. - // It represents the ack deadline in seconds set for the modify ack - // deadline request. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") - - // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the - // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It - // represents the ack ID for a given message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ack_id' - MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") - - // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key - // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" - // semantic conventions. It represents the delivery attempt for a given - // message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") - - // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. - // It represents the ordering key for a given message. If the attribute is - // not present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ordering_key' - MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") -) - -// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic -// conventions. It represents the ack deadline in seconds set for the modify -// ack deadline request. -func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) -} - -// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming -// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It -// represents the ack ID for a given message. -func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageAckIDKey.String(val) -} - -// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic -// conventions. It represents the delivery attempt for a given message. -func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) -} - -// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic -// conventions. It represents the ordering key for a given message. If the -// attribute is not present, the message does not have an ordering key. -func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageOrderingKeyKey.String(val) -}
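The ordering-key convention above is conditional (the attribute must be omitted when the message has no ordering key), which is easy to get wrong. A small sketch of one way to honor it, with the same assumed `semconv` import as before and a hypothetical helper name:

```go
package main

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// pubsubAttrs collects Pub/Sub delivery attributes, leaving out
// messaging.gcp_pubsub.message.ordering_key when the message has none.
func pubsubAttrs(ackID, orderingKey string, deliveryAttempt int) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.MessagingGCPPubsubMessageAckID(ackID),
		semconv.MessagingGCPPubsubMessageDeliveryAttempt(deliveryAttempt),
	}
	if orderingKey != "" {
		attrs = append(attrs, semconv.MessagingGCPPubsubMessageOrderingKey(orderingKey))
	}
	return attrs
}
```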
- -// This group describes attributes specific to Azure Service Bus. -const ( - // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key - // conforming to the "messaging.servicebus.destination.subscription_name" - // semantic conventions. It represents the name of the subscription in the - // topic messages are received from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mySubscription' - MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name") - - // MessagingServicebusDispositionStatusKey is the attribute Key conforming - // to the "messaging.servicebus.disposition_status" semantic conventions. - // It describes the [settlement - // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") - - // MessagingServicebusMessageDeliveryCountKey is the attribute Key - // conforming to the "messaging.servicebus.message.delivery_count" semantic - // conventions. It represents the number of deliveries that have been - // attempted for this message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") - - // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key - // conforming to the "messaging.servicebus.message.enqueued_time" semantic - // conventions. It represents the UTC epoch seconds at which the message - // has been accepted and stored in the entity. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1701393730 - MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") -) - -var ( - // Message is completed - MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete") - // Message is abandoned - MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon") - // Message is sent to dead letter queue - MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter") - // Message is deferred - MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer") -) - -// MessagingServicebusDestinationSubscriptionName returns an attribute -// KeyValue conforming to the -// "messaging.servicebus.destination.subscription_name" semantic conventions. -// It represents the name of the subscription in the topic messages are -// received from. -func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue { - return MessagingServicebusDestinationSubscriptionNameKey.String(val) -} - -// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.delivery_count" semantic -// conventions. It represents the number of deliveries that have been attempted -// for this message.
-func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { - return MessagingServicebusMessageDeliveryCountKey.Int(val) -} - -// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has -// been accepted and stored in the entity. -func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingServicebusMessageEnqueuedTimeKey.Int(val) -} - -// This group describes attributes specific to Azure Event Hubs. -const ( - // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to - // the "messaging.eventhubs.consumer.group" semantic conventions. It - // represents the name of the consumer group the event consumer is - // associated with. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'indexer' - MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") - - // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming - // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. - // It represents the UTC epoch seconds at which the message has been - // accepted and stored in the entity. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1701393730 - MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") -) - -// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming -// to the "messaging.eventhubs.consumer.group" semantic conventions. It -// represents the name of the consumer group the event consumer is associated -// with. -func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { - return MessagingEventhubsConsumerGroupKey.String(val) -} - -// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.eventhubs.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has -// been accepted and stored in the entity. -func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetworkCarrierIccKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier - // network. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'DE' - NetworkCarrierIccKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMccKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '310' - NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMncKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '001' - NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'sprint' - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It describes more - // details regarding the connection.type. It may be the type of cell - // technology connection, but it could be used for describing details about - // a Wi-Fi connection. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'LTE' - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the - // internet connection type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'wifi' - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkIoDirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network - // IO operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'transmit' - NetworkIoDirectionKey = attribute.Key("network.io.direction") - - // NetworkLocalAddressKey is the attribute Key conforming to the - // "network.local.address" semantic conventions. It represents the local - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkLocalAddressKey = attribute.Key("network.local.address") - - // NetworkLocalPortKey is the attribute Key conforming to the - // "network.local.port" semantic conventions. It represents the local port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkLocalPortKey = attribute.Key("network.local.port") - - // NetworkPeerAddressKey is the attribute Key conforming to the - // "network.peer.address" semantic conventions. It represents the peer - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkPeerAddressKey = attribute.Key("network.peer.address") - - // NetworkPeerPortKey is the attribute Key conforming to the - // "network.peer.port" semantic conventions. It represents the peer port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkPeerPortKey = attribute.Key("network.peer.port") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the [OSI - // application layer](https://osi-model.com/application-layer/) or non-OSI - // equivalent.
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - // Note: The value SHOULD be normalized to lowercase. - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the - // actual version of the protocol used for network communication. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.1', '2' - // Note: If protocol version is subject to negotiation (for example using - // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute - // SHOULD be set to the negotiated version. If the actual protocol version - // is not known, this attribute SHOULD NOT be set. - NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the [OSI - // transport layer](https://osi-model.com/transport-layer/) or - // [inter-process communication - // method](https://wikipedia.org/wiki/Inter-process_communication). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tcp', 'udp' - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port - // 12345. - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" - // semantic conventions. It represents the [OSI network - // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ipv4', 'ipv6' - // Note: The value SHOULD be normalized to lowercase. - NetworkTypeKey = attribute.Key("network.type") -) - -var ( - // GPRS - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -var ( - // wifi - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -var ( - // transmit - NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") - // receive - NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") -) - -var ( - // TCP - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - NetworkTransportUnix = NetworkTransportKey.String("unix") -) - -var ( - // IPv4 - NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") - // IPv6 - NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") -) - -// NetworkCarrierIcc returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierIcc(val string) attribute.KeyValue { - return NetworkCarrierIccKey.String(val) -} - -// NetworkCarrierMcc returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. -func NetworkCarrierMcc(val string) attribute.KeyValue { - return NetworkCarrierMccKey.String(val) -} - -// NetworkCarrierMnc returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMnc(val string) attribute.KeyValue { - return NetworkCarrierMncKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local -// address of the network connection - IP address or Unix domain socket name. 
-func NetworkLocalAddress(val string) attribute.KeyValue { - return NetworkLocalAddressKey.String(val) -} - -// NetworkLocalPort returns an attribute KeyValue conforming to the -// "network.local.port" semantic conventions. It represents the local port -// number of the network connection. -func NetworkLocalPort(val int) attribute.KeyValue { - return NetworkLocalPortKey.Int(val) -} - -// NetworkPeerAddress returns an attribute KeyValue conforming to the -// "network.peer.address" semantic conventions. It represents the peer address -// of the network connection - IP address or Unix domain socket name. -func NetworkPeerAddress(val string) attribute.KeyValue { - return NetworkPeerAddressKey.String(val) -} - -// NetworkPeerPort returns an attribute KeyValue conforming to the -// "network.peer.port" semantic conventions. It represents the peer port number -// of the network connection. -func NetworkPeerPort(val int) attribute.KeyValue { - return NetworkPeerPortKey.Int(val) -} - -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the [OSI -// application layer](https://osi-model.com/application-layer/) or non-OSI -// equivalent. -func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) -} - -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. It represents the actual -// version of the protocol used for network communication. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) -}
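Several of the network.* helpers above come naturally from a live socket. A sketch of deriving the stable peer attributes from a `net.Conn` (hypothetical helper with deliberately simplified port parsing; a real implementation would also branch on the transport type):

```go
package main

import (
	"net"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// connAttrs derives network.transport, network.peer.address, and
// network.peer.port for an established TCP connection.
func connAttrs(c net.Conn) []attribute.KeyValue {
	attrs := []attribute.KeyValue{semconv.NetworkTransportTCP}
	host, port, err := net.SplitHostPort(c.RemoteAddr().String())
	if err != nil {
		return attrs
	}
	attrs = append(attrs, semconv.NetworkPeerAddress(host))
	if p, err := strconv.Atoi(port); err == nil {
		attrs = append(attrs, semconv.NetworkPeerPort(p))
	}
	return attrs
}
```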
- -// An OCI image manifest. -const ( - // OciManifestDigestKey is the attribute Key conforming to the - // "oci.manifest.digest" semantic conventions. It represents the digest of - // the OCI image manifest. For container images specifically, this is the - // digest by which the container image is known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' - // Note: Follows [OCI Image Manifest - // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), - // and specifically the [Digest - // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). - // An example can be found in [Example Image - // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). - OciManifestDigestKey = attribute.Key("oci.manifest.digest") -) - -// OciManifestDigest returns an attribute KeyValue conforming to the -// "oci.manifest.digest" semantic conventions. It represents the digest of the -// OCI image manifest. For container images specifically, this is the digest by -// which the container image is known. -func OciManifestDigest(val string) attribute.KeyValue { - return OciManifestDigestKey.String(val) -} - -// Attributes used by the OpenTracing Shim layer. -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The causal relationship between a child Span and a parent Span. - OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" - // semantic conventions. It represents the unique identifier for a - // particular build or compilation of the operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human-readable (not intended to - // be parsed) OS version information, for example as reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human-readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions.
It represents the human-readable (not -// intended to be parsed) OS version information, for example as reported by -// `ver` or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human-readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// Attributes reserved for OpenTelemetry -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OTelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -}
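Resource-level attributes such as the os.* group above are typically attached once at SDK setup rather than per span. A minimal hedged sketch, assuming the OpenTelemetry SDK's `resource` package and this package's `SchemaURL` constant; the values are hard-coded purely for illustration, where real code would detect them:

```go
package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// osResource bundles os.* attributes into an SDK resource tied to this
// semconv package's schema URL.
func osResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.OSTypeLinux, // enum value from the os.type variants above
		semconv.OSName("Ubuntu"),
		semconv.OSVersion("22.04.1"),
		semconv.OSBuildID("22621"),
	)
}
```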
- -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](/docs/resource/README.md#service) of the remote - // service. SHOULD be equal to the actual `service.name` resource attribute - // of the remote service if any. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](/docs/resource/README.md#service) of the remote service. -// SHOULD be equal to the actual `service.name` resource attribute of the -// remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// An operating system process. -const ( - // ProcessCommandKey is the attribute Key conforming to the - // "process.command" semantic conventions. It represents the command used - // to launch the process (i.e. the command name). On Linux based systems, - // can be set to the zeroth string in `/proc/[pid]/cmdline`. On Windows, - // can be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents all the - // command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `/proc/[pid]/cmdline`. For libc-based - // executables, this would be the full argv vector passed to `main`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otelcol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. It represents the full - // command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. - // Do not set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessContextSwitchTypeKey is the attribute Key conforming to the - // "process.context_switch_type" semantic conventions. It specifies whether - // the context switches for this data point were voluntary or involuntary.
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") - - // ProcessCreationTimeKey is the attribute Key conforming to the - // "process.creation.time" semantic conventions. It represents the date and - // time the process was created, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2023-11-21T09:25:34.853Z' - ProcessCreationTimeKey = attribute.Key("process.creation.time") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `/proc/[pid]/status`. On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `/proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessExitCodeKey is the attribute Key conforming to the - // "process.exit.code" semantic conventions. It represents the exit code of - // the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 127 - ProcessExitCodeKey = attribute.Key("process.exit.code") - - // ProcessExitTimeKey is the attribute Key conforming to the - // "process.exit.time" semantic conventions. It represents the date and - // time the process exited, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2023-11-21T09:26:12.315Z' - ProcessExitTimeKey = attribute.Key("process.exit.time") - - // ProcessGroupLeaderPIDKey is the attribute Key conforming to the - // "process.group_leader.pid" semantic conventions. It represents the PID - // of the process's group leader. This is also the process group ID (PGID) - // of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 23 - ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") - - // ProcessInteractiveKey is the attribute Key conforming to the - // "process.interactive" semantic conventions. It represents whether the - // process is connected to an interactive shell. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - ProcessInteractiveKey = attribute.Key("process.interactive") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns - // the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") - - // ProcessPagingFaultTypeKey is the attribute Key conforming to the - // "process.paging.fault_type" semantic conventions.
It represents the type - // of page fault for this data point. Type `major` is for major/hard page - // faults, and `minor` is for minor/soft page faults. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PPID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRealUserIDKey is the attribute Key conforming to the - // "process.real_user.id" semantic conventions. It represents the real user - // ID (RUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000 - ProcessRealUserIDKey = attribute.Key("process.real_user.id") - - // ProcessRealUserNameKey is the attribute Key conforming to the - // "process.real_user.name" semantic conventions. It represents the - // username of the real user of the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'operator' - ProcessRealUserNameKey = attribute.Key("process.real_user.name") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - - // ProcessSavedUserIDKey is the attribute Key conforming to the - // "process.saved_user.id" semantic conventions. It represents the saved - // user ID (SUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1002 - ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") - - // ProcessSavedUserNameKey is the attribute Key conforming to the - // "process.saved_user.name" semantic conventions. It represents the - // username of the saved user. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'operator' - ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") - - // ProcessSessionLeaderPIDKey is the attribute Key conforming to the - // "process.session_leader.pid" semantic conventions. It represents the PID - // of the process's session leader. This is also the session ID (SID) of - // the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 14 - ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") - - // ProcessUserIDKey is the attribute Key conforming to the - // "process.user.id" semantic conventions. It represents the effective user - // ID (EUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1001 - ProcessUserIDKey = attribute.Key("process.user.id") - - // ProcessUserNameKey is the attribute Key conforming to the - // "process.user.name" semantic conventions. It represents the username of - // the effective user of the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'root' - ProcessUserNameKey = attribute.Key("process.user.name") - - // ProcessVpidKey is the attribute Key conforming to the "process.vpid" - // semantic conventions. It represents the virtual process identifier. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 12 - // Note: The process ID within a PID namespace. This is not necessarily - // unique across all processes on the host but it is unique within the - // process namespace that the process exists within. - ProcessVpidKey = attribute.Key("process.vpid") -) - -var ( - // voluntary - ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") - // involuntary - ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") -) - -var ( - // major - ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") - // minor - ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") -) - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be -// set to the zeroth string in `/proc/[pid]/cmdline`. On Windows, can be set to -// the first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents all the -// command arguments (including the command/executable itself) as received by -// the process. On Linux-based systems (and some other Unixoid systems -// supporting procfs), can be set according to the list of null-delimited -// strings extracted from `/proc/[pid]/cmdline`. For libc-based executables, -// this would be the full argv vector passed to `main`. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. Do not set this -// if you have to assemble it just for monitoring; use `process.command_args` -// instead. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -}
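A self-describing process can fill most of these attributes from the standard library alone. A minimal sketch (hypothetical helper; note that the convention above prefers `process.command_args` over assembling `process.command_line` by hand, and that ProcessPID and ProcessParentPID are defined a few declarations below):

```go
package main

import (
	"os"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// selfProcessAttrs describes the current process using the constructors
// in this group.
func selfProcessAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessParentPID(os.Getppid()),
		semconv.ProcessCommand(os.Args[0]),
		semconv.ProcessCommandArgs(os.Args...),
	}
}
```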
- -// ProcessCreationTime returns an attribute KeyValue conforming to the -// "process.creation.time" semantic conventions. It represents the date and -// time the process was created, in ISO 8601 format. -func ProcessCreationTime(val string) attribute.KeyValue { - return ProcessCreationTimeKey.String(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of -// the process executable. On Linux based systems, can be set to the `Name` in -// `/proc/[pid]/status`. On Windows, can be set to the base name of -// `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. It represents the full path -// to the process executable. On Linux based systems, can be set to the target -// of `/proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. -func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessExitCode returns an attribute KeyValue conforming to the -// "process.exit.code" semantic conventions. It represents the exit code of the -// process. -func ProcessExitCode(val int) attribute.KeyValue { - return ProcessExitCodeKey.Int(val) -} - -// ProcessExitTime returns an attribute KeyValue conforming to the -// "process.exit.time" semantic conventions. It represents the date and time -// the process exited, in ISO 8601 format. -func ProcessExitTime(val string) attribute.KeyValue { - return ProcessExitTimeKey.String(val) -} - -// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the -// "process.group_leader.pid" semantic conventions. It represents the PID of -// the process's group leader. This is also the process group ID (PGID) of the -// process. -func ProcessGroupLeaderPID(val int) attribute.KeyValue { - return ProcessGroupLeaderPIDKey.Int(val) -} - -// ProcessInteractive returns an attribute KeyValue conforming to the -// "process.interactive" semantic conventions. It represents whether the -// process is connected to an interactive shell. -func ProcessInteractive(val bool) attribute.KeyValue { - return ProcessInteractiveKey.Bool(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the -// "process.owner" semantic conventions. It represents the username of the user -// that owns the process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PPID). -func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessRealUserID returns an attribute KeyValue conforming to the -// "process.real_user.id" semantic conventions. It represents the real user ID -// (RUID) of the process.
-func ProcessRealUserID(val int) attribute.KeyValue { - return ProcessRealUserIDKey.Int(val) -} - -// ProcessRealUserName returns an attribute KeyValue conforming to the -// "process.real_user.name" semantic conventions. It represents the username of -// the real user of the process. -func ProcessRealUserName(val string) attribute.KeyValue { - return ProcessRealUserNameKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessSavedUserID returns an attribute KeyValue conforming to the -// "process.saved_user.id" semantic conventions. It represents the saved user -// ID (SUID) of the process. -func ProcessSavedUserID(val int) attribute.KeyValue { - return ProcessSavedUserIDKey.Int(val) -} - -// ProcessSavedUserName returns an attribute KeyValue conforming to the -// "process.saved_user.name" semantic conventions. It represents the username -// of the saved user. -func ProcessSavedUserName(val string) attribute.KeyValue { - return ProcessSavedUserNameKey.String(val) -} - -// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the -// "process.session_leader.pid" semantic conventions. It represents the PID of -// the process's session leader. This is also the session ID (SID) of the -// process. -func ProcessSessionLeaderPID(val int) attribute.KeyValue { - return ProcessSessionLeaderPIDKey.Int(val) -} - -// ProcessUserID returns an attribute KeyValue conforming to the -// "process.user.id" semantic conventions. It represents the effective user ID -// (EUID) of the process. -func ProcessUserID(val int) attribute.KeyValue { - return ProcessUserIDKey.Int(val) -} - -// ProcessUserName returns an attribute KeyValue conforming to the -// "process.user.name" semantic conventions. It represents the username of the -// effective user of the process. -func ProcessUserName(val string) attribute.KeyValue { - return ProcessUserNameKey.String(val) -} - -// ProcessVpid returns an attribute KeyValue conforming to the -// "process.vpid" semantic conventions. It represents the virtual process -// identifier. -func ProcessVpid(val int) attribute.KeyValue { - return ProcessVpidKey.Int(val) -} - -// Attributes for process CPU -const ( - // ProcessCPUStateKey is the attribute Key conforming to the - // "process.cpu.state" semantic conventions. It represents the CPU state of - // the process. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessCPUStateKey = attribute.Key("process.cpu.state") -) - -var ( - // system - ProcessCPUStateSystem = ProcessCPUStateKey.String("system") - // user - ProcessCPUStateUser = ProcessCPUStateKey.String("user") - // wait - ProcessCPUStateWait = ProcessCPUStateKey.String("wait") -) - -// Attributes for remote procedure calls. -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes](https://connect.build/docs/protocol/#error-codes) of the - // Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") - - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // doesn't specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCMessageCompressedSizeKey is the attribute Key conforming to the - // "rpc.message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") - - // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" - // semantic conventions. It MUST be calculated as two different counters - // starting from `1`, one for sent messages and one for received messages. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Note: This way we guarantee that the values will be consistent between - // different implementations. - RPCMessageIDKey = attribute.Key("rpc.message.id") - - // RPCMessageTypeKey is the attribute Key conforming to the - // "rpc.message.type" semantic conventions. It represents whether this is a - // received or sent message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCMessageTypeKey = attribute.Key("rpc.message.type") - - // RPCMessageUncompressedSizeKey is the attribute Key conforming to the - // "rpc.message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" - // semantic conventions. It represents the name of the (logical) method - // being called, must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" - // semantic conventions. It represents the full (logical) name of the - // service being called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing - // class. The `code.namespace` attribute may be used to store the latter - // (despite the attribute name, it may include a class name; e.g., class - // with method actually executing the call on the server side, RPC client - // stub class on the client side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCSystemKey is the attribute Key conforming to the "rpc.system" - // semantic conventions. It represents a string identifying the remoting - // system. See below for a list of well-known identifiers.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	RPCSystemKey = attribute.Key("rpc.system")
-)
-
-var (
-	// cancelled
-	RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
-	// unknown
-	RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
-	// invalid_argument
-	RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
-	// deadline_exceeded
-	RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
-	// not_found
-	RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
-	// already_exists
-	RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
-	// permission_denied
-	RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
-	// resource_exhausted
-	RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
-	// failed_precondition
-	RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
-	// aborted
-	RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
-	// out_of_range
-	RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
-	// unimplemented
-	RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
-	// internal
-	RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
-	// unavailable
-	RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
-	// data_loss
-	RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
-	// unauthenticated
-	RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
-)
-
-var (
-	// OK
-	RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
-	// CANCELLED
-	RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
-	// UNKNOWN
-	RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
-	// INVALID_ARGUMENT
-	RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
-	// DEADLINE_EXCEEDED
-	RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
-	// NOT_FOUND
-	RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
-	// ALREADY_EXISTS
-	RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
-	// PERMISSION_DENIED
-	RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
-	// RESOURCE_EXHAUSTED
-	RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
-	// FAILED_PRECONDITION
-	RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
-	// ABORTED
-	RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
-	// OUT_OF_RANGE
-	RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
-	// UNIMPLEMENTED
-	RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
-	// INTERNAL
-	RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
-	// UNAVAILABLE
-	RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
-	// DATA_LOSS
-	RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
-	// UNAUTHENTICATED
-	RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
-
-var (
-	// sent
-	RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
-	// received
-	RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
-)
-
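Reviewer note, not part of the patch: these enum variables are ordinary `attribute.KeyValue`s, so instrumentation can attach them to spans and span events directly. A minimal sketch, assuming this file is vendored as the `semconv` v1.24.0 package (the import path is inferred from the contents, not confirmed by the diff):

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed vendored version
	"go.opentelemetry.io/otel/trace"
)

// recordCall records one sent message event (rpc.message.id counts from 1
// per direction, per the note above) and the final gRPC status code.
func recordCall(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "myservice.EchoService/exampleMethod")
	defer span.End()

	span.AddEvent("message", trace.WithAttributes(
		semconv.RPCMessageTypeSent,
		semconv.RPCMessageID(1),
	))
	span.SetAttributes(semconv.RPCGRPCStatusCodeOk)
}
```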
-var (
-	// gRPC
-	RPCSystemGRPC = RPCSystemKey.String("grpc")
-	// Java RMI
-	RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
-	// .NET WCF
-	RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
-	// Apache Dubbo
-	RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
-	// Connect RPC
-	RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
-)
-
-// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-// `error.code` property of response if it is an error response.
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
-	return RPCJsonrpcErrorCodeKey.Int(val)
-}
-
-// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-// `error.message` property of response if it is an error response.
-func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
-	return RPCJsonrpcErrorMessageKey.String(val)
-}
-
-// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-// property of request or response. Since protocol allows id to be int, string,
-// `null` or missing (for notifications), value is expected to be cast to
-// string for simplicity. Use empty string in case of `null` value. Omit
-// entirely if this is a notification.
-func RPCJsonrpcRequestID(val string) attribute.KeyValue {
-	return RPCJsonrpcRequestIDKey.String(val)
-}
-
-// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
-// doesn't specify this, the value can be omitted.
-func RPCJsonrpcVersion(val string) attribute.KeyValue {
-	return RPCJsonrpcVersionKey.String(val)
-}
-
-// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
-// "rpc.message.compressed_size" semantic conventions. It represents the
-// compressed size of the message in bytes.
-func RPCMessageCompressedSize(val int) attribute.KeyValue {
-	return RPCMessageCompressedSizeKey.Int(val)
-}
-
-// RPCMessageID returns an attribute KeyValue conforming to the
-// "rpc.message.id" semantic conventions. It MUST be calculated
-// as two different counters starting from `1`, one for sent messages and one
-// for received messages.
-func RPCMessageID(val int) attribute.KeyValue {
-	return RPCMessageIDKey.Int(val)
-}
-
-// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
-// the "rpc.message.uncompressed_size" semantic conventions. It represents the
-// uncompressed size of the message in bytes.
-func RPCMessageUncompressedSize(val int) attribute.KeyValue {
-	return RPCMessageUncompressedSizeKey.Int(val)
-}
-
-// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
-// semantic conventions. It represents the name of the (logical) method being
-// called, must be equal to the $method part in the span name.
-func RPCMethod(val string) attribute.KeyValue {
-	return RPCMethodKey.String(val)
-}
-
-// RPCService returns an attribute KeyValue conforming to the "rpc.service"
-// semantic conventions. It represents the full (logical) name of the service
-// being called, including its package name, if applicable.
-func RPCService(val string) attribute.KeyValue {
-	return RPCServiceKey.String(val)
-}
-
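For orientation, a sketch of the span-name/attribute relationship the comments above describe: `rpc.method` must match the `$method` part of the span name. This assumes the same inferred `semconv` v1.24.0 import path as above; it is illustrative, not part of the vendored file:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed vendored version
	"go.opentelemetry.io/otel/trace"
)

// startConnectSpan names the span "$service/$method" and mirrors the parts
// in rpc.service and rpc.method, with rpc.system naming the framework.
func startConnectSpan(ctx context.Context) (context.Context, trace.Span) {
	return otel.Tracer("example").Start(ctx, "myservice.EchoService/exampleMethod",
		trace.WithAttributes(
			semconv.RPCSystemConnectRPC,
			semconv.RPCService("myservice.EchoService"),
			semconv.RPCMethod("exampleMethod"),
		))
}
```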
-// These attributes may be used to describe the server in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
-	// ServerAddressKey is the attribute Key conforming to the "server.address"
-	// semantic conventions. It represents the server domain name if available
-	// without reverse DNS lookup; otherwise, IP address or Unix domain socket
-	// name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
-	// Note: When observed from the client side, and when communicating through
-	// an intermediary, `server.address` SHOULD represent the server address
-	// behind any intermediaries, for example proxies, if it's available.
-	ServerAddressKey = attribute.Key("server.address")
-
-	// ServerPortKey is the attribute Key conforming to the "server.port"
-	// semantic conventions. It represents the server port number.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 80, 8080, 443
-	// Note: When observed from the client side, and when communicating through
-	// an intermediary, `server.port` SHOULD represent the server port behind
-	// any intermediaries, for example proxies, if it's available.
-	ServerPortKey = attribute.Key("server.port")
-)
-
-// ServerAddress returns an attribute KeyValue conforming to the
-// "server.address" semantic conventions. It represents the server domain name
-// if available without reverse DNS lookup; otherwise, IP address or Unix
-// domain socket name.
-func ServerAddress(val string) attribute.KeyValue {
-	return ServerAddressKey.String(val)
-}
-
-// ServerPort returns an attribute KeyValue conforming to the "server.port"
-// semantic conventions. It represents the server port number.
-func ServerPort(val int) attribute.KeyValue {
-	return ServerPortKey.Int(val)
-}
-
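A small illustrative pairing of the two stable keys above (same assumed `semconv` import as the earlier sketches; `serverAttrs` is a hypothetical helper, not an API of this package):

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed vendored version
)

// serverAttrs pairs server.address and server.port for a client-side span,
// e.g. serverAttrs("example.com", 443). The address may be a domain name,
// an IP, or a Unix socket path, per the comments above.
func serverAttrs(host string, port int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.ServerAddress(host),
		semconv.ServerPort(port),
	}
}
```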
-// A service instance.
-const (
-	// ServiceInstanceIDKey is the attribute Key conforming to the
-	// "service.instance.id" semantic conventions. It represents the string ID
-	// of the service instance.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '627cc493-f310-47de-96bd-71410b7dec09'
-	// Note: MUST be unique for each instance of the same
-	// `service.namespace,service.name` pair (in other words
-	// `service.namespace,service.name,service.instance.id` triplet MUST be
-	// globally unique). The ID helps to
-	// distinguish instances of the same service that exist at the same time
-	// (e.g. instances of a horizontally scaled
-	// service).
-	//
-	// Implementations, such as SDKs, are recommended to generate a random
-	// Version 1 or Version 4 [RFC
-	// 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an
-	// inherent unique ID as the source of
-	// this value if stability is desirable. In that case, the ID SHOULD be
-	// used as source of a UUID Version 5 and
-	// SHOULD use the following UUID as the namespace:
-	// `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
-	//
-	// UUIDs are typically recommended, as only an opaque value for the
-	// purposes of identifying a service instance is
-	// needed. Similar to what can be seen in the man page for the
-	// [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
-	// file, the underlying
-	// data, such as pod name and namespace, should be treated as confidential,
-	// it being the user's choice to expose it
-	// or not via another resource attribute.
-	//
-	// For applications running behind an application server (like unicorn), we
-	// do not recommend using one identifier
-	// for all processes participating in the application. Instead, it's
-	// recommended that each division (e.g. a worker
-	// thread in unicorn) have its own instance.id.
-	//
-	// It's not recommended for a Collector to set `service.instance.id` if it
-	// can't unambiguously determine the
-	// service instance that is generating that telemetry. For instance,
-	// creating a UUID based on `pod.name` will
-	// likely be wrong, as the Collector might not know from which container
-	// within that pod the telemetry originated.
-	// However, Collectors can set the `service.instance.id` if they can
-	// unambiguously determine the service instance
-	// for that telemetry. This is typically the case for scraping receivers,
-	// as they know the target address and
-	// port.
-	ServiceInstanceIDKey = attribute.Key("service.instance.id")
-
-	// ServiceNameKey is the attribute Key conforming to the "service.name"
-	// semantic conventions. It represents the logical name of the service.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'shoppingcart'
-	// Note: MUST be the same for all instances of horizontally scaled
-	// services. If the value was not specified, SDKs MUST fallback to
-	// `unknown_service:` concatenated with
-	// [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
-	// `process.executable.name` is not available, the value MUST be set to
-	// `unknown_service`.
-	ServiceNameKey = attribute.Key("service.name")
-
-	// ServiceNamespaceKey is the attribute Key conforming to the
-	// "service.namespace" semantic conventions. It represents a namespace for
-	// `service.name`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Shop'
-	// Note: A string value having a meaning that helps to distinguish a group
-	// of services, for example the team name that owns a group of services.
-	// `service.name` is expected to be unique within the same namespace. If
-	// `service.namespace` is not specified in the Resource then `service.name`
-	// is expected to be unique for all services that have no explicit
-	// namespace defined (so the empty/unspecified namespace is simply one more
-	// valid namespace). Zero-length namespace string is assumed equal to
-	// unspecified namespace.
-	ServiceNamespaceKey = attribute.Key("service.namespace")
-
-	// ServiceVersionKey is the attribute Key conforming to the
-	// "service.version" semantic conventions. It represents the version string
-	// of the service API or implementation. The format is not defined by these
-	// conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: '2.0.0', 'a01dbef8a'
-	ServiceVersionKey = attribute.Key("service.version")
-)
-
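The note above recommends a random Version 4 UUID for `service.instance.id`. A sketch of that recommendation; `github.com/google/uuid` is an assumed helper dependency for illustration only (nothing in this diff shows it in abra's go.mod):

```go
package example

import (
	"github.com/google/uuid" // assumed dependency, for illustration only

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed vendored version
)

// instanceID generates an opaque, per-process service.instance.id value,
// following the "random Version 4 UUID" recommendation in the note above.
func instanceID() attribute.KeyValue {
	return semconv.ServiceInstanceID(uuid.NewString())
}
```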
-// ServiceInstanceID returns an attribute KeyValue conforming to the
-// "service.instance.id" semantic conventions. It represents the string ID of
-// the service instance.
-func ServiceInstanceID(val string) attribute.KeyValue {
-	return ServiceInstanceIDKey.String(val)
-}
-
-// ServiceName returns an attribute KeyValue conforming to the
-// "service.name" semantic conventions. It represents the logical name of the
-// service.
-func ServiceName(val string) attribute.KeyValue {
-	return ServiceNameKey.String(val)
-}
-
-// ServiceNamespace returns an attribute KeyValue conforming to the
-// "service.namespace" semantic conventions. It represents a namespace for
-// `service.name`.
-func ServiceNamespace(val string) attribute.KeyValue {
-	return ServiceNamespaceKey.String(val)
-}
-
-// ServiceVersion returns an attribute KeyValue conforming to the
-// "service.version" semantic conventions. It represents the version string of
-// the service API or implementation. The format is not defined by these
-// conventions.
-func ServiceVersion(val string) attribute.KeyValue {
-	return ServiceVersionKey.String(val)
-}
-
-// Session is defined as the period of time encompassing all activities
-// performed by the application and the actions executed by the end user.
-// Consequently, a Session is represented as a collection of Logs, Events, and
-// Spans emitted by the Client Application throughout the Session's duration.
-// Each Session is assigned a unique identifier, which is included as an
-// attribute in the Logs, Events, and Spans generated during the Session's
-// lifecycle.
-// When a session reaches end of life, typically due to user inactivity or
-// session timeout, a new session identifier will be assigned. The previous
-// session identifier may be provided by the instrumentation so that telemetry
-// backends can link the two sessions.
-const (
-	// SessionIDKey is the attribute Key conforming to the "session.id"
-	// semantic conventions. It represents a unique id to identify a session.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
-	SessionIDKey = attribute.Key("session.id")
-
-	// SessionPreviousIDKey is the attribute Key conforming to the
-	// "session.previous_id" semantic conventions. It represents the previous
-	// `session.id` for this user, when known.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
-	SessionPreviousIDKey = attribute.Key("session.previous_id")
-)
-
-// SessionID returns an attribute KeyValue conforming to the "session.id"
-// semantic conventions. It represents a unique id to identify a session.
-func SessionID(val string) attribute.KeyValue {
-	return SessionIDKey.String(val)
-}
-
-// SessionPreviousID returns an attribute KeyValue conforming to the
-// "session.previous_id" semantic conventions. It represents the previous
-// `session.id` for this user, when known.
-func SessionPreviousID(val string) attribute.KeyValue {
-	return SessionPreviousIDKey.String(val)
-}
-
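Putting the service identity constructors above together: they are typically grouped on an SDK resource. A minimal sketch under the same assumed import paths (`resource.NewWithAttributes` and `semconv.SchemaURL` are standard OTel Go SDK/semconv APIs):

```go
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed vendored version
)

// newResource groups the service identity attributes: service.name must be
// stable across instances, while service.instance.id is unique per instance.
func newResource(instanceID string) *resource.Resource {
	return resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ServiceName("shoppingcart"),
		semconv.ServiceNamespace("Shop"),
		semconv.ServiceVersion("2.0.0"),
		semconv.ServiceInstanceID(instanceID),
	)
}
```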
-// SignalR attributes
-const (
-	// SignalrConnectionStatusKey is the attribute Key conforming to the
-	// "signalr.connection.status" semantic conventions. It represents the
-	// SignalR HTTP connection closure status.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'app_shutdown', 'timeout'
-	SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
-
-	// SignalrTransportKey is the attribute Key conforming to the
-	// "signalr.transport" semantic conventions. It represents the [SignalR
-	// transport
-	// type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'web_sockets', 'long_polling'
-	SignalrTransportKey = attribute.Key("signalr.transport")
-)
-
-var (
-	// The connection was closed normally
-	SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
-	// The connection was closed due to a timeout
-	SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
-	// The connection was closed because the app is shutting down
-	SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
-)
-
-var (
-	// ServerSentEvents protocol
-	SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
-	// LongPolling protocol
-	SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
-	// WebSockets protocol
-	SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
-)
-
-// These attributes may be used to describe the sender of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
-	// SourceAddressKey is the attribute Key conforming to the "source.address"
-	// semantic conventions. It represents the source address - domain name if
-	// available without reverse DNS lookup; otherwise, IP address or Unix
-	// domain socket name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
-	// Note: When observed from the destination side, and when communicating
-	// through an intermediary, `source.address` SHOULD represent the source
-	// address behind any intermediaries, for example proxies, if it's
-	// available.
-	SourceAddressKey = attribute.Key("source.address")
-
-	// SourcePortKey is the attribute Key conforming to the "source.port"
-	// semantic conventions. It represents the source port number
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 3389, 2888
-	SourcePortKey = attribute.Key("source.port")
-)
-
-// SourceAddress returns an attribute KeyValue conforming to the
-// "source.address" semantic conventions. It represents the source address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func SourceAddress(val string) attribute.KeyValue {
-	return SourceAddressKey.String(val)
-}
-
-// SourcePort returns an attribute KeyValue conforming to the "source.port"
-// semantic conventions. It represents the source port number
-func SourcePort(val int) attribute.KeyValue {
-	return SourcePortKey.Int(val)
-}
-
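Source attributes are the counterpart to the server attributes shown earlier: they describe a packet's sender when no client/server relationship is known. A brief illustrative sketch (same assumed imports; `packetAttrs` is a hypothetical helper):

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed vendored version
)

// packetAttrs labels low-level network telemetry (e.g. packet tracing)
// where neither side can be called the client or the server.
func packetAttrs(addr string, port int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.SourceAddress(addr), // e.g. "source.example.com" or "10.1.2.80"
		semconv.SourcePort(port),
	}
}
```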
-// Describes System attributes
-const (
-	// SystemDeviceKey is the attribute Key conforming to the "system.device"
-	// semantic conventions. It represents the device identifier
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '(identifier)'
-	SystemDeviceKey = attribute.Key("system.device")
-)
-
-// SystemDevice returns an attribute KeyValue conforming to the
-// "system.device" semantic conventions. It represents the device identifier
-func SystemDevice(val string) attribute.KeyValue {
-	return SystemDeviceKey.String(val)
-}
-
-// Describes System CPU attributes
-const (
-	// SystemCPULogicalNumberKey is the attribute Key conforming to the
-	// "system.cpu.logical_number" semantic conventions. It represents the
-	// logical CPU number [0..n-1]
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1
-	SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
-
-	// SystemCPUStateKey is the attribute Key conforming to the
-	// "system.cpu.state" semantic conventions. It represents the state of the
-	// CPU
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'idle', 'interrupt'
-	SystemCPUStateKey = attribute.Key("system.cpu.state")
-)
-
-var (
-	// user
-	SystemCPUStateUser = SystemCPUStateKey.String("user")
-	// system
-	SystemCPUStateSystem = SystemCPUStateKey.String("system")
-	// nice
-	SystemCPUStateNice = SystemCPUStateKey.String("nice")
-	// idle
-	SystemCPUStateIdle = SystemCPUStateKey.String("idle")
-	// iowait
-	SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
-	// interrupt
-	SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
-	// steal
-	SystemCPUStateSteal = SystemCPUStateKey.String("steal")
-)
-
-// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
-// "system.cpu.logical_number" semantic conventions. It represents the logical
-// CPU number [0..n-1]
-func SystemCPULogicalNumber(val int) attribute.KeyValue {
-	return SystemCPULogicalNumberKey.Int(val)
-}
-
-// Describes System Memory attributes
-const (
-	// SystemMemoryStateKey is the attribute Key conforming to the
-	// "system.memory.state" semantic conventions. It represents the memory
-	// state
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'free', 'cached'
-	SystemMemoryStateKey = attribute.Key("system.memory.state")
-)
-
-var (
-	// used
-	SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
-	// free
-	SystemMemoryStateFree = SystemMemoryStateKey.String("free")
-	// shared
-	SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
-	// buffers
-	SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
-	// cached
-	SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
-)
-
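The CPU and memory state attributes above are intended as dimensions on system metrics data points. A sketch of how a collector might label one data point (same assumed imports; the metric name `system.cpu.time` is an assumption based on the attribute names, not stated in this file):

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed vendored version
)

// cpuTimeAttrs labels one data point of a CPU-time metric: one logical CPU
// in one state, e.g. cpuTimeAttrs(1) for CPU 1 in the "user" state.
func cpuTimeAttrs(logicalCPU int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.SystemCPUStateUser,
		semconv.SystemCPULogicalNumber(logicalCPU),
	}
}
```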
-// Describes System Memory Paging attributes
-const (
-	// SystemPagingDirectionKey is the attribute Key conforming to the
-	// "system.paging.direction" semantic conventions. It represents the paging
-	// access direction
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'in'
-	SystemPagingDirectionKey = attribute.Key("system.paging.direction")
-
-	// SystemPagingStateKey is the attribute Key conforming to the
-	// "system.paging.state" semantic conventions. It represents the memory
-	// paging state
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'free'
-	SystemPagingStateKey = attribute.Key("system.paging.state")
-
-	// SystemPagingTypeKey is the attribute Key conforming to the
-	// "system.paging.type" semantic conventions. It represents the memory
-	// paging type
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'minor'
-	SystemPagingTypeKey = attribute.Key("system.paging.type")
-)
-
-var (
-	// in
-	SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
-	// out
-	SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
-)
-
-var (
-	// used
-	SystemPagingStateUsed = SystemPagingStateKey.String("used")
-	// free
-	SystemPagingStateFree = SystemPagingStateKey.String("free")
-)
-
-var (
-	// major
-	SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
-	// minor
-	SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
-)
-
-// Describes Filesystem attributes
-const (
-	// SystemFilesystemModeKey is the attribute Key conforming to the
-	// "system.filesystem.mode" semantic conventions. It represents the
-	// filesystem mode
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'rw, ro'
-	SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
-
-	// SystemFilesystemMountpointKey is the attribute Key conforming to the
-	// "system.filesystem.mountpoint" semantic conventions. It represents the
-	// filesystem mount path
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/mnt/data'
-	SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
-
-	// SystemFilesystemStateKey is the attribute Key conforming to the
-	// "system.filesystem.state" semantic conventions. It represents the
-	// filesystem state
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'used'
-	SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
-
-	// SystemFilesystemTypeKey is the attribute Key conforming to the
-	// "system.filesystem.type" semantic conventions. It represents the
-	// filesystem type
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'ext4'
-	SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
-)
-
-var (
-	// used
-	SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
-	// free
-	SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
-	// reserved
-	SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
-)
-
-var (
-	// fat32
-	SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
-	// exfat
-	SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
-	// ntfs
-	SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
-	// refs
-	SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
-	// hfsplus
-	SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
-	// ext4
-	SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
-)
-
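The filesystem keys above combine naturally: one mount is identified by its mountpoint, mode, and type, and the state enum splits its capacity. A hedged sketch (same assumed imports; `fsUsageAttrs` is a hypothetical helper):

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed vendored version
)

// fsUsageAttrs labels a filesystem usage data point for one ext4 mount,
// counting the "used" portion of its capacity.
func fsUsageAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.SystemFilesystemMountpoint("/mnt/data"),
		semconv.SystemFilesystemMode("rw"),
		semconv.SystemFilesystemTypeExt4,
		semconv.SystemFilesystemStateUsed,
	}
}
```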
-// SystemFilesystemMode returns an attribute KeyValue conforming to the
-// "system.filesystem.mode" semantic conventions. It represents the filesystem
-// mode
-func SystemFilesystemMode(val string) attribute.KeyValue {
-	return SystemFilesystemModeKey.String(val)
-}
-
-// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
-// the "system.filesystem.mountpoint" semantic conventions. It represents the
-// filesystem mount path
-func SystemFilesystemMountpoint(val string) attribute.KeyValue {
-	return SystemFilesystemMountpointKey.String(val)
-}
-
-// Describes Network attributes
-const (
-	// SystemNetworkStateKey is the attribute Key conforming to the
-	// "system.network.state" semantic conventions. It represents the network
-	// connection state. A stateless protocol MUST NOT set this attribute.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'close_wait'
-	SystemNetworkStateKey = attribute.Key("system.network.state")
-)
-
-var (
-	// close
-	SystemNetworkStateClose = SystemNetworkStateKey.String("close")
-	// close_wait
-	SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
-	// closing
-	SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
-	// delete
-	SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
-	// established
-	SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
-	// fin_wait_1
-	SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
-	// fin_wait_2
-	SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
-	// last_ack
-	SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
-	// listen
-	SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
-	// syn_recv
-	SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
-	// syn_sent
-	SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
-	// time_wait
-	SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
-)
-
-// Describes System Process attributes
-const (
-	// SystemProcessStatusKey is the attribute Key conforming to the
-	// "system.process.status" semantic conventions. It represents the process
-	// state, e.g., [Linux Process State
-	// Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'running'
-	SystemProcessStatusKey = attribute.Key("system.process.status")
-)
-
-var (
-	// running
-	SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
-	// sleeping
-	SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
-	// stopped
-	SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
-	// defunct
-	SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
-)
-
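The network state values above are TCP socket states, so a connection-count metric would typically carry one data point per state; stateless protocols (e.g. UDP) omit the attribute entirely, per the comment. A tiny illustrative sketch (same assumed imports):

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed vendored version
)

// tcpConnAttrs labels a connection-count data point with one TCP state.
// A UDP flow would return an empty slice instead.
func tcpConnAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{semconv.SystemNetworkStateEstablished}
}
```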
-// Attributes for telemetry SDK.
-const (
-	// TelemetrySDKLanguageKey is the attribute Key conforming to the
-	// "telemetry.sdk.language" semantic conventions. It represents the
-	// language of the telemetry SDK.
-	//
-	// Type: Enum
-	// RequirementLevel: Required
-	// Stability: stable
-	TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
-
-	// TelemetrySDKNameKey is the attribute Key conforming to the
-	// "telemetry.sdk.name" semantic conventions. It represents the name of the
-	// telemetry SDK as defined above.
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: stable
-	// Examples: 'opentelemetry'
-	// Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
-	// to `opentelemetry`.
-	// If another SDK, like a fork or a vendor-provided implementation, is
-	// used, this SDK MUST set the
-	// `telemetry.sdk.name` attribute to the fully-qualified class or module
-	// name of this SDK's main entry point
-	// or another suitable identifier depending on the language.
-	// The identifier `opentelemetry` is reserved and MUST NOT be used in this
-	// case.
-	// All custom identifiers SHOULD be stable across different versions of an
-	// implementation.
-	TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
-
-	// TelemetrySDKVersionKey is the attribute Key conforming to the
-	// "telemetry.sdk.version" semantic conventions. It represents the version
-	// string of the telemetry SDK.
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: stable
-	// Examples: '1.2.3'
-	TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
-
-	// TelemetryDistroNameKey is the attribute Key conforming to the
-	// "telemetry.distro.name" semantic conventions. It represents the name of
-	// the auto instrumentation agent or distribution, if used.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'parts-unlimited-java'
-	// Note: Official auto instrumentation agents and distributions SHOULD set
-	// the `telemetry.distro.name` attribute to
-	// a string starting with `opentelemetry-`, e.g.
-	// `opentelemetry-java-instrumentation`.
-	TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
-
-	// TelemetryDistroVersionKey is the attribute Key conforming to the
-	// "telemetry.distro.version" semantic conventions. It represents the
-	// version string of the auto instrumentation agent or distribution, if
-	// used.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1.2.3'
-	TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
-)
-
-var (
-	// cpp
-	TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
-	// dotnet
-	TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
-	// erlang
-	TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
-	// go
-	TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
-	// java
-	TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
-	// nodejs
-	TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
-	// php
-	TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
-	// python
-	TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
-	// ruby
-	TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
-	// rust
-	TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
-	// swift
-	TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
-	// webjs
-	TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
-)
-
-// TelemetrySDKName returns an attribute KeyValue conforming to the
-// "telemetry.sdk.name" semantic conventions. It represents the name of the
-// telemetry SDK as defined above.
-func TelemetrySDKName(val string) attribute.KeyValue {
-	return TelemetrySDKNameKey.String(val)
-}
-
-// TelemetrySDKVersion returns an attribute KeyValue conforming to the
-// "telemetry.sdk.version" semantic conventions. It represents the version
-// string of the telemetry SDK.
-func TelemetrySDKVersion(val string) attribute.KeyValue {
-	return TelemetrySDKVersionKey.String(val)
-}
-
-// TelemetryDistroName returns an attribute KeyValue conforming to the
-// "telemetry.distro.name" semantic conventions. It represents the name of the
-// auto instrumentation agent or distribution, if used.
-func TelemetryDistroName(val string) attribute.KeyValue {
-	return TelemetryDistroNameKey.String(val)
-}
-
-// TelemetryDistroVersion returns an attribute KeyValue conforming to the
-// "telemetry.distro.version" semantic conventions. It represents the version
-// string of the auto instrumentation agent or distribution, if used.
-func TelemetryDistroVersion(val string) attribute.KeyValue {
-	return TelemetryDistroVersionKey.String(val)
-}
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
-	// ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
-	// conventions. It represents the current "managed" thread ID (as opposed
-	// to OS thread ID).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 42
-	ThreadIDKey = attribute.Key("thread.id")
-
-	// ThreadNameKey is the attribute Key conforming to the "thread.name"
-	// semantic conventions. It represents the current thread name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'main'
-	ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
-	return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
-	return ThreadNameKey.String(val)
-}
-
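The thread attributes are meant to be attached at span start. A brief sketch (same assumed imports; Go has no "managed" thread IDs, so the values here are purely illustrative):

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed vendored version
	"go.opentelemetry.io/otel/trace"
)

// withThread bundles thread.id and thread.name as a span start option,
// recording which logical thread started the span.
func withThread() trace.SpanStartEventOption {
	return trace.WithAttributes(
		semconv.ThreadID(42),       // illustrative value
		semconv.ThreadName("main"), // illustrative value
	)
}
```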
-// Semantic convention attributes in the TLS namespace.
-const (
-	// TLSCipherKey is the attribute Key conforming to the "tls.cipher"
-	// semantic conventions. It represents the string indicating the
-	// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
-	// used during the current connection.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
-	// 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
-	// Note: The values allowed for `tls.cipher` MUST be one of the
-	// `Descriptions` of the [registered TLS Cipher
-	// Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
-	TLSCipherKey = attribute.Key("tls.cipher")
-
-	// TLSClientCertificateKey is the attribute Key conforming to the
-	// "tls.client.certificate" semantic conventions. It represents the
-	// PEM-encoded stand-alone certificate offered by the client. This is
-	// usually mutually-exclusive of `client.certificate_chain` since this
-	// value also exists in that list.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MII...'
-	TLSClientCertificateKey = attribute.Key("tls.client.certificate")
-
-	// TLSClientCertificateChainKey is the attribute Key conforming to the
-	// "tls.client.certificate_chain" semantic conventions. It represents the
-	// array of PEM-encoded certificates that make up the certificate chain
-	// offered by the client. This is usually mutually-exclusive of
-	// `client.certificate` since that value should be the first certificate in
-	// the chain.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MII...', 'MI...'
-	TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
-
-	// TLSClientHashMd5Key is the attribute Key conforming to the
-	// "tls.client.hash.md5" semantic conventions. It represents the
-	// certificate fingerprint using the MD5 digest of DER-encoded version of
-	// certificate offered by the client. For consistency with other hash
-	// values, this value should be formatted as an uppercase hash.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
-	TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
-
-	// TLSClientHashSha1Key is the attribute Key conforming to the
-	// "tls.client.hash.sha1" semantic conventions. It represents the
-	// certificate fingerprint using the SHA1 digest of DER-encoded version of
-	// certificate offered by the client. For consistency with other hash
-	// values, this value should be formatted as an uppercase hash.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
-	TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
-
-	// TLSClientHashSha256Key is the attribute Key conforming to the
-	// "tls.client.hash.sha256" semantic conventions. It represents the
-	// certificate fingerprint using the SHA256 digest of DER-encoded version
-	// of certificate offered by the client. For consistency with other hash
-	// values, this value should be formatted as an uppercase hash.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
-	TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
-
-	// TLSClientIssuerKey is the attribute Key conforming to the
-	// "tls.client.issuer" semantic conventions. It represents the
-	// distinguished name of
-	// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
-	// of the issuer of the x.509 certificate presented by the client.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
-	// DC=com'
-	TLSClientIssuerKey = attribute.Key("tls.client.issuer")
-
-	// TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
-	// semantic conventions. It represents a hash that identifies clients based
-	// on how they perform an SSL/TLS handshake.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'd4e5b18d6b55c71272893221c96ba240'
-	TLSClientJa3Key = attribute.Key("tls.client.ja3")
-
-	// TLSClientNotAfterKey is the attribute Key conforming to the
-	// "tls.client.not_after" semantic conventions. It represents the date/time
-	// indicating when client certificate is no longer considered valid.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2021-01-01T00:00:00.000Z'
-	TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
-
-	// TLSClientNotBeforeKey is the attribute Key conforming to the
-	// "tls.client.not_before" semantic conventions. It represents the
-	// date/time indicating when client certificate is first considered valid.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1970-01-01T00:00:00.000Z'
-	TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
-
-	// TLSClientServerNameKey is the attribute Key conforming to the
-	// "tls.client.server_name" semantic conventions. It represents the server
-	// name indication (SNI), which tells the server the hostname the client
-	// is attempting to connect to.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry.io'
-	TLSClientServerNameKey = attribute.Key("tls.client.server_name")
-
-	// TLSClientSubjectKey is the attribute Key conforming to the
-	// "tls.client.subject" semantic conventions. It represents the
-	// distinguished name of subject of the x.509 certificate presented by the
-	// client.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
-	TLSClientSubjectKey = attribute.Key("tls.client.subject")
-
-	// TLSClientSupportedCiphersKey is the attribute Key conforming to the
-	// "tls.client.supported_ciphers" semantic conventions. It represents the
-	// array of ciphers offered by the client during the client hello.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
-	// "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
-	TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
-
-	// TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
-	// conventions. It represents the string indicating the curve used for the
-	// given cipher, when applicable
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'secp256r1'
-	TLSCurveKey = attribute.Key("tls.curve")
-
-	// TLSEstablishedKey is the attribute Key conforming to the
-	// "tls.established" semantic conventions. It represents the boolean flag
-	// indicating if the TLS negotiation was successful and transitioned to an
-	// encrypted tunnel.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: True
-	TLSEstablishedKey = attribute.Key("tls.established")
-
-	// TLSNextProtocolKey is the attribute Key conforming to the
-	// "tls.next_protocol" semantic conventions. It represents the string
-	// indicating the protocol being tunneled. Per the values in the [IANA
-	// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
-	// this string should be lower case.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'http/1.1'
-	TLSNextProtocolKey = attribute.Key("tls.next_protocol")
-
-	// TLSProtocolNameKey is the attribute Key conforming to the
-	// "tls.protocol.name" semantic conventions. It represents the normalized
-	// lowercase protocol name parsed from original string of the negotiated
-	// [SSL/TLS protocol
-	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	TLSProtocolNameKey = attribute.Key("tls.protocol.name")
-
-	// TLSProtocolVersionKey is the attribute Key conforming to the
-	// "tls.protocol.version" semantic conventions. It represents the numeric
-	// part of the version parsed from the original string of the negotiated
-	// [SSL/TLS protocol
-	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1.2', '3'
-	TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
-
-	// TLSResumedKey is the attribute Key conforming to the "tls.resumed"
-	// semantic conventions. It represents the boolean flag indicating if this
-	// TLS connection was resumed from an existing TLS negotiation.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: True
-	TLSResumedKey = attribute.Key("tls.resumed")
-
-	// TLSServerCertificateKey is the attribute Key conforming to the
-	// "tls.server.certificate" semantic conventions. It represents the
-	// PEM-encoded stand-alone certificate offered by the server. This is
-	// usually mutually-exclusive of `server.certificate_chain` since this
-	// value also exists in that list.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MII...'
-	TLSServerCertificateKey = attribute.Key("tls.server.certificate")
-
-	// TLSServerCertificateChainKey is the attribute Key conforming to the
-	// "tls.server.certificate_chain" semantic conventions. It represents the
-	// array of PEM-encoded certificates that make up the certificate chain
-	// offered by the server. This is usually mutually-exclusive of
-	// `server.certificate` since that value should be the first certificate in
-	// the chain.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MII...', 'MI...'
-	TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
-
-	// TLSServerHashMd5Key is the attribute Key conforming to the
-	// "tls.server.hash.md5" semantic conventions. It represents the
-	// certificate fingerprint using the MD5 digest of DER-encoded version of
-	// certificate offered by the server. For consistency with other hash
-	// values, this value should be formatted as an uppercase hash.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
-	TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
-
-	// TLSServerHashSha1Key is the attribute Key conforming to the
-	// "tls.server.hash.sha1" semantic conventions. It represents the
-	// certificate fingerprint using the SHA1 digest of DER-encoded version of
-	// certificate offered by the server. For consistency with other hash
-	// values, this value should be formatted as an uppercase hash.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
-	TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
-
-	// TLSServerHashSha256Key is the attribute Key conforming to the
-	// "tls.server.hash.sha256" semantic conventions. It represents the
-	// certificate fingerprint using the SHA256 digest of DER-encoded version
-	// of certificate offered by the server. For consistency with other hash
-	// values, this value should be formatted as an uppercase hash.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
-	TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
-
-	// TLSServerIssuerKey is the attribute Key conforming to the
-	// "tls.server.issuer" semantic conventions. It represents the
-	// distinguished name of
-	// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
-	// of the issuer of the x.509 certificate presented by the server.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
-	// DC=com'
-	TLSServerIssuerKey = attribute.Key("tls.server.issuer")
-
-	// TLSServerJa3sKey is the attribute Key conforming to the
-	// "tls.server.ja3s" semantic conventions. It represents a hash that
-	// identifies servers based on how they perform an SSL/TLS handshake.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'd4e5b18d6b55c71272893221c96ba240'
-	TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
-
-	// TLSServerNotAfterKey is the attribute Key conforming to the
-	// "tls.server.not_after" semantic conventions. It represents the date/time
-	// indicating when server certificate is no longer considered valid.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2021-01-01T00:00:00.000Z'
-	TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
-
-	// TLSServerNotBeforeKey is the attribute Key conforming to the
-	// "tls.server.not_before" semantic conventions. It represents the
-	// date/time indicating when server certificate is first considered valid.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1970-01-01T00:00:00.000Z'
-	TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
-
-	// TLSServerSubjectKey is the attribute Key conforming to the
-	// "tls.server.subject" semantic conventions. It represents the
-	// distinguished name of subject of the x.509 certificate presented by the
-	// server.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
-	TLSServerSubjectKey = attribute.Key("tls.server.subject")
-)
-
-var (
-	// ssl
-	TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
-	// tls
-	TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
-)
-
-// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
-// semantic conventions. It represents the string indicating the
-// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
-// during the current connection.
-func TLSCipher(val string) attribute.KeyValue {
-	return TLSCipherKey.String(val)
-}
-
-// TLSClientCertificate returns an attribute KeyValue conforming to the
-// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
-// stand-alone certificate offered by the client. This is usually
-// mutually-exclusive of `client.certificate_chain` since this value also
-// exists in that list.
-func TLSClientCertificate(val string) attribute.KeyValue {
-	return TLSClientCertificateKey.String(val)
-}
-
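A sketch of how a negotiated TLS session maps onto these constructors, following the formats the comments document (IANA cipher description, lowercase protocol name, numeric version). Same assumed imports; `tlsAttrs` is a hypothetical helper:

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed vendored version
)

// tlsAttrs describes one successfully negotiated, non-resumed TLS 1.2
// session using the documented value formats.
func tlsAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.TLSCipher("TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"),
		semconv.TLSProtocolNameTLS,
		semconv.TLSProtocolVersion("1.2"),
		semconv.TLSEstablished(true),
		semconv.TLSResumed(false),
	}
}
```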
-// TLSClientCertificateChain returns an attribute KeyValue conforming to the
-// "tls.client.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the client. This is usually mutually-exclusive of `client.certificate` since
-// that value should be the first certificate in the chain.
-func TLSClientCertificateChain(val ...string) attribute.KeyValue {
-	return TLSClientCertificateChainKey.StringSlice(val)
-}
-
-// TLSClientHashMd5 returns an attribute KeyValue conforming to the
-// "tls.client.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashMd5(val string) attribute.KeyValue {
-	return TLSClientHashMd5Key.String(val)
-}
-
-// TLSClientHashSha1 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha1(val string) attribute.KeyValue {
-	return TLSClientHashSha1Key.String(val)
-}
-
-// TLSClientHashSha256 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha256(val string) attribute.KeyValue {
-	return TLSClientHashSha256Key.String(val)
-}
-
-// TLSClientIssuer returns an attribute KeyValue conforming to the
-// "tls.client.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the client.
-func TLSClientIssuer(val string) attribute.KeyValue {
-	return TLSClientIssuerKey.String(val)
-}
-
-// TLSClientJa3 returns an attribute KeyValue conforming to the
-// "tls.client.ja3" semantic conventions. It represents a hash that identifies
-// clients based on how they perform an SSL/TLS handshake.
-func TLSClientJa3(val string) attribute.KeyValue {
-	return TLSClientJa3Key.String(val)
-}
-
-// TLSClientNotAfter returns an attribute KeyValue conforming to the
-// "tls.client.not_after" semantic conventions. It represents the date/time
-// indicating when client certificate is no longer considered valid.
-func TLSClientNotAfter(val string) attribute.KeyValue {
-	return TLSClientNotAfterKey.String(val)
-}
-
-// TLSClientNotBefore returns an attribute KeyValue conforming to the
-// "tls.client.not_before" semantic conventions. It represents the date/time
-// indicating when client certificate is first considered valid.
-func TLSClientNotBefore(val string) attribute.KeyValue {
-	return TLSClientNotBeforeKey.String(val)
-}
-
-// TLSClientServerName returns an attribute KeyValue conforming to the
-// "tls.client.server_name" semantic conventions. It represents the server
-// name indication (SNI), which tells the server the hostname the client is
-// attempting to connect to.
-func TLSClientServerName(val string) attribute.KeyValue {
-	return TLSClientServerNameKey.String(val)
-}
-
-// TLSClientSubject returns an attribute KeyValue conforming to the
-// "tls.client.subject" semantic conventions. It represents the distinguished
-// name of subject of the x.509 certificate presented by the client.
-func TLSClientSubject(val string) attribute.KeyValue {
-	return TLSClientSubjectKey.String(val)
-}
-
-// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
-// "tls.client.supported_ciphers" semantic conventions. It represents the array
-// of ciphers offered by the client during the client hello.
-func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
-	return TLSClientSupportedCiphersKey.StringSlice(val)
-}
-
-// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
-// semantic conventions. It represents the string indicating the curve used for
-// the given cipher, when applicable
-func TLSCurve(val string) attribute.KeyValue {
-	return TLSCurveKey.String(val)
-}
-
-// TLSEstablished returns an attribute KeyValue conforming to the
-// "tls.established" semantic conventions. It represents the boolean flag
-// indicating if the TLS negotiation was successful and transitioned to an
-// encrypted tunnel.
-func TLSEstablished(val bool) attribute.KeyValue {
-	return TLSEstablishedKey.Bool(val)
-}
-
-// TLSNextProtocol returns an attribute KeyValue conforming to the
-// "tls.next_protocol" semantic conventions. It represents the string
-// indicating the protocol being tunneled. Per the values in the [IANA
-// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
-// this string should be lower case.
-func TLSNextProtocol(val string) attribute.KeyValue {
-	return TLSNextProtocolKey.String(val)
-}
-
-// TLSProtocolVersion returns an attribute KeyValue conforming to the
-// "tls.protocol.version" semantic conventions. It represents the numeric part
-// of the version parsed from the original string of the negotiated [SSL/TLS
-// protocol
-// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
-func TLSProtocolVersion(val string) attribute.KeyValue {
-	return TLSProtocolVersionKey.String(val)
-}
-
-// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
-// semantic conventions. It represents the boolean flag indicating if this TLS
-// connection was resumed from an existing TLS negotiation.
-func TLSResumed(val bool) attribute.KeyValue {
-	return TLSResumedKey.Bool(val)
-}
-
-// TLSServerCertificate returns an attribute KeyValue conforming to the
-// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
-// stand-alone certificate offered by the server. This is usually
-// mutually-exclusive of `server.certificate_chain` since this value also
-// exists in that list.
-func TLSServerCertificate(val string) attribute.KeyValue {
-	return TLSServerCertificateKey.String(val)
-}
-
-// TLSServerCertificateChain returns an attribute KeyValue conforming to the
-// "tls.server.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the server. This is usually mutually-exclusive of `server.certificate` since
-// that value should be the first certificate in the chain.
- -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the server. This is usually mutually-exclusive of `server.certificate` since -// that value should be the first certificate in the chain. -func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of the DER-encoded version of the -// certificate offered by the server. For consistency with other hash values, -// this value should be formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of the DER-encoded version of the -// certificate offered by the server. For consistency with other hash values, -// this value should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of the DER-encoded version of the -// certificate offered by the server. For consistency with other hash values, -// this value should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the server. -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/time -// indicating when the server certificate is no longer considered valid. -func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/time -// indicating when the server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of the subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Attributes describing URL. -const ( - // URLDomainKey is the attribute Key conforming to the "url.domain" - // semantic conventions. It represents the domain extracted from the - // `url.full`, such as "opentelemetry.io".
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', - // '[1080:0:0:0:8:800:200C:417A]' - // Note: In some cases a URL may refer to an IP and/or port directly, - // without a domain name. In this case, the IP address would go to the - // domain field. If the URL contains a [literal IPv6 - // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by - // `[` and `]`, the `[` and `]` characters should also be captured in the - // domain field. - URLDomainKey = attribute.Key("url.domain") - - // URLExtensionKey is the attribute Key conforming to the "url.extension" - // semantic conventions. It represents the file extension extracted from - // the `url.full`, excluding the leading dot. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'png', 'gz' - // Note: The file extension is only set if it exists, as not every url has - // a file extension. When the file name has multiple extensions - // `example.tar.gz`, only the last one should be captured `gz`, not - // `tar.gz`. - URLExtensionKey = attribute.Key("url.extension") - - // URLFragmentKey is the attribute Key conforming to the "url.fragment" - // semantic conventions. It represents the [URI - // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'SemConv' - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network - // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // '//localhost' - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the - // fragment is not transmitted over HTTP, but if it is known, it SHOULD be - // included nevertheless. - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case username and - // password SHOULD be redacted and attribute's value SHOULD be - // `https://REDACTED:REDACTED@www.example.com/`. - // `url.full` SHOULD capture the absolute URL when it is available (or can - // be reconstructed). Sensitive content provided in `url.full` SHOULD be - // scrubbed when instrumentations can identify it. - URLFullKey = attribute.Key("url.full") - - // URLOriginalKey is the attribute Key conforming to the "url.original" - // semantic conventions. It represents the unmodified original URL as seen - // in the event source. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // 'search?q=OpenTelemetry' - // Note: In network monitoring, the observed URL may be a full URL, whereas - // in access logs, the URL is often just represented as a path. This field - // is meant to represent the URL as it was observed, complete or not. - // `url.original` might contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case password and - // username SHOULD NOT be redacted and attribute's value SHOULD remain the - // same. 
- URLOriginalKey = attribute.Key("url.original") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI - // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/search' - // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when - // instrumentations can identify it. - URLPathKey = attribute.Key("url.path") - - // URLPortKey is the attribute Key conforming to the "url.port" semantic - // conventions. It represents the port extracted from the `url.full` - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 443 - URLPortKey = attribute.Key("url.port") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI - // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'q=OpenTelemetry' - // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when - // instrumentations can identify it. - URLQueryKey = attribute.Key("url.query") - - // URLRegisteredDomainKey is the attribute Key conforming to the - // "url.registered_domain" semantic conventions. It represents the highest - // registered url domain, stripped of the subdomain. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'example.com', 'foo.co.uk' - // Note: This value can be determined precisely with the [public suffix - // list](http://publicsuffix.org). For example, the registered domain for - // `foo.example.com` is `example.com`. Trying to approximate this by simply - // taking the last two labels will not work well for TLDs such as `co.uk`. - URLRegisteredDomainKey = attribute.Key("url.registered_domain") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" - // semantic conventions. It represents the [URI - // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component - // identifying the used protocol. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https', 'ftp', 'telnet' - URLSchemeKey = attribute.Key("url.scheme") - - // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" - // semantic conventions. It represents the subdomain portion of a fully - // qualified domain name includes all of the names except the host name - // under the registered_domain. In a partially qualified domain, or if the - // qualification level of the full name cannot be determined, subdomain - // contains all of the names below the registered domain. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'east', 'sub2.sub1' - // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If - // the domain has multiple levels of subdomain, such as - // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, - // with no trailing period. - URLSubdomainKey = attribute.Key("url.subdomain") - - // URLTemplateKey is the attribute Key conforming to the "url.template" - // semantic conventions. It represents the low-cardinality template of an - // [absolute path - // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/users/{id}', '/users/:id', '/users?id={id}' - URLTemplateKey = attribute.Key("url.template") - - // URLTopLevelDomainKey is the attribute Key conforming to the - // "url.top_level_domain" semantic conventions. It represents the effective - // top level domain (eTLD), also known as the domain suffix, is the last - // part of the domain name. For example, the top level domain for - // example.com is `com`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com', 'co.uk' - // Note: This value can be determined precisely with the [public suffix - // list](http://publicsuffix.org). - URLTopLevelDomainKey = attribute.Key("url.top_level_domain") -) - -// URLDomain returns an attribute KeyValue conforming to the "url.domain" -// semantic conventions. It represents the domain extracted from the -// `url.full`, such as "opentelemetry.io". -func URLDomain(val string) attribute.KeyValue { - return URLDomainKey.String(val) -} - -// URLExtension returns an attribute KeyValue conforming to the -// "url.extension" semantic conventions. It represents the file extension -// extracted from the `url.full`, excluding the leading dot. -func URLExtension(val string) attribute.KeyValue { - return URLExtensionKey.String(val) -} - -// URLFragment returns an attribute KeyValue conforming to the -// "url.fragment" semantic conventions. It represents the [URI -// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" -// semantic conventions. It represents the absolute URL describing a network -// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLOriginal returns an attribute KeyValue conforming to the -// "url.original" semantic conventions. It represents the unmodified original -// URL as seen in the event source. -func URLOriginal(val string) attribute.KeyValue { - return URLOriginalKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" -// semantic conventions. It represents the [URI -// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLPort returns an attribute KeyValue conforming to the "url.port" -// semantic conventions. It represents the port extracted from the `url.full` -func URLPort(val int) attribute.KeyValue { - return URLPortKey.Int(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" -// semantic conventions. It represents the [URI -// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLRegisteredDomain returns an attribute KeyValue conforming to the -// "url.registered_domain" semantic conventions. It represents the highest -// registered url domain, stripped of the subdomain. -func URLRegisteredDomain(val string) attribute.KeyValue { - return URLRegisteredDomainKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions. 
It represents the [URI -// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component -// identifying the used protocol. -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// URLSubdomain returns an attribute KeyValue conforming to the -// "url.subdomain" semantic conventions. It represents the subdomain portion of -// a fully qualified domain name includes all of the names except the host name -// under the registered_domain. In a partially qualified domain, or if the -// qualification level of the full name cannot be determined, subdomain -// contains all of the names below the registered domain. -func URLSubdomain(val string) attribute.KeyValue { - return URLSubdomainKey.String(val) -} - -// URLTemplate returns an attribute KeyValue conforming to the -// "url.template" semantic conventions. It represents the low-cardinality -// template of an [absolute path -// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). -func URLTemplate(val string) attribute.KeyValue { - return URLTemplateKey.String(val) -} - -// URLTopLevelDomain returns an attribute KeyValue conforming to the -// "url.top_level_domain" semantic conventions. It represents the effective top -// level domain (eTLD), also known as the domain suffix, is the last part of -// the domain name. For example, the top level domain for example.com is `com`. -func URLTopLevelDomain(val string) attribute.KeyValue { - return URLTopLevelDomainKey.String(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentNameKey is the attribute Key conforming to the - // "user_agent.name" semantic conventions. It represents the name of the - // user-agent extracted from original. Usually refers to the browser's - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Safari', 'YourApp' - // Note: [Example](https://www.whatsmyua.info) of extracting browser's name - // from original string. In the case of using a user-agent for non-browser - // products, such as microservices with multiple names/versions inside the - // `user_agent.original`, the most significant name SHOULD be selected. In - // such a scenario it should align with `user_agent.version` - UserAgentNameKey = attribute.Key("user_agent.name") - - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 - // grpc-java-okhttp/1.27.2' - UserAgentOriginalKey = attribute.Key("user_agent.original") - - // UserAgentVersionKey is the attribute Key conforming to the - // "user_agent.version" semantic conventions. It represents the version of - // the user-agent extracted from original. Usually refers to the browser's - // version - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.1.2', '1.0.0' - // Note: [Example](https://www.whatsmyua.info) of extracting browser's - // version from original string. 
In the case of using a user-agent for - // non-browser products, such as microservices with multiple names/versions - // inside the `user_agent.original`, the most significant version SHOULD be - // selected. In such a scenario it should align with `user_agent.name` - UserAgentVersionKey = attribute.Key("user_agent.version") -) - -// UserAgentName returns an attribute KeyValue conforming to the -// "user_agent.name" semantic conventions. It represents the name of the -// user-agent extracted from original. Usually refers to the browser's name. -func UserAgentName(val string) attribute.KeyValue { - return UserAgentNameKey.String(val) -} - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// UserAgentVersion returns an attribute KeyValue conforming to the -// "user_agent.version" semantic conventions. It represents the version of the -// user-agent extracted from original. Usually refers to the browser's version -func UserAgentVersion(val string) attribute.KeyValue { - return UserAgentVersionKey.String(val) -} - -// The attributes used to describe the packaged software running the -// application code. -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. 
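The webengine.* keys above are typically attached once, at resource level, rather than per span. A minimal sketch under that assumption, using the OTel SDK's resource package; the WildFly values are taken from the examples in the comments above:

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// newWebEngineResource describes the hosting web engine as a resource.
func newWebEngineResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL, // schema URL constant for semconv v1.26.0
		semconv.WebEngineName("WildFly"),
		semconv.WebEngineVersion("21.0.0"),
	)
}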
-func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go deleted file mode 100644 index d031bbea7..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.26.0 -// version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go deleted file mode 100644 index bfaee0d56..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go deleted file mode 100644 index fcdb9f485..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go +++ /dev/null @@ -1,1307 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -const ( - - // ContainerCPUTime is the metric conforming to the "container.cpu.time" - // semantic conventions. It represents the total CPU time consumed. - // Instrument: counter - // Unit: s - // Stability: Experimental - ContainerCPUTimeName = "container.cpu.time" - ContainerCPUTimeUnit = "s" - ContainerCPUTimeDescription = "Total CPU time consumed" - - // ContainerMemoryUsage is the metric conforming to the - // "container.memory.usage" semantic conventions. It represents the memory - // usage of the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerMemoryUsageName = "container.memory.usage" - ContainerMemoryUsageUnit = "By" - ContainerMemoryUsageDescription = "Memory usage of the container." - - // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic - // conventions. It represents the disk bytes for the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerDiskIoName = "container.disk.io" - ContainerDiskIoUnit = "By" - ContainerDiskIoDescription = "Disk bytes for the container." - - // ContainerNetworkIo is the metric conforming to the "container.network.io" - // semantic conventions. It represents the network bytes for the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerNetworkIoName = "container.network.io" - ContainerNetworkIoUnit = "By" - ContainerNetworkIoDescription = "Network bytes for the container." - - // DBClientOperationDuration is the metric conforming to the - // "db.client.operation.duration" semantic conventions. It represents the - // duration of database client operations. 
- // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientOperationDurationName = "db.client.operation.duration" - DBClientOperationDurationUnit = "s" - DBClientOperationDurationDescription = "Duration of database client operations." - - // DBClientConnectionCount is the metric conforming to the - // "db.client.connection.count" semantic conventions. It represents the number - // of connections that are currently in state described by the `state` - // attribute. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionCountName = "db.client.connection.count" - DBClientConnectionCountUnit = "{connection}" - DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" - - // DBClientConnectionIdleMax is the metric conforming to the - // "db.client.connection.idle.max" semantic conventions. It represents the - // maximum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionIdleMaxName = "db.client.connection.idle.max" - DBClientConnectionIdleMaxUnit = "{connection}" - DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" - - // DBClientConnectionIdleMin is the metric conforming to the - // "db.client.connection.idle.min" semantic conventions. It represents the - // minimum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionIdleMinName = "db.client.connection.idle.min" - DBClientConnectionIdleMinUnit = "{connection}" - DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" - - // DBClientConnectionMax is the metric conforming to the - // "db.client.connection.max" semantic conventions. It represents the maximum - // number of open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionMaxName = "db.client.connection.max" - DBClientConnectionMaxUnit = "{connection}" - DBClientConnectionMaxDescription = "The maximum number of open connections allowed" - - // DBClientConnectionPendingRequests is the metric conforming to the - // "db.client.connection.pending_requests" semantic conventions. It represents - // the number of pending requests for an open connection, cumulative for the - // entire pool. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" - DBClientConnectionPendingRequestsUnit = "{request}" - DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" - - // DBClientConnectionTimeouts is the metric conforming to the - // "db.client.connection.timeouts" semantic conventions. It represents the - // number of connection timeouts that have occurred trying to obtain a - // connection from the pool. 
- // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - DBClientConnectionTimeoutsName = "db.client.connection.timeouts" - DBClientConnectionTimeoutsUnit = "{timeout}" - DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" - - // DBClientConnectionCreateTime is the metric conforming to the - // "db.client.connection.create_time" semantic conventions. It represents the - // time it took to create a new connection. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientConnectionCreateTimeName = "db.client.connection.create_time" - DBClientConnectionCreateTimeUnit = "s" - DBClientConnectionCreateTimeDescription = "The time it took to create a new connection" - - // DBClientConnectionWaitTime is the metric conforming to the - // "db.client.connection.wait_time" semantic conventions. It represents the - // time it took to obtain an open connection from the pool. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientConnectionWaitTimeName = "db.client.connection.wait_time" - DBClientConnectionWaitTimeUnit = "s" - DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool" - - // DBClientConnectionUseTime is the metric conforming to the - // "db.client.connection.use_time" semantic conventions. It represents the time - // between borrowing a connection and returning it to the pool. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientConnectionUseTimeName = "db.client.connection.use_time" - DBClientConnectionUseTimeUnit = "s" - DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool" - - // DBClientConnectionsUsage is the metric conforming to the - // "db.client.connections.usage" semantic conventions. It represents the - // deprecated, use `db.client.connection.count` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsUsageName = "db.client.connections.usage" - DBClientConnectionsUsageUnit = "{connection}" - DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead." - - // DBClientConnectionsIdleMax is the metric conforming to the - // "db.client.connections.idle.max" semantic conventions. It represents the - // deprecated, use `db.client.connection.idle.max` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" - DBClientConnectionsIdleMaxUnit = "{connection}" - DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead." - - // DBClientConnectionsIdleMin is the metric conforming to the - // "db.client.connections.idle.min" semantic conventions. It represents the - // deprecated, use `db.client.connection.idle.min` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMinName = "db.client.connections.idle.min" - DBClientConnectionsIdleMinUnit = "{connection}" - DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead." - - // DBClientConnectionsMax is the metric conforming to the - // "db.client.connections.max" semantic conventions. It represents the - // deprecated, use `db.client.connection.max` instead. 
- // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsMaxName = "db.client.connections.max" - DBClientConnectionsMaxUnit = "{connection}" - DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead." - - // DBClientConnectionsPendingRequests is the metric conforming to the - // "db.client.connections.pending_requests" semantic conventions. It represents - // the deprecated, use `db.client.connection.pending_requests` instead. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" - DBClientConnectionsPendingRequestsUnit = "{request}" - DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead." - - // DBClientConnectionsTimeouts is the metric conforming to the - // "db.client.connections.timeouts" semantic conventions. It represents the - // deprecated, use `db.client.connection.timeouts` instead. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" - DBClientConnectionsTimeoutsUnit = "{timeout}" - DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead." - - // DBClientConnectionsCreateTime is the metric conforming to the - // "db.client.connections.create_time" semantic conventions. It represents the - // deprecated, use `db.client.connection.create_time` instead. Note: the unit - // also changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsCreateTimeName = "db.client.connections.create_time" - DBClientConnectionsCreateTimeUnit = "ms" - DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`." - - // DBClientConnectionsWaitTime is the metric conforming to the - // "db.client.connections.wait_time" semantic conventions. It represents the - // deprecated, use `db.client.connection.wait_time` instead. Note: the unit - // also changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" - DBClientConnectionsWaitTimeUnit = "ms" - DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`." - - // DBClientConnectionsUseTime is the metric conforming to the - // "db.client.connections.use_time" semantic conventions. It represents the - // deprecated, use `db.client.connection.use_time` instead. Note: the unit also - // changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsUseTimeName = "db.client.connections.use_time" - DBClientConnectionsUseTimeUnit = "ms" - DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`." - - // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" - // semantic conventions. It represents the measures the time taken to perform a - // DNS lookup. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DNSLookupDurationName = "dns.lookup.duration" - DNSLookupDurationUnit = "s" - DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." 
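Each metric in this generated file is described by a Name/Unit/Description constant triple, so an instrument can be created without re-typing the registry strings. A minimal sketch of that pattern, assuming a configured global MeterProvider; the meter name is illustrative:

package example

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// newDNSLookupHistogram builds the dns.lookup.duration instrument from the
// generated constants so that name, unit, and description stay in sync.
func newDNSLookupHistogram() (metric.Float64Histogram, error) {
	meter := otel.Meter("example/dns")
	return meter.Float64Histogram(
		semconv.DNSLookupDurationName,
		metric.WithUnit(semconv.DNSLookupDurationUnit),
		metric.WithDescription(semconv.DNSLookupDurationDescription),
	)
}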
- - // AspnetcoreRoutingMatchAttempts is the metric conforming to the - // "aspnetcore.routing.match_attempts" semantic conventions. It represents the - // number of requests that were attempted to be matched to an endpoint. - // Instrument: counter - // Unit: {match_attempt} - // Stability: Stable - AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" - AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" - AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." - - // AspnetcoreDiagnosticsExceptions is the metric conforming to the - // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the - // number of exceptions caught by exception handling middleware. - // Instrument: counter - // Unit: {exception} - // Stability: Stable - AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" - AspnetcoreDiagnosticsExceptionsUnit = "{exception}" - AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." - - // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the - // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It - // represents the number of requests that are currently active on the server - // that hold a rate limiting lease. - // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" - AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" - AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." - - // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the - // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It - // represents the duration of rate limiting lease held by requests on the - // server. - // Instrument: histogram - // Unit: s - // Stability: Stable - AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" - AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" - AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." - - // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the - // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It - // represents the time the request spent in a queue waiting to acquire a rate - // limiting lease. - // Instrument: histogram - // Unit: s - // Stability: Stable - AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" - AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" - AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the - // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It - // represents the number of requests that are currently queued, waiting to - // acquire a rate limiting lease. 
- // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" - AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" - AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingRequests is the metric conforming to the - // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the - // number of requests that tried to acquire a rate limiting lease. - // Instrument: counter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" - AspnetcoreRateLimitingRequestsUnit = "{request}" - AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." - - // KestrelActiveConnections is the metric conforming to the - // "kestrel.active_connections" semantic conventions. It represents the number - // of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelActiveConnectionsName = "kestrel.active_connections" - KestrelActiveConnectionsUnit = "{connection}" - KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // KestrelConnectionDuration is the metric conforming to the - // "kestrel.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: Stable - KestrelConnectionDurationName = "kestrel.connection.duration" - KestrelConnectionDurationUnit = "s" - KestrelConnectionDurationDescription = "The duration of connections on the server." - - // KestrelRejectedConnections is the metric conforming to the - // "kestrel.rejected_connections" semantic conventions. It represents the - // number of connections rejected by the server. - // Instrument: counter - // Unit: {connection} - // Stability: Stable - KestrelRejectedConnectionsName = "kestrel.rejected_connections" - KestrelRejectedConnectionsUnit = "{connection}" - KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." - - // KestrelQueuedConnections is the metric conforming to the - // "kestrel.queued_connections" semantic conventions. It represents the number - // of connections that are currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelQueuedConnectionsName = "kestrel.queued_connections" - KestrelQueuedConnectionsUnit = "{connection}" - KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." - - // KestrelQueuedRequests is the metric conforming to the - // "kestrel.queued_requests" semantic conventions. It represents the number of - // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are - // currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - KestrelQueuedRequestsName = "kestrel.queued_requests" - KestrelQueuedRequestsUnit = "{request}" - KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." - - // KestrelUpgradedConnections is the metric conforming to the - // "kestrel.upgraded_connections" semantic conventions. 
It represents the - // number of connections that are currently upgraded (WebSockets). - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" - KestrelUpgradedConnectionsUnit = "{connection}" - KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." - - // KestrelTLSHandshakeDuration is the metric conforming to the - // "kestrel.tls_handshake.duration" semantic conventions. It represents the - // duration of TLS handshakes on the server. - // Instrument: histogram - // Unit: s - // Stability: Stable - KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" - KestrelTLSHandshakeDurationUnit = "s" - KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." - - // KestrelActiveTLSHandshakes is the metric conforming to the - // "kestrel.active_tls_handshakes" semantic conventions. It represents the - // number of TLS handshakes that are currently in progress on the server. - // Instrument: updowncounter - // Unit: {handshake} - // Stability: Stable - KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" - KestrelActiveTLSHandshakesUnit = "{handshake}" - KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." - - // SignalrServerConnectionDuration is the metric conforming to the - // "signalr.server.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: Stable - SignalrServerConnectionDurationName = "signalr.server.connection.duration" - SignalrServerConnectionDurationUnit = "s" - SignalrServerConnectionDurationDescription = "The duration of connections on the server." - - // SignalrServerActiveConnections is the metric conforming to the - // "signalr.server.active_connections" semantic conventions. It represents the - // number of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - SignalrServerActiveConnectionsName = "signalr.server.active_connections" - SignalrServerActiveConnectionsUnit = "{connection}" - SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration" - // semantic conventions. It measures the duration of the - // function's logic execution. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSInvokeDurationName = "faas.invoke_duration" - FaaSInvokeDurationUnit = "s" - FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution" - - // FaaSInitDuration is the metric conforming to the "faas.init_duration" - // semantic conventions. It measures the duration of the - // function's initialization, such as a cold start. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSInitDurationName = "faas.init_duration" - FaaSInitDurationUnit = "s" - FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start" - - // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic - // conventions. It represents the number of invocation cold starts.
- // Instrument: counter - // Unit: {coldstart} - // Stability: Experimental - FaaSColdstartsName = "faas.coldstarts" - FaaSColdstartsUnit = "{coldstart}" - FaaSColdstartsDescription = "Number of invocation cold starts" - - // FaaSErrors is the metric conforming to the "faas.errors" semantic - // conventions. It represents the number of invocation errors. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - FaaSErrorsName = "faas.errors" - FaaSErrorsUnit = "{error}" - FaaSErrorsDescription = "Number of invocation errors" - - // FaaSInvocations is the metric conforming to the "faas.invocations" semantic - // conventions. It represents the number of successful invocations. - // Instrument: counter - // Unit: {invocation} - // Stability: Experimental - FaaSInvocationsName = "faas.invocations" - FaaSInvocationsUnit = "{invocation}" - FaaSInvocationsDescription = "Number of successful invocations" - - // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic - // conventions. It represents the number of invocation timeouts. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - FaaSTimeoutsName = "faas.timeouts" - FaaSTimeoutsUnit = "{timeout}" - FaaSTimeoutsDescription = "Number of invocation timeouts" - - // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic - // conventions. It represents the distribution of max memory usage per - // invocation. - // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSMemUsageName = "faas.mem_usage" - FaaSMemUsageUnit = "By" - FaaSMemUsageDescription = "Distribution of max memory usage per invocation" - - // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic - // conventions. It represents the distribution of CPU usage per invocation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSCPUUsageName = "faas.cpu_usage" - FaaSCPUUsageUnit = "s" - FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" - - // FaaSNetIo is the metric conforming to the "faas.net_io" semantic - // conventions. It represents the distribution of net I/O usage per invocation. - // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSNetIoName = "faas.net_io" - FaaSNetIoUnit = "By" - FaaSNetIoDescription = "Distribution of net I/O usage per invocation" - - // HTTPServerRequestDuration is the metric conforming to the - // "http.server.request.duration" semantic conventions. It represents the - // duration of HTTP server requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPServerRequestDurationName = "http.server.request.duration" - HTTPServerRequestDurationUnit = "s" - HTTPServerRequestDurationDescription = "Duration of HTTP server requests." - - // HTTPServerActiveRequests is the metric conforming to the - // "http.server.active_requests" semantic conventions. It represents the number - // of active HTTP server requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPServerActiveRequestsName = "http.server.active_requests" - HTTPServerActiveRequestsUnit = "{request}" - HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." - - // HTTPServerRequestBodySize is the metric conforming to the - // "http.server.request.body.size" semantic conventions. It represents the size - // of HTTP server request bodies. 
- // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerRequestBodySizeName = "http.server.request.body.size" - HTTPServerRequestBodySizeUnit = "By" - HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." - - // HTTPServerResponseBodySize is the metric conforming to the - // "http.server.response.body.size" semantic conventions. It represents the - // size of HTTP server response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerResponseBodySizeName = "http.server.response.body.size" - HTTPServerResponseBodySizeUnit = "By" - HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." - - // HTTPClientRequestDuration is the metric conforming to the - // "http.client.request.duration" semantic conventions. It represents the - // duration of HTTP client requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPClientRequestDurationName = "http.client.request.duration" - HTTPClientRequestDurationUnit = "s" - HTTPClientRequestDurationDescription = "Duration of HTTP client requests." - - // HTTPClientRequestBodySize is the metric conforming to the - // "http.client.request.body.size" semantic conventions. It represents the size - // of HTTP client request bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientRequestBodySizeName = "http.client.request.body.size" - HTTPClientRequestBodySizeUnit = "By" - HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." - - // HTTPClientResponseBodySize is the metric conforming to the - // "http.client.response.body.size" semantic conventions. It represents the - // size of HTTP client response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientResponseBodySizeName = "http.client.response.body.size" - HTTPClientResponseBodySizeUnit = "By" - HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." - - // HTTPClientOpenConnections is the metric conforming to the - // "http.client.open_connections" semantic conventions. It represents the - // number of outbound HTTP connections that are currently active or idle on the - // client. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - HTTPClientOpenConnectionsName = "http.client.open_connections" - HTTPClientOpenConnectionsUnit = "{connection}" - HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." - - // HTTPClientConnectionDuration is the metric conforming to the - // "http.client.connection.duration" semantic conventions. It represents the - // duration of the successfully established outbound HTTP connections. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientConnectionDurationName = "http.client.connection.duration" - HTTPClientConnectionDurationUnit = "s" - HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." - - // HTTPClientActiveRequests is the metric conforming to the - // "http.client.active_requests" semantic conventions. It represents the number - // of active HTTP requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPClientActiveRequestsName = "http.client.active_requests" - HTTPClientActiveRequestsUnit = "{request}" - HTTPClientActiveRequestsDescription = "Number of active HTTP requests." 
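Recording a sample against one of these instruments combines the metric constants with the attribute helpers from the attributes file deleted earlier in this diff (url.*, user_agent.*). A minimal sketch, again with illustrative wiring; in real code the instrument would be created once and reused rather than per call:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// recordRequestDuration records one http.server.request.duration sample in
// seconds, tagged with url.scheme and user_agent.original (values illustrative).
func recordRequestDuration(ctx context.Context, seconds float64) error {
	meter := otel.Meter("example/http")
	hist, err := meter.Float64Histogram(
		semconv.HTTPServerRequestDurationName,
		metric.WithUnit(semconv.HTTPServerRequestDurationUnit),
		metric.WithDescription(semconv.HTTPServerRequestDurationDescription),
	)
	if err != nil {
		return err
	}
	hist.Record(ctx, seconds, metric.WithAttributes(
		semconv.URLScheme("https"),
		semconv.UserAgentOriginal("curl/8.5.0"),
	))
	return nil
}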
- - // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic - // conventions. It represents the measure of initial memory requested. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmMemoryInitName = "jvm.memory.init" - JvmMemoryInitUnit = "By" - JvmMemoryInitDescription = "Measure of initial memory requested." - - // JvmSystemCPUUtilization is the metric conforming to the - // "jvm.system.cpu.utilization" semantic conventions. It represents the recent - // CPU utilization for the whole system as reported by the JVM. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" - JvmSystemCPUUtilizationUnit = "1" - JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." - - // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" - // semantic conventions. It represents the average CPU load of the whole system - // for the last minute as reported by the JVM. - // Instrument: gauge - // Unit: {run_queue_item} - // Stability: Experimental - JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" - JvmSystemCPULoad1mUnit = "{run_queue_item}" - JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." - - // JvmBufferMemoryUsage is the metric conforming to the - // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of - // memory used by buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" - JvmBufferMemoryUsageUnit = "By" - JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." - - // JvmBufferMemoryLimit is the metric conforming to the - // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of - // total memory capacity of buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" - JvmBufferMemoryLimitUnit = "By" - JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." - - // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic - // conventions. It represents the number of buffers in the pool. - // Instrument: updowncounter - // Unit: {buffer} - // Stability: Experimental - JvmBufferCountName = "jvm.buffer.count" - JvmBufferCountUnit = "{buffer}" - JvmBufferCountDescription = "Number of buffers in the pool." - - // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic - // conventions. It represents the measure of memory used. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedName = "jvm.memory.used" - JvmMemoryUsedUnit = "By" - JvmMemoryUsedDescription = "Measure of memory used." - - // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" - // semantic conventions. It represents the measure of memory committed. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryCommittedName = "jvm.memory.committed" - JvmMemoryCommittedUnit = "By" - JvmMemoryCommittedDescription = "Measure of memory committed." - - // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic - // conventions. It represents the measure of max obtainable memory. 
- // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryLimitName = "jvm.memory.limit" - JvmMemoryLimitUnit = "By" - JvmMemoryLimitDescription = "Measure of max obtainable memory." - - // JvmMemoryUsedAfterLastGc is the metric conforming to the - // "jvm.memory.used_after_last_gc" semantic conventions. It represents the - // measure of memory used, as measured after the most recent garbage collection - // event on this pool. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc" - JvmMemoryUsedAfterLastGcUnit = "By" - JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool." - - // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic - // conventions. It represents the duration of JVM garbage collection actions. - // Instrument: histogram - // Unit: s - // Stability: Stable - JvmGcDurationName = "jvm.gc.duration" - JvmGcDurationUnit = "s" - JvmGcDurationDescription = "Duration of JVM garbage collection actions." - - // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic - // conventions. It represents the number of executing platform threads. - // Instrument: updowncounter - // Unit: {thread} - // Stability: Stable - JvmThreadCountName = "jvm.thread.count" - JvmThreadCountUnit = "{thread}" - JvmThreadCountDescription = "Number of executing platform threads." - - // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic - // conventions. It represents the number of classes loaded since JVM start. - // Instrument: counter - // Unit: {class} - // Stability: Stable - JvmClassLoadedName = "jvm.class.loaded" - JvmClassLoadedUnit = "{class}" - JvmClassLoadedDescription = "Number of classes loaded since JVM start." - - // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded" - // semantic conventions. It represents the number of classes unloaded since JVM - // start. - // Instrument: counter - // Unit: {class} - // Stability: Stable - JvmClassUnloadedName = "jvm.class.unloaded" - JvmClassUnloadedUnit = "{class}" - JvmClassUnloadedDescription = "Number of classes unloaded since JVM start." - - // JvmClassCount is the metric conforming to the "jvm.class.count" semantic - // conventions. It represents the number of classes currently loaded. - // Instrument: updowncounter - // Unit: {class} - // Stability: Stable - JvmClassCountName = "jvm.class.count" - JvmClassCountUnit = "{class}" - JvmClassCountDescription = "Number of classes currently loaded." - - // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic - // conventions. It represents the number of processors available to the Java - // virtual machine. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Stable - JvmCPUCountName = "jvm.cpu.count" - JvmCPUCountUnit = "{cpu}" - JvmCPUCountDescription = "Number of processors available to the Java virtual machine." - - // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic - // conventions. It represents the CPU time used by the process as reported by - // the JVM. - // Instrument: counter - // Unit: s - // Stability: Stable - JvmCPUTimeName = "jvm.cpu.time" - JvmCPUTimeUnit = "s" - JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM." - - // JvmCPURecentUtilization is the metric conforming to the - // "jvm.cpu.recent_utilization" semantic conventions.
It represents the recent - // CPU utilization for the process as reported by the JVM. - // Instrument: gauge - // Unit: 1 - // Stability: Stable - JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" - JvmCPURecentUtilizationUnit = "1" - JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." - - // MessagingPublishDuration is the metric conforming to the - // "messaging.publish.duration" semantic conventions. It represents the - // measures the duration of publish operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingPublishDurationName = "messaging.publish.duration" - MessagingPublishDurationUnit = "s" - MessagingPublishDurationDescription = "Measures the duration of publish operation." - - // MessagingReceiveDuration is the metric conforming to the - // "messaging.receive.duration" semantic conventions. It represents the - // measures the duration of receive operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingReceiveDurationName = "messaging.receive.duration" - MessagingReceiveDurationUnit = "s" - MessagingReceiveDurationDescription = "Measures the duration of receive operation." - - // MessagingProcessDuration is the metric conforming to the - // "messaging.process.duration" semantic conventions. It represents the - // measures the duration of process operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingProcessDurationName = "messaging.process.duration" - MessagingProcessDurationUnit = "s" - MessagingProcessDurationDescription = "Measures the duration of process operation." - - // MessagingPublishMessages is the metric conforming to the - // "messaging.publish.messages" semantic conventions. It represents the - // measures the number of published messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingPublishMessagesName = "messaging.publish.messages" - MessagingPublishMessagesUnit = "{message}" - MessagingPublishMessagesDescription = "Measures the number of published messages." - - // MessagingReceiveMessages is the metric conforming to the - // "messaging.receive.messages" semantic conventions. It represents the - // measures the number of received messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingReceiveMessagesName = "messaging.receive.messages" - MessagingReceiveMessagesUnit = "{message}" - MessagingReceiveMessagesDescription = "Measures the number of received messages." - - // MessagingProcessMessages is the metric conforming to the - // "messaging.process.messages" semantic conventions. It represents the - // measures the number of processed messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingProcessMessagesName = "messaging.process.messages" - MessagingProcessMessagesUnit = "{message}" - MessagingProcessMessagesDescription = "Measures the number of processed messages." - - // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic - // conventions. It represents the total CPU seconds broken down by different - // states. - // Instrument: counter - // Unit: s - // Stability: Experimental - ProcessCPUTimeName = "process.cpu.time" - ProcessCPUTimeUnit = "s" - ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." - - // ProcessCPUUtilization is the metric conforming to the - // "process.cpu.utilization" semantic conventions. 
It represents the difference - // in process.cpu.time since the last measurement, divided by the elapsed time - // and number of CPUs available to the process. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - ProcessCPUUtilizationName = "process.cpu.utilization" - ProcessCPUUtilizationUnit = "1" - ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." - - // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" - // semantic conventions. It represents the amount of physical memory in use. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - ProcessMemoryUsageName = "process.memory.usage" - ProcessMemoryUsageUnit = "By" - ProcessMemoryUsageDescription = "The amount of physical memory in use." - - // ProcessMemoryVirtual is the metric conforming to the - // "process.memory.virtual" semantic conventions. It represents the amount of - // committed virtual memory. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - ProcessMemoryVirtualName = "process.memory.virtual" - ProcessMemoryVirtualUnit = "By" - ProcessMemoryVirtualDescription = "The amount of committed virtual memory." - - // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic - // conventions. It represents the disk bytes transferred. - // Instrument: counter - // Unit: By - // Stability: Experimental - ProcessDiskIoName = "process.disk.io" - ProcessDiskIoUnit = "By" - ProcessDiskIoDescription = "Disk bytes transferred." - - // ProcessNetworkIo is the metric conforming to the "process.network.io" - // semantic conventions. It represents the network bytes transferred. - // Instrument: counter - // Unit: By - // Stability: Experimental - ProcessNetworkIoName = "process.network.io" - ProcessNetworkIoUnit = "By" - ProcessNetworkIoDescription = "Network bytes transferred." - - // ProcessThreadCount is the metric conforming to the "process.thread.count" - // semantic conventions. It represents the process threads count. - // Instrument: updowncounter - // Unit: {thread} - // Stability: Experimental - ProcessThreadCountName = "process.thread.count" - ProcessThreadCountUnit = "{thread}" - ProcessThreadCountDescription = "Process threads count." - - // ProcessOpenFileDescriptorCount is the metric conforming to the - // "process.open_file_descriptor.count" semantic conventions. It represents the - // number of file descriptors in use by the process. - // Instrument: updowncounter - // Unit: {count} - // Stability: Experimental - ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" - ProcessOpenFileDescriptorCountUnit = "{count}" - ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." - - // ProcessContextSwitches is the metric conforming to the - // "process.context_switches" semantic conventions. It represents the number of - // times the process has been context switched. - // Instrument: counter - // Unit: {count} - // Stability: Experimental - ProcessContextSwitchesName = "process.context_switches" - ProcessContextSwitchesUnit = "{count}" - ProcessContextSwitchesDescription = "Number of times the process has been context switched." - - // ProcessPagingFaults is the metric conforming to the "process.paging.faults" - // semantic conventions. It represents the number of page faults the process - // has made. 
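Each deleted triple here pairs a metric name with its canonical unit and description. As a hedged sketch (not part of this diff, and using the v1.26.0 semconv package this change drops from the vendor tree), such constants were consumed roughly like this:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	meter := otel.Meter("example")
	// The Name/Unit/Description constants keep instrument registration in
	// sync with the semantic conventions.
	cpuTime, err := meter.Float64Counter(
		semconv.ProcessCPUTimeName,
		metric.WithUnit(semconv.ProcessCPUTimeUnit),
		metric.WithDescription(semconv.ProcessCPUTimeDescription),
	)
	if err != nil {
		panic(err)
	}
	cpuTime.Add(context.Background(), 0.25)
}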
- // Instrument: counter - // Unit: {fault} - // Stability: Experimental - ProcessPagingFaultsName = "process.paging.faults" - ProcessPagingFaultsUnit = "{fault}" - ProcessPagingFaultsDescription = "Number of page faults the process has made." - - // RPCServerDuration is the metric conforming to the "rpc.server.duration" - // semantic conventions. It represents the measures the duration of inbound - // RPC. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCServerDurationName = "rpc.server.duration" - RPCServerDurationUnit = "ms" - RPCServerDurationDescription = "Measures the duration of inbound RPC." - - // RPCServerRequestSize is the metric conforming to the - // "rpc.server.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerRequestSizeName = "rpc.server.request.size" - RPCServerRequestSizeUnit = "By" - RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCServerResponseSize is the metric conforming to the - // "rpc.server.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerResponseSizeName = "rpc.server.response.size" - RPCServerResponseSizeUnit = "By" - RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCServerRequestsPerRPC is the metric conforming to the - // "rpc.server.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" - RPCServerRequestsPerRPCUnit = "{count}" - RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCServerResponsesPerRPC is the metric conforming to the - // "rpc.server.responses_per_rpc" semantic conventions. It represents the - // measures the number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" - RPCServerResponsesPerRPCUnit = "{count}" - RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // RPCClientDuration is the metric conforming to the "rpc.client.duration" - // semantic conventions. It represents the measures the duration of outbound - // RPC. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCClientDurationName = "rpc.client.duration" - RPCClientDurationUnit = "ms" - RPCClientDurationDescription = "Measures the duration of outbound RPC." - - // RPCClientRequestSize is the metric conforming to the - // "rpc.client.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientRequestSizeName = "rpc.client.request.size" - RPCClientRequestSizeUnit = "By" - RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCClientResponseSize is the metric conforming to the - // "rpc.client.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). 
- // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientResponseSizeName = "rpc.client.response.size" - RPCClientResponseSizeUnit = "By" - RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCClientRequestsPerRPC is the metric conforming to the - // "rpc.client.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" - RPCClientRequestsPerRPCUnit = "{count}" - RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCClientResponsesPerRPC is the metric conforming to the - // "rpc.client.responses_per_rpc" semantic conventions. It represents the - // measures the number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" - RPCClientResponsesPerRPCUnit = "{count}" - RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic - // conventions. It represents the seconds each logical CPU spent on each mode. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemCPUTimeName = "system.cpu.time" - SystemCPUTimeUnit = "s" - SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" - - // SystemCPUUtilization is the metric conforming to the - // "system.cpu.utilization" semantic conventions. It represents the difference - // in system.cpu.time since the last measurement, divided by the elapsed time - // and number of logical CPUs. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - SystemCPUUtilizationName = "system.cpu.utilization" - SystemCPUUtilizationUnit = "1" - SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" - - // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" - // semantic conventions. It represents the reports the current frequency of the - // CPU in Hz. - // Instrument: gauge - // Unit: {Hz} - // Stability: Experimental - SystemCPUFrequencyName = "system.cpu.frequency" - SystemCPUFrequencyUnit = "{Hz}" - SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" - - // SystemCPUPhysicalCount is the metric conforming to the - // "system.cpu.physical.count" semantic conventions. It represents the reports - // the number of actual physical processor cores on the hardware. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPUPhysicalCountName = "system.cpu.physical.count" - SystemCPUPhysicalCountUnit = "{cpu}" - SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" - - // SystemCPULogicalCount is the metric conforming to the - // "system.cpu.logical.count" semantic conventions. It represents the reports - // the number of logical (virtual) processor cores created by the operating - // system to manage multitasking. 
- // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPULogicalCountName = "system.cpu.logical.count" - SystemCPULogicalCountUnit = "{cpu}" - SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" - - // SystemMemoryUsage is the metric conforming to the "system.memory.usage" - // semantic conventions. It represents the reports memory in use by state. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryUsageName = "system.memory.usage" - SystemMemoryUsageUnit = "By" - SystemMemoryUsageDescription = "Reports memory in use by state." - - // SystemMemoryLimit is the metric conforming to the "system.memory.limit" - // semantic conventions. It represents the total memory available in the - // system. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryLimitName = "system.memory.limit" - SystemMemoryLimitUnit = "By" - SystemMemoryLimitDescription = "Total memory available in the system." - - // SystemMemoryShared is the metric conforming to the "system.memory.shared" - // semantic conventions. It represents the shared memory used (mostly by - // tmpfs). - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemorySharedName = "system.memory.shared" - SystemMemorySharedUnit = "By" - SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." - - // SystemMemoryUtilization is the metric conforming to the - // "system.memory.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemMemoryUtilizationName = "system.memory.utilization" - SystemMemoryUtilizationUnit = "1" - - // SystemPagingUsage is the metric conforming to the "system.paging.usage" - // semantic conventions. It represents the unix swap or windows pagefile usage. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemPagingUsageName = "system.paging.usage" - SystemPagingUsageUnit = "By" - SystemPagingUsageDescription = "Unix swap or windows pagefile usage" - - // SystemPagingUtilization is the metric conforming to the - // "system.paging.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingUtilizationName = "system.paging.utilization" - SystemPagingUtilizationUnit = "1" - - // SystemPagingFaults is the metric conforming to the "system.paging.faults" - // semantic conventions. - // Instrument: counter - // Unit: {fault} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingFaultsName = "system.paging.faults" - SystemPagingFaultsUnit = "{fault}" - - // SystemPagingOperations is the metric conforming to the - // "system.paging.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingOperationsName = "system.paging.operations" - SystemPagingOperationsUnit = "{operation}" - - // SystemDiskIo is the metric conforming to the "system.disk.io" semantic - // conventions. 
- // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskIoName = "system.disk.io" - SystemDiskIoUnit = "By" - - // SystemDiskOperations is the metric conforming to the - // "system.disk.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskOperationsName = "system.disk.operations" - SystemDiskOperationsUnit = "{operation}" - - // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" - // semantic conventions. It represents the time disk spent activated. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskIoTimeName = "system.disk.io_time" - SystemDiskIoTimeUnit = "s" - SystemDiskIoTimeDescription = "Time disk spent activated" - - // SystemDiskOperationTime is the metric conforming to the - // "system.disk.operation_time" semantic conventions. It represents the sum of - // the time each operation took to complete. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskOperationTimeName = "system.disk.operation_time" - SystemDiskOperationTimeUnit = "s" - SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" - - // SystemDiskMerged is the metric conforming to the "system.disk.merged" - // semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskMergedName = "system.disk.merged" - SystemDiskMergedUnit = "{operation}" - - // SystemFilesystemUsage is the metric conforming to the - // "system.filesystem.usage" semantic conventions. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUsageName = "system.filesystem.usage" - SystemFilesystemUsageUnit = "By" - - // SystemFilesystemUtilization is the metric conforming to the - // "system.filesystem.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUtilizationName = "system.filesystem.utilization" - SystemFilesystemUtilizationUnit = "1" - - // SystemNetworkDropped is the metric conforming to the - // "system.network.dropped" semantic conventions. It represents the count of - // packets that are dropped or discarded even though there was no error. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - SystemNetworkDroppedName = "system.network.dropped" - SystemNetworkDroppedUnit = "{packet}" - SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" - - // SystemNetworkPackets is the metric conforming to the - // "system.network.packets" semantic conventions. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
- SystemNetworkPacketsName = "system.network.packets" - SystemNetworkPacketsUnit = "{packet}" - - // SystemNetworkErrors is the metric conforming to the "system.network.errors" - // semantic conventions. It represents the count of network errors detected. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - SystemNetworkErrorsName = "system.network.errors" - SystemNetworkErrorsUnit = "{error}" - SystemNetworkErrorsDescription = "Count of network errors detected" - - // SystemNetworkIo is the metric conforming to the "system.network.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkIoName = "system.network.io" - SystemNetworkIoUnit = "By" - - // SystemNetworkConnections is the metric conforming to the - // "system.network.connections" semantic conventions. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkConnectionsName = "system.network.connections" - SystemNetworkConnectionsUnit = "{connection}" - - // SystemProcessCount is the metric conforming to the "system.process.count" - // semantic conventions. It represents the total number of processes in each - // state. - // Instrument: updowncounter - // Unit: {process} - // Stability: Experimental - SystemProcessCountName = "system.process.count" - SystemProcessCountUnit = "{process}" - SystemProcessCountDescription = "Total number of processes in each state" - - // SystemProcessCreated is the metric conforming to the - // "system.process.created" semantic conventions. It represents the total - // number of processes created over uptime of the host. - // Instrument: counter - // Unit: {process} - // Stability: Experimental - SystemProcessCreatedName = "system.process.created" - SystemProcessCreatedUnit = "{process}" - SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" - - // SystemLinuxMemoryAvailable is the metric conforming to the - // "system.linux.memory.available" semantic conventions. It represents an - // estimate of how much memory is available for starting new applications, - // without causing swapping. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemLinuxMemoryAvailableName = "system.linux.memory.available" - SystemLinuxMemoryAvailableUnit = "By" - SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go deleted file mode 100644 index 4c87c7adc..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. 
Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/<version> -const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go index 666bded4b..267979c05 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go @@ -4,28 +4,53 @@ package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" import ( - "fmt" "reflect" "go.opentelemetry.io/otel/attribute" ) // ErrorType returns an [attribute.KeyValue] identifying the error type of err. +// +// If err is nil, the returned attribute has the default value +// [ErrorTypeOther]. +// +// If err's type has the method +// +// ErrorType() string +// +// then the returned attribute has the value of err.ErrorType(). Otherwise, the +// returned attribute has a value derived from the concrete type of err. +// +// The key of the returned attribute is [ErrorTypeKey]. func ErrorType(err error) attribute.KeyValue { if err == nil { return ErrorTypeOther } - t := reflect.TypeOf(err) - var value string - if t.PkgPath() == "" && t.Name() == "" { - // Likely a builtin type. - value = t.String() - } else { - value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) - } - if value == "" { - return ErrorTypeOther - } - return ErrorTypeKey.String(value) + return ErrorTypeKey.String(errorType(err)) +} + +func errorType(err error) string { + var s string + if et, ok := err.(interface{ ErrorType() string }); ok { + // Prioritize the ErrorType method if available. + s = et.ErrorType() + } + if s == "" { + // Fallback to reflection if the ErrorType method is not supported or + // returns an empty value. + + t := reflect.TypeOf(err) + pkg, name := t.PkgPath(), t.Name() + if pkg != "" && name != "" { + s = pkg + "." + name + } else { + // The type has no package path or name (predeclared, not-defined, + // or alias for a not-defined type). + // + // This is not guaranteed to be unique, but is a best effort. + s = t.String() + } + } + return s } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go index 55bde895d..a0ddf652d 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go @@ -91,6 +91,11 @@ type ClientActiveRequests struct { metric.Int64UpDownCounter } +var newClientActiveRequestsOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of active HTTP requests."), + metric.WithUnit("{request}"), +} + // NewClientActiveRequests returns a new ClientActiveRequests instrument. func NewClientActiveRequests( m metric.Meter, @@ -101,15 +106,18 @@ func NewClientActiveRequests( return ClientActiveRequests{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newClientActiveRequestsOpts + } else { + opt = append(opt, newClientActiveRequestsOpts...)
+ } + i, err := m.Int64UpDownCounter( "http.client.active_requests", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("Number of active HTTP requests."), - metric.WithUnit("{request}"), - }, opt...)..., + opt..., ) if err != nil { - return ClientActiveRequests{noop.Int64UpDownCounter{}}, err + return ClientActiveRequests{noop.Int64UpDownCounter{}}, err } return ClientActiveRequests{i}, nil } @@ -223,6 +231,11 @@ type ClientConnectionDuration struct { metric.Float64Histogram } +var newClientConnectionDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of the successfully established outbound HTTP connections."), + metric.WithUnit("s"), +} + // NewClientConnectionDuration returns a new ClientConnectionDuration instrument. func NewClientConnectionDuration( m metric.Meter, @@ -233,15 +246,18 @@ func NewClientConnectionDuration( return ClientConnectionDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientConnectionDurationOpts + } else { + opt = append(opt, newClientConnectionDurationOpts...) + } + i, err := m.Float64Histogram( "http.client.connection.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("The duration of the successfully established outbound HTTP connections."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return ClientConnectionDuration{noop.Float64Histogram{}}, err + return ClientConnectionDuration{noop.Float64Histogram{}}, err } return ClientConnectionDuration{i}, nil } @@ -310,6 +326,7 @@ func (m ClientConnectionDuration) Record( func (m ClientConnectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -353,6 +370,11 @@ type ClientOpenConnections struct { metric.Int64UpDownCounter } +var newClientOpenConnectionsOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."), + metric.WithUnit("{connection}"), +} + // NewClientOpenConnections returns a new ClientOpenConnections instrument. func NewClientOpenConnections( m metric.Meter, @@ -363,15 +385,18 @@ func NewClientOpenConnections( return ClientOpenConnections{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newClientOpenConnectionsOpts + } else { + opt = append(opt, newClientOpenConnectionsOpts...) + } + i, err := m.Int64UpDownCounter( "http.client.open_connections", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."), - metric.WithUnit("{connection}"), - }, opt...)..., + opt..., ) if err != nil { - return ClientOpenConnections{noop.Int64UpDownCounter{}}, err + return ClientOpenConnections{noop.Int64UpDownCounter{}}, err } return ClientOpenConnections{i}, nil } @@ -488,6 +513,11 @@ type ClientRequestBodySize struct { metric.Int64Histogram } +var newClientRequestBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP client request bodies."), + metric.WithUnit("By"), +} + // NewClientRequestBodySize returns a new ClientRequestBodySize instrument. 
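This constructor shape repeats for every instrument in the file: a package-level slice of default options is reused as-is when the caller passes none (avoiding an allocation on the common path), and appended after the caller's options otherwise. A minimal, self-contained sketch of the pattern; the names config, option, and newInstrument are illustrative, not otel API:

package main

import "fmt"

type config struct{ desc, unit string }

type option func(*config)

func withDescription(d string) option { return func(c *config) { c.desc = d } }

func withUnit(u string) option { return func(c *config) { c.unit = u } }

var defaults = []option{
	withDescription("Number of active HTTP requests."),
	withUnit("{request}"),
}

func newInstrument(opt ...option) config {
	if len(opt) == 0 {
		opt = defaults // reuse the shared slice: no allocation on the common path
	} else {
		opt = append(opt, defaults...)
	}
	var c config
	for _, o := range opt { // options apply in order
		o(&c)
	}
	return c
}

func main() {
	fmt.Println(newInstrument())                          // defaults only
	fmt.Println(newInstrument(withDescription("custom"))) // defaults applied last
}

Note the ordering consequence in this sketch: because options apply in order and the defaults come last, the semconv description and unit win over caller-supplied ones; how the real otel option types resolve such conflicts depends on their implementation.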
func NewClientRequestBodySize( m metric.Meter, @@ -498,15 +528,18 @@ func NewClientRequestBodySize( return ClientRequestBodySize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientRequestBodySizeOpts + } else { + opt = append(opt, newClientRequestBodySizeOpts...) + } + i, err := m.Int64Histogram( "http.client.request.body.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Size of HTTP client request bodies."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ClientRequestBodySize{noop.Int64Histogram{}}, err + return ClientRequestBodySize{noop.Int64Histogram{}}, err } return ClientRequestBodySize{i}, nil } @@ -593,6 +626,7 @@ func (m ClientRequestBodySize) Record( func (m ClientRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -662,6 +696,11 @@ type ClientRequestDuration struct { metric.Float64Histogram } +var newClientRequestDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("Duration of HTTP client requests."), + metric.WithUnit("s"), +} + // NewClientRequestDuration returns a new ClientRequestDuration instrument. func NewClientRequestDuration( m metric.Meter, @@ -672,15 +711,18 @@ func NewClientRequestDuration( return ClientRequestDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientRequestDurationOpts + } else { + opt = append(opt, newClientRequestDurationOpts...) + } + i, err := m.Float64Histogram( "http.client.request.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("Duration of HTTP client requests."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return ClientRequestDuration{noop.Float64Histogram{}}, err + return ClientRequestDuration{noop.Float64Histogram{}}, err } return ClientRequestDuration{i}, nil } @@ -753,6 +795,7 @@ func (m ClientRequestDuration) Record( func (m ClientRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -822,6 +865,11 @@ type ClientResponseBodySize struct { metric.Int64Histogram } +var newClientResponseBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP client response bodies."), + metric.WithUnit("By"), +} + // NewClientResponseBodySize returns a new ClientResponseBodySize instrument. func NewClientResponseBodySize( m metric.Meter, @@ -832,15 +880,18 @@ func NewClientResponseBodySize( return ClientResponseBodySize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientResponseBodySizeOpts + } else { + opt = append(opt, newClientResponseBodySizeOpts...) 
+ } + i, err := m.Int64Histogram( "http.client.response.body.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Size of HTTP client response bodies."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ClientResponseBodySize{noop.Int64Histogram{}}, err + return ClientResponseBodySize{noop.Int64Histogram{}}, err } return ClientResponseBodySize{i}, nil } @@ -927,6 +978,7 @@ func (m ClientResponseBodySize) Record( func (m ClientResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -996,6 +1048,11 @@ type ServerActiveRequests struct { metric.Int64UpDownCounter } +var newServerActiveRequestsOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of active HTTP server requests."), + metric.WithUnit("{request}"), +} + // NewServerActiveRequests returns a new ServerActiveRequests instrument. func NewServerActiveRequests( m metric.Meter, @@ -1006,15 +1063,18 @@ func NewServerActiveRequests( return ServerActiveRequests{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newServerActiveRequestsOpts + } else { + opt = append(opt, newServerActiveRequestsOpts...) + } + i, err := m.Int64UpDownCounter( "http.server.active_requests", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("Number of active HTTP server requests."), - metric.WithUnit("{request}"), - }, opt...)..., + opt..., ) if err != nil { - return ServerActiveRequests{noop.Int64UpDownCounter{}}, err + return ServerActiveRequests{noop.Int64UpDownCounter{}}, err } return ServerActiveRequests{i}, nil } @@ -1118,6 +1178,11 @@ type ServerRequestBodySize struct { metric.Int64Histogram } +var newServerRequestBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP server request bodies."), + metric.WithUnit("By"), +} + // NewServerRequestBodySize returns a new ServerRequestBodySize instrument. func NewServerRequestBodySize( m metric.Meter, @@ -1128,15 +1193,18 @@ func NewServerRequestBodySize( return ServerRequestBodySize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newServerRequestBodySizeOpts + } else { + opt = append(opt, newServerRequestBodySizeOpts...) + } + i, err := m.Int64Histogram( "http.server.request.body.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Size of HTTP server request bodies."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ServerRequestBodySize{noop.Int64Histogram{}}, err + return ServerRequestBodySize{noop.Int64Histogram{}}, err } return ServerRequestBodySize{i}, nil } @@ -1220,6 +1288,7 @@ func (m ServerRequestBodySize) Record( func (m ServerRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -1299,6 +1368,11 @@ type ServerRequestDuration struct { metric.Float64Histogram } +var newServerRequestDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("Duration of HTTP server requests."), + metric.WithUnit("s"), +} + // NewServerRequestDuration returns a new ServerRequestDuration instrument. 
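Several RecordSet hunks above add only a bare return. Without it, the zero-attribute fast path fell through into the slow path and recorded the value a second time. A reduced sketch of that control-flow bug; recorder stands in for the embedded histogram and is not otel code:

package main

import "fmt"

type recorder struct{ calls int }

func (r *recorder) record(val float64) { r.calls++ }

// recordSet mirrors the shape of the fixed methods: a fast path for an empty
// attribute set, then a slow path that records with attribute options.
func recordSet(r *recorder, val float64, attrCount int) {
	if attrCount == 0 {
		r.record(val)
		return // the added return; omitting it records val a second time below
	}
	r.record(val) // slow path (attribute handling elided)
}

func main() {
	r := &recorder{}
	recordSet(r, 1.5, 0)
	fmt.Println(r.calls) // 1 with the fix; 2 without it
}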
func NewServerRequestDuration( m metric.Meter, @@ -1309,15 +1383,18 @@ func NewServerRequestDuration( return ServerRequestDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newServerRequestDurationOpts + } else { + opt = append(opt, newServerRequestDurationOpts...) + } + i, err := m.Float64Histogram( "http.server.request.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("Duration of HTTP server requests."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return ServerRequestDuration{noop.Float64Histogram{}}, err + return ServerRequestDuration{noop.Float64Histogram{}}, err } return ServerRequestDuration{i}, nil } @@ -1387,6 +1464,7 @@ func (m ServerRequestDuration) Record( func (m ServerRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -1466,6 +1544,11 @@ type ServerResponseBodySize struct { metric.Int64Histogram } +var newServerResponseBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP server response bodies."), + metric.WithUnit("By"), +} + // NewServerResponseBodySize returns a new ServerResponseBodySize instrument. func NewServerResponseBodySize( m metric.Meter, @@ -1476,15 +1559,18 @@ func NewServerResponseBodySize( return ServerResponseBodySize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newServerResponseBodySizeOpts + } else { + opt = append(opt, newServerResponseBodySizeOpts...) + } + i, err := m.Int64Histogram( "http.server.response.body.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Size of HTTP server response bodies."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ServerResponseBodySize{noop.Int64Histogram{}}, err + return ServerResponseBodySize{noop.Int64Histogram{}}, err } return ServerResponseBodySize{i}, nil } @@ -1568,6 +1654,7 @@ func (m ServerResponseBodySize) Record( func (m ServerResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -1638,4 +1725,4 @@ func (ServerResponseBodySize) AttrServerPort(val int) attribute.KeyValue { // the category of synthetic traffic, such as tests or bots. func (ServerResponseBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue { return attribute.String("user_agent.synthetic.type", string(val)) -} \ No newline at end of file +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go index a78eafd1f..fd064530c 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go @@ -3,7 +3,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// Package httpconv provides types and functionality for OpenTelemetry semantic +// Package otelconv provides types and functionality for OpenTelemetry semantic // conventions in the "otel" namespace. 
package otelconv @@ -172,6 +172,11 @@ type SDKExporterLogExported struct { metric.Int64Counter } +var newSDKExporterLogExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the export has finished, either successful or failed."), + metric.WithUnit("{log_record}"), +} + // NewSDKExporterLogExported returns a new SDKExporterLogExported instrument. func NewSDKExporterLogExported( m metric.Meter, @@ -182,15 +187,18 @@ func NewSDKExporterLogExported( return SDKExporterLogExported{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterLogExportedOpts + } else { + opt = append(opt, newSDKExporterLogExportedOpts...) + } + i, err := m.Int64Counter( "otel.sdk.exporter.log.exported", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of log records for which the export has finished, either successful or failed."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterLogExported{noop.Int64Counter{}}, err + return SDKExporterLogExported{noop.Int64Counter{}}, err } return SDKExporterLogExported{i}, nil } @@ -319,6 +327,11 @@ type SDKExporterLogInflight struct { metric.Int64UpDownCounter } +var newSDKExporterLogInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{log_record}"), +} + // NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument. func NewSDKExporterLogInflight( m metric.Meter, @@ -329,15 +342,18 @@ func NewSDKExporterLogInflight( return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterLogInflightOpts + } else { + opt = append(opt, newSDKExporterLogInflightOpts...) + } + i, err := m.Int64UpDownCounter( "otel.sdk.exporter.log.inflight", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err } return SDKExporterLogInflight{i}, nil } @@ -449,6 +465,11 @@ type SDKExporterMetricDataPointExported struct { metric.Int64Counter } +var newSDKExporterMetricDataPointExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."), + metric.WithUnit("{data_point}"), +} + // NewSDKExporterMetricDataPointExported returns a new // SDKExporterMetricDataPointExported instrument. func NewSDKExporterMetricDataPointExported( @@ -460,15 +481,18 @@ func NewSDKExporterMetricDataPointExported( return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterMetricDataPointExportedOpts + } else { + opt = append(opt, newSDKExporterMetricDataPointExportedOpts...) 
+ } + i, err := m.Int64Counter( "otel.sdk.exporter.metric_data_point.exported", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."), - metric.WithUnit("{data_point}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err } return SDKExporterMetricDataPointExported{i}, nil } @@ -598,6 +622,11 @@ type SDKExporterMetricDataPointInflight struct { metric.Int64UpDownCounter } +var newSDKExporterMetricDataPointInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{data_point}"), +} + // NewSDKExporterMetricDataPointInflight returns a new // SDKExporterMetricDataPointInflight instrument. func NewSDKExporterMetricDataPointInflight( @@ -609,15 +638,18 @@ func NewSDKExporterMetricDataPointInflight( return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterMetricDataPointInflightOpts + } else { + opt = append(opt, newSDKExporterMetricDataPointInflightOpts...) + } + i, err := m.Int64UpDownCounter( "otel.sdk.exporter.metric_data_point.inflight", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), - metric.WithUnit("{data_point}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err } return SDKExporterMetricDataPointInflight{i}, nil } @@ -728,6 +760,11 @@ type SDKExporterOperationDuration struct { metric.Float64Histogram } +var newSDKExporterOperationDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of exporting a batch of telemetry records."), + metric.WithUnit("s"), +} + // NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration // instrument. func NewSDKExporterOperationDuration( @@ -739,15 +776,18 @@ func NewSDKExporterOperationDuration( return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterOperationDurationOpts + } else { + opt = append(opt, newSDKExporterOperationDurationOpts...) 
+ } + i, err := m.Float64Histogram( "otel.sdk.exporter.operation.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("The duration of exporting a batch of telemetry records."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterOperationDuration{noop.Float64Histogram{}}, err + return SDKExporterOperationDuration{noop.Float64Histogram{}}, err } return SDKExporterOperationDuration{i}, nil } @@ -825,6 +865,7 @@ func (m SDKExporterOperationDuration) Record( func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -893,6 +934,11 @@ type SDKExporterSpanExported struct { metric.Int64Counter } +var newSDKExporterSpanExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the export has finished, either successful or failed."), + metric.WithUnit("{span}"), +} + // NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument. func NewSDKExporterSpanExported( m metric.Meter, @@ -903,15 +949,18 @@ func NewSDKExporterSpanExported( return SDKExporterSpanExported{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterSpanExportedOpts + } else { + opt = append(opt, newSDKExporterSpanExportedOpts...) + } + i, err := m.Int64Counter( "otel.sdk.exporter.span.exported", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of spans for which the export has finished, either successful or failed."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterSpanExported{noop.Int64Counter{}}, err + return SDKExporterSpanExported{noop.Int64Counter{}}, err } return SDKExporterSpanExported{i}, nil } @@ -1040,6 +1089,11 @@ type SDKExporterSpanInflight struct { metric.Int64UpDownCounter } +var newSDKExporterSpanInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{span}"), +} + // NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument. func NewSDKExporterSpanInflight( m metric.Meter, @@ -1050,15 +1104,18 @@ func NewSDKExporterSpanInflight( return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterSpanInflightOpts + } else { + opt = append(opt, newSDKExporterSpanInflightOpts...) + } + i, err := m.Int64UpDownCounter( "otel.sdk.exporter.span.inflight", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err } return SDKExporterSpanInflight{i}, nil } @@ -1169,6 +1226,11 @@ type SDKLogCreated struct { metric.Int64Counter } +var newSDKLogCreatedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of logs submitted to enabled SDK Loggers."), + metric.WithUnit("{log_record}"), +} + // NewSDKLogCreated returns a new SDKLogCreated instrument. 
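A hedged usage sketch for these generated wrappers, relying only on signatures visible in this diff (NewSDKExporterOperationDuration and its RecordSet). With no SDK installed, otel.Meter returns a no-op implementation, so this runs without exporting anything:

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
)

func main() {
	meter := otel.Meter("example")

	// The constructor applies the canonical description and unit ("s") itself.
	opDur, err := otelconv.NewSDKExporterOperationDuration(meter)
	if err != nil {
		panic(err)
	}

	start := time.Now()
	// ... export a batch of telemetry here ...
	opDur.RecordSet(context.Background(), time.Since(start).Seconds(), attribute.NewSet())
}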
func NewSDKLogCreated( m metric.Meter, @@ -1179,15 +1241,18 @@ func NewSDKLogCreated( return SDKLogCreated{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKLogCreatedOpts + } else { + opt = append(opt, newSDKLogCreatedOpts...) + } + i, err := m.Int64Counter( "otel.sdk.log.created", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of logs submitted to enabled SDK Loggers."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKLogCreated{noop.Int64Counter{}}, err + return SDKLogCreated{noop.Int64Counter{}}, err } return SDKLogCreated{i}, nil } @@ -1254,6 +1319,11 @@ type SDKMetricReaderCollectionDuration struct { metric.Float64Histogram } +var newSDKMetricReaderCollectionDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of the collect operation of the metric reader."), + metric.WithUnit("s"), +} + // NewSDKMetricReaderCollectionDuration returns a new // SDKMetricReaderCollectionDuration instrument. func NewSDKMetricReaderCollectionDuration( @@ -1265,15 +1335,18 @@ func NewSDKMetricReaderCollectionDuration( return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newSDKMetricReaderCollectionDurationOpts + } else { + opt = append(opt, newSDKMetricReaderCollectionDurationOpts...) + } + i, err := m.Float64Histogram( "otel.sdk.metric_reader.collection.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("The duration of the collect operation of the metric reader."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err } return SDKMetricReaderCollectionDuration{i}, nil } @@ -1343,6 +1416,7 @@ func (m SDKMetricReaderCollectionDuration) Record( func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -1384,6 +1458,11 @@ type SDKProcessorLogProcessed struct { metric.Int64Counter } +var newSDKProcessorLogProcessedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."), + metric.WithUnit("{log_record}"), +} + // NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument. func NewSDKProcessorLogProcessed( m metric.Meter, @@ -1394,15 +1473,18 @@ func NewSDKProcessorLogProcessed( return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorLogProcessedOpts + } else { + opt = append(opt, newSDKProcessorLogProcessedOpts...) 
+ } + i, err := m.Int64Counter( "otel.sdk.processor.log.processed", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorLogProcessed{noop.Int64Counter{}}, err + return SDKProcessorLogProcessed{noop.Int64Counter{}}, err } return SDKProcessorLogProcessed{i}, nil } @@ -1515,6 +1597,11 @@ type SDKProcessorLogQueueCapacity struct { metric.Int64ObservableUpDownCounter } +var newSDKProcessorLogQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."), + metric.WithUnit("{log_record}"), +} + // NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity // instrument. func NewSDKProcessorLogQueueCapacity( @@ -1526,15 +1613,18 @@ func NewSDKProcessorLogQueueCapacity( return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorLogQueueCapacityOpts + } else { + opt = append(opt, newSDKProcessorLogQueueCapacityOpts...) + } + i, err := m.Int64ObservableUpDownCounter( "otel.sdk.processor.log.queue.capacity", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err } return SDKProcessorLogQueueCapacity{i}, nil } @@ -1581,6 +1671,11 @@ type SDKProcessorLogQueueSize struct { metric.Int64ObservableUpDownCounter } +var newSDKProcessorLogQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."), + metric.WithUnit("{log_record}"), +} + // NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument. func NewSDKProcessorLogQueueSize( m metric.Meter, @@ -1591,15 +1686,18 @@ func NewSDKProcessorLogQueueSize( return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorLogQueueSizeOpts + } else { + opt = append(opt, newSDKProcessorLogQueueSizeOpts...) + } + i, err := m.Int64ObservableUpDownCounter( "otel.sdk.processor.log.queue.size", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err } return SDKProcessorLogQueueSize{i}, nil } @@ -1646,6 +1744,11 @@ type SDKProcessorSpanProcessed struct { metric.Int64Counter } +var newSDKProcessorSpanProcessedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."), + metric.WithUnit("{span}"), +} + // NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed // instrument. 
func NewSDKProcessorSpanProcessed( @@ -1657,15 +1760,18 @@ func NewSDKProcessorSpanProcessed( return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorSpanProcessedOpts + } else { + opt = append(opt, newSDKProcessorSpanProcessedOpts...) + } + i, err := m.Int64Counter( "otel.sdk.processor.span.processed", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err } return SDKProcessorSpanProcessed{i}, nil } @@ -1778,6 +1884,11 @@ type SDKProcessorSpanQueueCapacity struct { metric.Int64ObservableUpDownCounter } +var newSDKProcessorSpanQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."), + metric.WithUnit("{span}"), +} + // NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity // instrument. func NewSDKProcessorSpanQueueCapacity( @@ -1789,15 +1900,18 @@ func NewSDKProcessorSpanQueueCapacity( return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorSpanQueueCapacityOpts + } else { + opt = append(opt, newSDKProcessorSpanQueueCapacityOpts...) + } + i, err := m.Int64ObservableUpDownCounter( "otel.sdk.processor.span.queue.capacity", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err } return SDKProcessorSpanQueueCapacity{i}, nil } @@ -1844,6 +1958,11 @@ type SDKProcessorSpanQueueSize struct { metric.Int64ObservableUpDownCounter } +var newSDKProcessorSpanQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."), + metric.WithUnit("{span}"), +} + // NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize // instrument. func NewSDKProcessorSpanQueueSize( @@ -1855,15 +1974,18 @@ func NewSDKProcessorSpanQueueSize( return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorSpanQueueSizeOpts + } else { + opt = append(opt, newSDKProcessorSpanQueueSizeOpts...) 
+ } + i, err := m.Int64ObservableUpDownCounter( "otel.sdk.processor.span.queue.size", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err } return SDKProcessorSpanQueueSize{i}, nil } @@ -1910,6 +2032,11 @@ type SDKSpanLive struct { metric.Int64UpDownCounter } +var newSDKSpanLiveOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."), + metric.WithUnit("{span}"), +} + // NewSDKSpanLive returns a new SDKSpanLive instrument. func NewSDKSpanLive( m metric.Meter, @@ -1920,15 +2047,18 @@ func NewSDKSpanLive( return SDKSpanLive{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKSpanLiveOpts + } else { + opt = append(opt, newSDKSpanLiveOpts...) + } + i, err := m.Int64UpDownCounter( "otel.sdk.span.live", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKSpanLive{noop.Int64UpDownCounter{}}, err + return SDKSpanLive{noop.Int64UpDownCounter{}}, err } return SDKSpanLive{i}, nil } @@ -2013,6 +2143,11 @@ type SDKSpanStarted struct { metric.Int64Counter } +var newSDKSpanStartedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of created spans."), + metric.WithUnit("{span}"), +} + // NewSDKSpanStarted returns a new SDKSpanStarted instrument. func NewSDKSpanStarted( m metric.Meter, @@ -2023,15 +2158,18 @@ func NewSDKSpanStarted( return SDKSpanStarted{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKSpanStartedOpts + } else { + opt = append(opt, newSDKSpanStartedOpts...) + } + i, err := m.Int64Counter( "otel.sdk.span.started", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of created spans."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKSpanStarted{noop.Int64Counter{}}, err + return SDKSpanStarted{noop.Int64Counter{}}, err } return SDKSpanStarted{i}, nil } @@ -2123,4 +2261,4 @@ func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.K // value of the sampler for this span. func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { return attribute.String("otel.span.sampling_result", string(val)) -} \ No newline at end of file +} diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index aea11a2b5..d9ecef1ca 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -4,6 +4,7 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( + "slices" "time" "go.opentelemetry.io/otel/attribute" @@ -304,12 +305,50 @@ func WithInstrumentationVersion(version string) TracerOption { }) } -// WithInstrumentationAttributes sets the instrumentation attributes. +// mergeSets returns the union of keys between a and b. Any duplicate keys will +// use the value associated with b. 
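
The otelconv hunks above all apply the same refactor: each generated constructor now reuses a package-level slice of default options instead of rebuilding it with `append` on every call, using the defaults directly when the caller passes none and appending them after the caller's options otherwise. A minimal sketch of that pattern outside the generated code, with hypothetical names (`defaultOpts`, `newExampleCounter`, `"example.counter"`):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

// defaultOpts mirrors the vendored pattern: package-level defaults that are
// reused instead of being re-allocated on every constructor call.
var defaultOpts = []metric.Int64CounterOption{
	metric.WithDescription("Example counter (hypothetical)."),
	metric.WithUnit("{thing}"),
}

// newExampleCounter follows the same shape as the generated constructors:
// the defaults are used as-is when no caller options are given, and are
// appended after the caller's options otherwise.
func newExampleCounter(m metric.Meter, opt ...metric.Int64CounterOption) (metric.Int64Counter, error) {
	if len(opt) == 0 {
		opt = defaultOpts
	} else {
		opt = append(opt, defaultOpts...)
	}
	return m.Int64Counter("example.counter", opt...)
}

func main() {
	c, err := newExampleCounter(noop.NewMeterProvider().Meter("demo"))
	fmt.Println(c != nil, err) // true <nil>
}
```

The zero-option fast path avoids an allocation for the common case where callers take the canonical description and unit.
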
+func mergeSets(a, b attribute.Set) attribute.Set { + // NewMergeIterator uses the first value for any duplicates. + iter := attribute.NewMergeIterator(&b, &a) + merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for iter.Next() { + merged = append(merged, iter.Attribute()) + } + return attribute.NewSet(merged...) +} + +// WithInstrumentationAttributes adds the instrumentation attributes. // -// The passed attributes will be de-duplicated. +// This is equivalent to calling [WithInstrumentationAttributeSet] with an +// [attribute.Set] created from a clone of the passed attributes. +// [WithInstrumentationAttributeSet] is recommended for more control. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + set := attribute.NewSet(slices.Clone(attr)...) + return WithInstrumentationAttributeSet(set) +} + +// WithInstrumentationAttributeSet adds the instrumentation attributes. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. +func WithInstrumentationAttributeSet(set attribute.Set) TracerOption { + if set.Len() == 0 { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + return config + }) + } + return tracerOptionFunc(func(config TracerConfig) TracerConfig { - config.attrs = attribute.NewSet(attr...) + if config.attrs.Len() == 0 { + config.attrs = set + } else { + config.attrs = mergeSets(config.attrs, set) + } return config }) } diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go index d3aa476ee..d01e79366 100644 --- a/vendor/go.opentelemetry.io/otel/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -66,6 +66,10 @@ type Span interface { // SetAttributes sets kv as attributes of the Span. If a key from kv // already exists for an attribute of the Span it will be overwritten with // the value contained in kv. + // + // Note that adding attributes at span creation using [WithAttributes] is preferred + // to calling SetAttribute later, as samplers can only consider information + // already present during span creation. SetAttributes(kv ...attribute.KeyValue) // TracerProvider returns a TracerProvider that can be used to generate diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index bcaa5aa53..0d5b02918 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
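
The `trace/config.go` hunk above changes `WithInstrumentationAttributes` from replacing the configured set to merging into it, and introduces `WithInstrumentationAttributeSet`. A small sketch of the resulting behavior, assuming the v1.39.0 API described in the hunk; the `env`/`team` keys are illustrative only:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Two options contribute attributes; per the merge semantics above,
	// the later value wins for the duplicate "env" key.
	cfg := trace.NewTracerConfig(
		trace.WithInstrumentationAttributes(
			attribute.String("env", "dev"),
			attribute.String("team", "obs"),
		),
		trace.WithInstrumentationAttributeSet(
			attribute.NewSet(attribute.String("env", "prod")),
		),
	)
	// attribute.Set iterates in key order, so this prints
	// env=prod then team=obs.
	for _, kv := range cfg.InstrumentationAttributes().ToSlice() {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.AsString())
	}
}
```
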
func Version() string { - return "1.38.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 07145e254..f4a3893eb 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.38.0 + version: v1.39.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -22,11 +22,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.60.0 + version: v0.61.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.14.0 + version: v0.15.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/log/logtest @@ -36,9 +36,28 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.13 + version: v0.0.14 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - go.opentelemetry.io/otel/trace/internal/telemetry/test +modules: + go.opentelemetry.io/otel/exporters/stdout/stdouttrace: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/prometheus: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp: + version-refs: + - ./internal/version.go diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go index a7c5d19bf..1f8d49bc9 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -34,7 +34,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// AnyValue is used to represent any type of attribute value. AnyValue may contain a +// Represents any type of attribute value. AnyValue may contain a // primitive value such as a string or integer or it may contain an arbitrary nested // object containing arrays, key-value lists and primitives. type AnyValue struct { @@ -252,8 +252,10 @@ type KeyValueList struct { // A collection of key/value pairs of key-value pairs. The list may be empty (may // contain 0 elements). + // // The keys MUST be unique (it is not allowed to have more than one // value with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` } @@ -296,14 +298,16 @@ func (x *KeyValueList) GetValues() []*KeyValue { return nil } -// KeyValue is a key-value pair that is used to store Span attributes, Link +// Represents a key-value pair that is used to store Span attributes, Link // attributes, etc. type KeyValue struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The key name of the pair. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value of the pair. 
Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } @@ -360,14 +364,21 @@ type InstrumentationScope struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // A name denoting the Instrumentation scope. // An empty instrumentation scope name means the name is unknown. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Defines the version of the instrumentation scope. + // An empty instrumentation scope version means the version is unknown. Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` // Additional attributes that describe the scope. [Optional]. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). - Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` - DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // The behavior of software that receives duplicated keys can be unpredictable. + Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + // The number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` } func (x *InstrumentationScope) Reset() { diff --git a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go index ff8282791..748c9fb1c 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go @@ -375,7 +375,8 @@ type ScopeMetrics struct { // is recorded in. Notably, the last part of the URL path is the version number of the // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all metrics in the "metrics" field. + // This schema_url applies to the data in the "scope" field and all metrics in the + // "metrics" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` } @@ -521,11 +522,11 @@ type Metric struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // name of the metric. + // The name of the metric. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // description of the metric, which can be used in documentation. + // A description of the metric, which can be used in documentation. Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - // unit in which the metric value is reported. Follows the format + // The unit in which the metric value is reported. Follows the format // described by https://unitsofmeasure.org/ucum.html. Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` // Data determines the aggregation type (if any) of the metric, what is the @@ -546,6 +547,7 @@ type Metric struct { // for lossless roundtrip translation to / from another data model. 
// Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Metadata []*v11.KeyValue `protobuf:"bytes,12,rep,name=metadata,proto3" json:"metadata,omitempty"` } @@ -699,6 +701,8 @@ type Gauge struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The time series data points. + // Note: Multiple time series may be included (same timestamp, different attributes). DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` } @@ -748,11 +752,13 @@ type Sum struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The time series data points. + // Note: Multiple time series may be included (same timestamp, different attributes). DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` // aggregation_temporality describes if the aggregator reports delta changes // since last report time, or cumulative changes since a fixed start time. AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` - // If "true" means that the sum is monotonic. + // Represents whether the sum is monotonic. IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"` } @@ -816,6 +822,8 @@ type Histogram struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The time series data points. + // Note: Multiple time series may be included (same timestamp, different attributes). DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` // aggregation_temporality describes if the aggregator reports delta changes // since last report time, or cumulative changes since a fixed start time. @@ -875,6 +883,8 @@ type ExponentialHistogram struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The time series data points. + // Note: Multiple time series may be included (same timestamp, different attributes). DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` // aggregation_temporality describes if the aggregator reports delta changes // since last report time, or cumulative changes since a fixed start time. @@ -941,6 +951,8 @@ type Summary struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The time series data points. + // Note: Multiple time series may be included (same timestamp, different attributes). DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` } @@ -994,16 +1006,7 @@ type NumberDataPoint struct { // where this point belongs. The list may be empty (may contain 0 elements). // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. - // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. 
- // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. - // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"` // StartTimeUnixNano is optional but strongly encouraged, see the // the detailed comments above Metric. @@ -1154,16 +1157,7 @@ type HistogramDataPoint struct { // where this point belongs. The list may be empty (may contain 0 elements). // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. - // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. - // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. - // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` // StartTimeUnixNano is optional but strongly encouraged, see the // the detailed comments above Metric. @@ -1351,16 +1345,7 @@ type ExponentialHistogramDataPoint struct { // where this point belongs. The list may be empty (may contain 0 elements). // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. - // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. - // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. - // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` // StartTimeUnixNano is optional but strongly encouraged, see the // the detailed comments above Metric. @@ -1373,11 +1358,11 @@ type ExponentialHistogramDataPoint struct { // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January // 1970. TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // count is the number of values in the population. Must be + // The number of values in the population. Must be // non-negative. 
This value must be equal to the sum of the "bucket_counts" // values in the positive and negative Buckets plus the "zero_count" field. Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` - // sum of the values in the population. If count is zero then this field + // The sum of the values in the population. If count is zero then this field // must be zero. // // Note: Sum should only be filled out when measuring non-negative discrete @@ -1402,7 +1387,7 @@ type ExponentialHistogramDataPoint struct { // scale is not restricted by the protocol, as the permissible // values depend on the range of the data. Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"` - // zero_count is the count of values that are either exactly zero or + // The count of values that are either exactly zero or // within the region considered zero by the instrumentation at the // tolerated degree of precision. This bucket stores values that // cannot be expressed using the standard exponential formula as @@ -1421,9 +1406,9 @@ type ExponentialHistogramDataPoint struct { // (Optional) List of exemplars collected from // measurements that were used to form the data point Exemplars []*Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars,omitempty"` - // min is the minimum value over (start_time, end_time]. + // The minimum value over (start_time, end_time]. Min *float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"` - // max is the maximum value over (start_time, end_time]. + // The maximum value over (start_time, end_time]. Max *float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"` // ZeroThreshold may be optionally set to convey the width of the zero // region. Where the zero region is defined as the closed interval @@ -1576,16 +1561,7 @@ type SummaryDataPoint struct { // where this point belongs. The list may be empty (may contain 0 elements). // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. - // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. - // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. - // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"` // StartTimeUnixNano is optional but strongly encouraged, see the // the detailed comments above Metric. @@ -1838,11 +1814,11 @@ type ExponentialHistogramDataPoint_Buckets struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Offset is the bucket index of the first entry in the bucket_counts array. + // The bucket index of the first entry in the bucket_counts array. // // Note: This uses a varint encoding as a simple form of compression. 
Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` - // bucket_counts is an array of count values, where bucket_counts[i] carries + // An array of count values, where bucket_counts[i] carries // the count of the bucket at index (offset+i). bucket_counts[i] is the count // of values greater than base^(offset+i) and less than or equal to // base^(offset+i+1). diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go index 3bcfcd82b..301247ddf 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go @@ -44,18 +44,9 @@ type Resource struct { // Set of attributes that describe the resource. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. - // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. - // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. - // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // The number of dropped attributes. If the value is 0, then // no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` // Set of entities that participate in this Resource. diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go index f9bdfc0aa..d7bfca902 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -388,7 +388,8 @@ type ScopeSpans struct { // is recorded in. Notably, the last part of the URL path is the version number of the // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all spans and span events in the "spans" field. + // This schema_url applies to the data in the "scope" field and all spans and span + // events in the "spans" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` } @@ -512,21 +513,21 @@ type Span struct { // two spans with the same name may be distinguished using `CLIENT` (caller) // and `SERVER` (callee) to identify queueing latency associated with the span. Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` - // start_time_unix_nano is the start time of the span. On the client side, this is the time + // The start time of the span. 
On the client side, this is the time // kept by the local machine where the span execution starts. On the server side, this // is the time when the server's application handler starts running. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. // // This field is semantically required and it is expected that end_time >= start_time. StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // end_time_unix_nano is the end time of the span. On the client side, this is the time + // The end time of the span. On the client side, this is the time // kept by the local machine where the span execution ends. On the server side, this // is the time when the server application handler stops running. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. // // This field is semantically required and it is expected that end_time >= start_time. EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` - // attributes is a collection of key/value pairs. Note, global attributes + // A collection of key/value pairs. Note, global attributes // like server name can be set using the resource API. Examples of attributes: // // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" @@ -536,30 +537,21 @@ type Span struct { // // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. - // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. - // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. - // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of attributes that were discarded. Attributes + // The number of attributes that were discarded. Attributes // can be discarded because their keys are too long or because there are too many // attributes. If this value is 0, then no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - // events is a collection of Event items. + // A collection of Event items. Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` - // dropped_events_count is the number of dropped events. If the value is 0, then no + // The number of dropped events. If the value is 0, then no // events were dropped. 
DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"` - // links is a collection of Links, which are references from this span to a span + // A collection of Links, which are references from this span to a span // in the same or different trace. Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"` - // dropped_links_count is the number of dropped links after the maximum size was + // The number of dropped links after the maximum size was // enforced. If this value is 0, then no links were dropped. DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` // An optional final status for this span. Semantically when Status isn't set, it means @@ -777,26 +769,17 @@ type Span_Event struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // time_unix_nano is the time the event occurred. + // The time the event occurred. TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // name of the event. + // The name of the event. // This field is semantically required to be set to non-empty string. Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // attributes is a collection of attribute key/value pairs on the event. + // A collection of attribute key/value pairs on the event. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. - // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. - // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. - // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // The number of dropped attributes. If the value is 0, // then no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` } @@ -877,21 +860,12 @@ type Span_Link struct { SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` // The trace_state associated with the link. TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` - // attributes is a collection of attribute key/value pairs on the link. + // A collection of attribute key/value pairs on the link. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). - // - // The attribute values SHOULD NOT contain empty values. - // The attribute values SHOULD NOT contain bytes values. 
- // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, - // double values. - // The attribute values SHOULD NOT contain kvlist values. - // The behavior of software that receives attributes containing such values can be unpredictable. - // These restrictions can change in a minor release. - // The restrictions take origin from the OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // The number of dropped attributes. If the value is 0, // then no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` // Flags, a bit field. diff --git a/vendor/go.yaml.in/yaml/v3/LICENSE b/vendor/go.yaml.in/yaml/v3/LICENSE new file mode 100644 index 000000000..2683e4bb1 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/go.yaml.in/yaml/v3/NOTICE b/vendor/go.yaml.in/yaml/v3/NOTICE new file mode 100644 index 000000000..866d74a7a --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/go.yaml.in/yaml/v3/README.md b/vendor/go.yaml.in/yaml/v3/README.md new file mode 100644 index 000000000..15a85a635 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/README.md @@ -0,0 +1,171 @@ +go.yaml.in/yaml +=============== + +YAML Support for the Go Language + + +## Introduction + +The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode +and decode [YAML](https://yaml.org/) values. + +It was originally developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go +port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to +parse and generate YAML data quickly and reliably. + + +## Project Status + +This project started as a fork of the extremely popular [go-yaml]( +https://github.com/go-yaml/yaml/) +project, and is being maintained by the official [YAML organization]( +https://github.com/yaml/). + +The YAML team took over ongoing maintenance and development of the project after +discussion with go-yaml's author, @niemeyer, following his decision to +[label the project repository as "unmaintained"]( +https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025. + +We have put together a team of dedicated maintainers including representatives +of go-yaml's most important downstream projects. + +We will strive to earn the trust of the various go-yaml forks to switch back to +this repository as their upstream. + +Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you +would like to contribute or be involved. + + +## Compatibility + +The `yaml` package supports most of YAML 1.2, but preserves some behavior from +1.1 for backwards compatibility. + +Specifically, v3 of the `yaml` package: + +* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being + decoded into a typed bool value. + Otherwise they behave as a string. + Booleans in YAML 1.2 are `true`/`false` only. +* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than + `0o777` as specified in YAML 1.2, because most parsers still use the old + format. + Octals in the `0o777` format are supported though, so new files work. +* Does not support base-60 floats. + These are gone from YAML 1.2, and were actually never supported by this + package as it's clearly a poor choice. + + +## Installation and Usage + +The import path for the package is *go.yaml.in/yaml/v3*. + +To install it, run: + +```bash +go get go.yaml.in/yaml/v3 +``` + + +## API Documentation + +See: + + +## API Stability + +The package API for yaml v3 will remain stable as described in [gopkg.in]( +https://gopkg.in). 
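
A quick sketch of the Compatibility notes above: the YAML 1.1 literal `yes` decodes to `true` only when the target is a typed bool, and stays a string otherwise.

```go
package main

import (
	"fmt"
	"log"

	"go.yaml.in/yaml/v3"
)

func main() {
	src := []byte("flag: yes")

	// Decoded into a typed bool, the YAML 1.1 literal `yes` becomes true.
	var typed struct {
		Flag bool `yaml:"flag"`
	}
	if err := yaml.Unmarshal(src, &typed); err != nil {
		log.Fatal(err)
	}
	fmt.Println(typed.Flag) // true

	// Decoded into an untyped value, the same literal stays a string.
	var loose map[string]interface{}
	if err := yaml.Unmarshal(src, &loose); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T %q\n", loose["flag"], loose["flag"]) // string "yes"
}
```
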
+ + +## Example + +```go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v3" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + + +## License + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. +Please see the LICENSE file for details. diff --git a/vendor/go.yaml.in/yaml/v3/apic.go b/vendor/go.yaml.in/yaml/v3/apic.go new file mode 100644 index 000000000..05fd305da --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/apic.go @@ -0,0 +1,747 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. 
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. 
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. 
+func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/go.yaml.in/yaml/v3/decode.go b/vendor/go.yaml.in/yaml/v3/decode.go new file mode 100644 index 000000000..02e2b17bf --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/decode.go @@ -0,0 +1,1018 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
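+//
+// For example (an illustrative sketch, abbreviated), the input
+//
+//	a: 1
+//
+// arrives from libyaml as the event stream
+//
+//	STREAM-START, DOCUMENT-START, MAPPING-START,
+//	SCALAR("a"), SCALAR("1"), MAPPING-END,
+//	DOCUMENT-END, STREAM-END
+//
+// and is assembled into a DocumentNode whose single child is a MappingNode
+// holding the key and value ScalarNodes side by side in Content.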
+
+type parser struct {
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *Node
+	anchors  map[string]*Node
+	doneInit bool
+	textless bool
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	yaml_parser_set_input_reader(&p.parser, r)
+	return &p
+}
+
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.anchors = make(map[string]*Node)
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
+		}
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
+	}
+	// It's a curious choice from the underlying API to generally return a
+	// positive result on success, but in this case to return true in an error
+	// scenario. This was the source of bugs in the past (issue #666).
+	if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR {
+		p.fail()
+	}
+	return p.event.typ
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+		// Scanner errors don't iterate line before returning error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	} else if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+		// Scanner errors don't iterate line before returning error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+	if anchor != nil {
+		n.Anchor = string(anchor)
+		p.anchors[n.Anchor] = n
+	}
+}
+
+func (p *parser) parse() *Node {
+	p.init()
+	switch p.peek() {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+ return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" { + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. 
+ if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int + + mergedFields map[interface{}]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
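+//
+// As an illustrative sketch (the type T below is hypothetical, not part of
+// this package), given
+//
+//	type T struct{ s string }
+//
+//	func (t *T) UnmarshalYAML(n *Node) error { t.s = n.Value; return nil }
+//
+// prepare allocates the *T when the destination pointer is nil, invokes
+// UnmarshalYAML, and reports unmarshaled=true so that the generic decoding
+// paths are skipped for this node.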
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
+ isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if d.getPossiblyUnhashableKey(mergedFields, ki) { + continue + } + d.setPossiblyUnhashableKey(mergedFields, ki, true) + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := 
settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !d.unmarshal(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + m[key] = value +} + +func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + return m[key] +} + +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true) + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } + + d.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" 
|| shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/go.yaml.in/yaml/v3/emitterc.go b/vendor/go.yaml.in/yaml/v3/emitterc.go new file mode 100644 index 000000000..ab4e03ba7 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/emitterc.go @@ -0,0 +1,2054 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. 
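+// Characters are copied one at a time via write, which, like put and
+// put_break above, flushes once the buffer has five or fewer bytes left
+// (comfortably enough for the widest UTF-8 sequence or a CRLF break).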
+func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. 
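+// For example (illustrative), with best_indent=2 the emitter starts at
+// indent=-1; entering a block mapping moves it to 0, a nested mapping to 2,
+// and so on. The previous value is pushed onto emitter.indents so that the
+// matching *-END event can restore it.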
+func yaml_emitter_increase_indent_compact(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool {
+	emitter.indents = append(emitter.indents, emitter.indent)
+	if emitter.indent < 0 {
+		if flow {
+			emitter.indent = emitter.best_indent
+		} else {
+			emitter.indent = 0
+		}
+	} else if !indentless {
+		// [Go] This was changed so that indentations are more regular.
+		if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
+			// The first indent inside a sequence will just skip the "- " indicator.
+			emitter.indent += 2
+		} else {
+			// Everything else aligns to the chosen indentation.
+			emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent)
+			if compact_seq {
+				// Almost every caller passes compact_seq as false; it is true only
+				// when increasing the indent for a sequence node. In that case we
+				// subtract 2 so that the "- " at the beginning of each sequence
+				// item counts as part of the indentation.
+				emitter.indent = emitter.indent - 2
+			}
+		}
+	}
+	return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	switch emitter.state {
+	default:
+	case yaml_EMIT_STREAM_START_STATE:
+		return yaml_emitter_emit_stream_start(emitter, event)
+
+	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, true)
+
+	case yaml_EMIT_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, false)
+
+	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+		return yaml_emitter_emit_document_content(emitter, event)
+
+	case yaml_EMIT_DOCUMENT_END_STATE:
+		return yaml_emitter_emit_document_end(emitter, event)
+
+	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
+
+	case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
+
+	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
+
+	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
+
+	case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
+
+	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
+
+	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if 
!yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// yaml_emitter_increase_indent preserves the original signature and delegates to +// yaml_emitter_increase_indent_compact without compact-sequence indentation +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false) +} + +// yaml_emitter_process_line_comment preserves the original signature and delegates to +// yaml_emitter_process_line_comment_linebreak passing false for linebreak +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + return yaml_emitter_process_line_comment_linebreak(emitter, false) +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
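+// A flow sequence is rendered inline, e.g. (illustrative)
+//
+//	[one, two, three]
+//
+// The first call writes "[", pushes an indentation level, and raises
+// flow_level; the SEQUENCE-END event pops both again and writes "]".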
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. 
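+// A flow mapping is rendered inline, e.g. (illustrative)
+//
+//	{one: 1, two: 2}
+//
+// Keys that pass yaml_emitter_check_simple_key are written as "key: value";
+// otherwise, and in canonical mode, the explicit "? key" form is used.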
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
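+// For a sequence nested under a mapping key, the default is to indent the
+// "- " indicator past the key, while compact_sequence_indent makes "- "
+// itself count as the indentation, e.g. (illustrative, with best_indent=4)
+//
+//	key:
+//	    - a
+//
+// versus, in compact form:
+//
+//	key:
+//	  - a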
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		// emitter.mapping_context tells us if we are currently in a mapping context.
+		// emitter.column tells us which column of the YAML output we are in; 0 is the first column.
+		// emitter.indention tells us if the last character written was an indentation character.
+		// emitter.compact_sequence_indent tells us if "- " is considered part of the indentation for sequence elements.
+		// So `seq` is true when we are in a mapping context, we are either at the
+		// first column or the last character was not an indentation character, and
+		// "- " counts as part of the indentation for sequence elements.
+		seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) &&
+			emitter.compact_sequence_indent
+		if !yaml_emitter_increase_indent_compact(emitter, false, false, seq) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if len(emitter.line_comment) > 0 {
+		// [Go] A line comment was provided for the key. That's unusual as the
+		//      scanner associates line comments with the value. Either way,
+		//      save the line comment and render it appropriately later.
+		emitter.key_line_comment = emitter.line_comment
+		emitter.line_comment = nil
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
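+// A simple key is followed by ": value" on the same line; a key emitted in
+// the explicit "?" form gets its value on the following line, e.g.
+// (illustrative)
+//
+//	simple: value
+//	? [a, complex, key]
+//	: value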
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. 
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. 
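+//
+// Editor's note: only keys that are single-line scalars or empty collections,
+// and whose serialized form fits within the 128-byte cap below, qualify for
+// the compact "key: value" rendering. Anything longer or multiline falls back
+// to the explicit form (illustrative):
+//
+//	? some very long or multi-line key
+//	: value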
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. 
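+		// Editor's note: with no handle to abbreviate it, the tag is written
+		// in verbatim form, e.g. !<tag:yaml.org,2002:str> (illustrative).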
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Write a head comment. +func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { + if len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.tail_comment) { + return false + } + emitter.tail_comment = emitter.tail_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + } + + if len(emitter.head_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.head_comment) { + return false + } + emitter.head_comment = emitter.head_comment[:0] + return true +} + +// Write an line comment. +func yaml_emitter_process_line_comment_linebreak(emitter *yaml_emitter_t, linebreak bool) bool { + if len(emitter.line_comment) == 0 { + // The next 3 lines are needed to resolve an issue with leading newlines + // See https://github.com/go-yaml/yaml/issues/755 + // When linebreak is set to true, put_break will be called and will add + // the needed newline. + if linebreak && !put_break(emitter) { + return false + } + return true + } + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !yaml_emitter_write_comment(emitter, emitter.line_comment) { + return false + } + emitter.line_comment = emitter.line_comment[:0] + return true +} + +// Write a foot comment. +func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool { + if len(emitter.foot_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.foot_comment) { + return false + } + emitter.foot_comment = emitter.foot_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + return true +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. 
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if 
!yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/encode.go b/vendor/go.yaml.in/yaml/v3/encode.go new file mode 100644 index 000000000..de9e72a3e --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/encode.go @@ -0,0 +1,577 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
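+
+// encode.go (editor's summary): walks arbitrary Go values with reflection and
+// lowers them into the event stream consumed by the emitter in emitterc.go.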
+
+package yaml
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+type encoder struct {
+	emitter  yaml_emitter_t
+	event    yaml_event_t
+	out      []byte
+	flow     bool
+	indent   int
+	doneInit bool
+}
+
+func newEncoder() *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
+	yaml_emitter_set_output_string(&e.emitter, &e.out)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
+	yaml_emitter_set_output_writer(&e.emitter, w)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	return e
+}
+
+func (e *encoder) init() {
+	if e.doneInit {
+		return
+	}
+	if e.indent == 0 {
+		e.indent = 4
+	}
+	e.emitter.best_indent = e.indent
+	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+	e.emit()
+	e.doneInit = true
+}
+
+func (e *encoder) finish() {
+	e.emitter.open_ended = false
+	yaml_stream_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) destroy() {
+	yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+	// This will internally delete the e.event value.
+	e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+	if !ok {
+		msg := e.emitter.problem
+		if msg == "" {
+			msg = "unknown problem generating YAML content"
+		}
+		failf("%s", msg)
+	}
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+	e.init()
+	var node *Node
+	if in.IsValid() {
+		node, _ = in.Interface().(*Node)
+	}
+	if node != nil && node.Kind == DocumentNode {
+		e.nodev(in)
+	} else {
+		yaml_document_start_event_initialize(&e.event, nil, nil, true)
+		e.emit()
+		e.marshal(tag, in)
+		yaml_document_end_event_initialize(&e.event, true)
+		e.emit()
+	}
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+	tag = shortTag(tag)
+	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+		e.nilv()
+		return
+	}
+	iface := in.Interface()
+	switch value := iface.(type) {
+	case *Node:
+		e.nodev(in)
+		return
+	case Node:
+		if !in.CanAddr() {
+			var n = reflect.New(in.Type()).Elem()
+			n.Set(in)
+			in = n
+		}
+		e.nodev(in.Addr())
+		return
+	case time.Time:
+		e.timev(tag, in)
+		return
+	case *time.Time:
+		e.timev(tag, in.Elem())
+		return
+	case time.Duration:
+		e.stringv(tag, reflect.ValueOf(value.String()))
+		return
+	case Marshaler:
+		v, err := value.MarshalYAML()
+		if err != nil {
+			fail(err)
+		}
+		if v == nil {
+			e.nilv()
+			return
+		}
+		e.marshal(tag, reflect.ValueOf(v))
+		return
+	case encoding.TextMarshaler:
+		text, err := value.MarshalText()
+		if err != nil {
+			fail(err)
+		}
+		in = reflect.ValueOf(string(text))
+	case nil:
+		e.nilv()
+		return
+	}
+	switch in.Kind() {
+	case reflect.Interface:
+		e.marshal(tag, in.Elem())
+	case reflect.Map:
+		e.mapv(tag, in)
+	case reflect.Ptr:
+		e.marshal(tag, in.Elem())
+	case reflect.Struct:
+		e.structv(tag, in)
+	case reflect.Slice, reflect.Array:
+		e.slicev(tag, in)
+	case reflect.String:
+		e.stringv(tag, in)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		e.intv(tag, in)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		e.uintv(tag, in)
+	case reflect.Float32, reflect.Float64:
+		e.floatv(tag, in)
+	case reflect.Bool:
+		e.boolv(tag, in)
+	default:
+		panic("cannot marshal type: " + in.Type().String())
+	}
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+	e.mappingv(tag, func() {
+		// Emit entries in sorted key order so the output is deterministic.
+		keys := keyList(in.MapKeys())
+		sort.Sort(keys)
+		for _, k := range keys {
+			e.marshal("", k)
+			e.marshal("", in.MapIndex(k))
+		}
+	})
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+	for _, num := range index {
+		for {
+			if v.Kind() == reflect.Ptr {
+				if v.IsNil() {
+					return reflect.Value{}
+				}
+				v = v.Elem()
+				continue
+			}
+			break
+		}
+		v = v.Field(num)
+	}
+	return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = e.fieldByIndex(in, info.Inline)
+				if !value.IsValid() {
+					continue
+				}
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+	e.emit()
+	f()
+	yaml_mapping_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings so that the marshalled output is valid for YAML
+// 1.1 parsing.
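+//
+// For example (editor's illustration): Marshal("yes") emits the quoted scalar
+// "yes" rather than a bare yes, which a YAML 1.1 parser would read as the
+// boolean true.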
+func isOldBool(s string) (result bool) {
+	switch s {
+	case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+		"n", "N", "no", "No", "NO", "off", "Off", "OFF":
+		return true
+	default:
+		return false
+	}
+}
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	canUsePlain := true
+	switch {
+	case !utf8.ValidString(s):
+		if tag == binaryTag {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		}
+		if tag != "" {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+		// It can't be encoded directly as YAML so use a binary tag
+		// and encode it as base64.
+		tag = binaryTag
+		s = encodeBase64(s)
+	case tag == "":
+		// Check to see if it would resolve to a specific
+		// tag when encoded unquoted. If it doesn't,
+		// there's no need to quote it.
+		rtag, _ := resolve("", s)
+		canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+	}
+	// Note: it's possible for user code to emit invalid YAML
+	// if they explicitly specify a tag and a string containing
+	// text that's incompatible with that tag.
+	switch {
+	case strings.Contains(s, "\n"):
+		if e.flow {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		} else {
+			style = yaml_LITERAL_SCALAR_STYLE
+		}
+	case canUsePlain:
+		style = yaml_PLAIN_SCALAR_STYLE
+	default:
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+	t := in.Interface().(time.Time)
+	s := t.Format(time.RFC3339Nano)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// Issue #352: When formatting, use the precision of the underlying value
+	precision := 64
+	if in.Kind() == reflect.Float32 {
+		precision = 32
+	}
+
+	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+	// TODO Kill this function. Replace all initialize calls by their underlying Go literals.
+	implicit := tag == ""
+	if !implicit {
+		tag = longTag(tag)
+	}
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.event.head_comment = head
+	e.event.line_comment = line
+	e.event.foot_comment = foot
+	e.event.tail_comment = tail
+	e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+	e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+	// Zero nodes behave as nil.
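+	// (Editor's note, illustrative: marshalling a bare &yaml.Node{} therefore
+	// produces "null".)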
+	if node.Kind == 0 && node.IsZero() {
+		e.nilv()
+		return
+	}
+
+	// If the tag was not explicitly requested, and dropping it won't change the
+	// implicit tag of the value, don't include it in the presentation.
+	var tag = node.Tag
+	var stag = shortTag(tag)
+	var forceQuoting bool
+	if tag != "" && node.Style&TaggedStyle == 0 {
+		if node.Kind == ScalarNode {
+			if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+				tag = ""
+			} else {
+				rtag, _ := resolve("", node.Value)
+				if rtag == stag {
+					tag = ""
+				} else if stag == strTag {
+					tag = ""
+					forceQuoting = true
+				}
+			}
+		} else {
+			var rtag string
+			switch node.Kind {
+			case MappingNode:
+				rtag = mapTag
+			case SequenceNode:
+				rtag = seqTag
+			}
+			if rtag == stag {
+				tag = ""
+			}
+		}
+	}
+
+	switch node.Kind {
+	case DocumentNode:
+		yaml_document_start_event_initialize(&e.event, nil, nil, true)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		yaml_document_end_event_initialize(&e.event, true)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case SequenceNode:
+		style := yaml_BLOCK_SEQUENCE_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_SEQUENCE_STYLE
+		}
+		e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		e.must(yaml_sequence_end_event_initialize(&e.event))
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case MappingNode:
+		style := yaml_BLOCK_MAPPING_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_MAPPING_STYLE
+		}
+		yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
+		e.event.tail_comment = []byte(tail)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+
+		// The tail logic below moves the foot comment of prior keys to the following key,
+		// since the value for each key may be a nested structure and the foot needs to be
+		// processed only after the entirety of the value is streamed. The last tail is
+		// processed with the mapping end event.
+		var tail string
+		for i := 0; i+1 < len(node.Content); i += 2 {
+			k := node.Content[i]
+			foot := k.FootComment
+			if foot != "" {
+				kopy := *k
+				kopy.FootComment = ""
+				k = &kopy
+			}
+			e.node(k, tail)
+			tail = foot
+
+			v := node.Content[i+1]
+			e.node(v, "")
+		}
+
+		yaml_mapping_end_event_initialize(&e.event)
+		e.event.tail_comment = []byte(tail)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case AliasNode:
+		yaml_alias_event_initialize(&e.event, []byte(node.Value))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case ScalarNode:
+		value := node.Value
+		if !utf8.ValidString(value) {
+			if stag == binaryTag {
+				failf("explicitly tagged !!binary data must be base64-encoded")
+			}
+			if stag != "" {
+				failf("cannot marshal invalid UTF-8 data as %s", stag)
+			}
+			// It can't be encoded directly as YAML so use a binary tag
+			// and encode it as base64.
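+			// (Editor's note, illustrative: the two bytes 0xFF 0xFE are
+			// emitted as !!binary //4=.)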
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/vendor/go.yaml.in/yaml/v3/parserc.go b/vendor/go.yaml.in/yaml/v3/parserc.go new file mode 100644 index 000000000..25fe82363 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/parserc.go @@ -0,0 +1,1274 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
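+//
+// Editor's note: several states share a parse function below and are
+// distinguished by a boolean argument (e.g. first-entry vs. subsequent-entry,
+// implicit vs. explicit document start).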
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+	//trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+	switch parser.state {
+	case yaml_PARSE_STREAM_START_STATE:
+		return yaml_parser_parse_stream_start(parser, event)
+
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, true)
+
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, false)
+
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return yaml_parser_parse_document_content(parser, event)
+
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return yaml_parser_parse_document_end(parser, event)
+
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, true, false)
+
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return yaml_parser_parse_node(parser, event, true, true)
+
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, false, false)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_block_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+	default:
+		panic("invalid parser state")
+	}
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+//
+// *
+//
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		var head_comment []byte
+		if len(parser.head_comment) > 0 {
+			// [Go] Scan the header comment backwards, and if an empty line is found, break
+			// the header so the part before the last empty line goes into the
+			// document header, while the bottom of it goes into a follow up event.
+			for i := len(parser.head_comment) - 1; i > 0; i-- {
+				if parser.head_comment[i] == '\n' {
+					if i == len(parser.head_comment)-1 {
+						head_comment = parser.head_comment[:i]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					} else if parser.head_comment[i-1] == '\n' {
+						head_comment = parser.head_comment[:i-1]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					}
+				}
+			}
+		}
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+
+			head_comment: head_comment,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +// *********** +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// +// ************* +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// +// block_node ::= ALIAS +// +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// +// flow_node ::= ALIAS +// +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// +// ************************* +// +// block_content ::= block_collection | flow_collection | SCALAR +// +// ****** +// +// flow_content ::= flow_collection | SCALAR +// +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: 
false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// +// ******************** *********** * ********* +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Split stem comment from head comment. 
+// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? 
+// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// +// *** * +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// ***** * +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// - *** *
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		if token == nil {
+			return false
+		}
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow mapping", context_mark,
+					"did not find expected ',' or '}'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+			if token.typ != yaml_VALUE_TOKEN &&
+				token.typ != yaml_FLOW_ENTRY_TOKEN &&
+				token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+				return yaml_parser_parse_node(parser, event, false, false)
+			} else {
+				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+				return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+			}
+		} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+	yaml_parser_set_event_comments(parser, event)
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// - ***** *
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if empty {
+		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+	*event = yaml_event_t{
+		typ:        yaml_SCALAR_EVENT,
+		start_mark: mark,
+		end_mark:   mark,
+		value:      nil, // Empty
+		implicit:   true,
+		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+	}
+	return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+	{[]byte("!"), []byte("!")},
+	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
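+//
+// As a rough illustration (the %TAG handle below is only an example, not
+// anything defined in this file): given the directives
+//
+//	%YAML 1.1
+//	%TAG !e! tag:example.com,2000:app/
+//
+// a later node tagged !e!foo resolves against the %TAG prefix to the full
+// tag tag:example.com,2000:app/foo, while the default directives appended
+// below make !!str resolve to tag:yaml.org,2002:str as usual.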
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/readerc.go b/vendor/go.yaml.in/yaml/v3/readerc.go new file mode 100644 index 000000000..56af24536 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks.
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we have enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
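+//
+// As a rough illustration of how this is used: the scanner below only ever
+// asks for a handful of characters at a time, with the (inlined) call
+// pattern typically looking like:
+//
+//	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+//		return false
+//	}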
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// [Go] This function was changed to guarantee the requested length at EOF.
+	// The fact we need to do this is pretty awful, but the description above
+	// implies that this is the case, and there are tests that rely on it.
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer. Not doing that means every single
+		// check that calls this function to make sure the buffer has a
+		// given length is Go) panicking; or C) accessing invalid memory.
+		//return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	// Fill the buffer until it has enough characters.
+	first := true
+	for parser.unread < length {
+
+		// Fill the raw buffer if necessary.
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_len]
+				return false
+			}
+		}
+		first = false
+
+		// Decode the raw buffer.
+	inner:
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var width int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+			// Decode the next character.
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+				// Decode a UTF-8 character. Check RFC 3629
+				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				//
+				// The following table (taken from the RFC) is used for
+				// decoding.
+				//
+				//	Char. number range  | UTF-8 octet sequence
+				//	  (hexadecimal)     |       (binary)
+				//	--------------------+------------------------------------
+				//	0000 0000-0000 007F | 0xxxxxxx
+				//	0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				//	0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				//	0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				//
+				// Additionally, the characters in the range 0xD800-0xDFFF
+				// are prohibited as they are reserved for use with UTF-16
+				// surrogate pairs.
+
+				// Determine the length of the UTF-8 sequence.
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				switch {
+				case octet&0x80 == 0x00:
+					width = 1
+				case octet&0xE0 == 0xC0:
+					width = 2
+				case octet&0xF0 == 0xE0:
+					width = 3
+				case octet&0xF8 == 0xF0:
+					width = 4
+				default:
+					// The leading octet is invalid.
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				// Check if the raw buffer contains an incomplete character.
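+				// For example, the 3-octet sequence for '€' (E2 82 AC) may
+				// have been split across reads, leaving only E2 82 available;
+				// in that case we stop decoding and fetch more input first.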
+ if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. 
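+					// For example, U+10437 is encoded as the pair D801 DC37
+					// (the example from RFC 2781): value is then 0xD801, and
+					// value2 must fall in 0xDC00-0xDFFF for the pair to be valid.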
+ if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/resolve.go b/vendor/go.yaml.in/yaml/v3/resolve.go new file mode 100644 index 000000000..64ae88805 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package yaml
+
+import (
+	"encoding/base64"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type resolveMapItem struct {
+	value interface{}
+	tag   string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.' // Float (potentially in map)
+
+	var resolveMapList = []struct {
+		v   interface{}
+		tag string
+		l   []string
+	}{
+		{true, boolTag, []string{"true", "True", "TRUE"}},
+		{false, boolTag, []string{"false", "False", "FALSE"}},
+		{nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", mergeTag, []string{"<<"}},
+	}
+
+	m := resolveMap
+	for _, item := range resolveMapList {
+		for _, s := range item.l {
+			m[s] = resolveMapItem{item.v, item.tag}
+		}
+	}
+}
+
+const (
+	nullTag      = "!!null"
+	boolTag      = "!!bool"
+	strTag       = "!!str"
+	intTag       = "!!int"
+	floatTag     = "!!float"
+	timestampTag = "!!timestamp"
+	seqTag       = "!!seq"
+	mapTag       = "!!map"
+	binaryTag    = "!!binary"
+	mergeTag     = "!!merge"
+)
+
+var longTags = make(map[string]string)
+var shortTags = make(map[string]string)
+
+func init() {
+	for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
+		ltag := longTag(stag)
+		longTags[stag] = ltag
+		shortTags[ltag] = stag
+	}
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+	if strings.HasPrefix(tag, longTagPrefix) {
+		if stag, ok := shortTags[tag]; ok {
+			return stag
+		}
+		return "!!" + tag[len(longTagPrefix):]
+	}
+	return tag
+}
+
+func longTag(tag string) string {
+	if strings.HasPrefix(tag, "!!") {
+		if ltag, ok := longTags[tag]; ok {
+			return ltag
+		}
+		return longTagPrefix + tag[2:]
+	}
+	return tag
+}
+
+func resolvableTag(tag string) bool {
+	switch tag {
+	case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
+		return true
+	}
+	return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+	tag = shortTag(tag)
+	if !resolvableTag(tag) {
+		return tag, in
+	}
+
+	defer func() {
+		switch tag {
+		case "", rtag, strTag, binaryTag:
+			return
+		case floatTag:
+			if rtag == intTag {
+				switch v := out.(type) {
+				case int64:
+					rtag = floatTag
+					out = float64(v)
+					return
+				case int:
+					rtag = floatTag
+					out = float64(v)
+					return
+				}
+			}
+		}
+		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+	}()
+
+	// Any data is accepted as a !!str or !!binary.
+	// Otherwise, the prefix is enough of a hint about what it might be.
+	hint := byte('N')
+	if in != "" {
+		hint = resolveTable[in[0]]
+	}
+	if hint != 0 && tag != strTag && tag != binaryTag {
+		// Handle things we can look up in a map.
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
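+		//
+		// Roughly, the hint byte steers resolution as follows (examples
+		// derived from the tables above): "true" and "~" are found directly
+		// in resolveMap; "yes" gets hint 'M' but is not in this v3 map, so
+		// it falls through to !!str; "10" and "-3" get hints 'D'/'S' and
+		// parse as !!int; ".5" gets hint '.' and parses as !!float.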
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return floatTag, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			// Only try values as a timestamp if the value is unquoted or there's an explicit
+			// !!timestamp tag.
+			if tag == "" || tag == timestampTag {
+				t, ok := parseTimestamp(in)
+				if ok {
+					return timestampTag, t
+				}
+			}
+
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return intTag, int(intv)
+				} else {
+					return intTag, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return intTag, uintv
+			}
+			if yamlStyleFloat.MatchString(plain) {
+				floatv, err := strconv.ParseFloat(plain, 64)
+				if err == nil {
+					return floatTag, floatv
+				}
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return intTag, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+				if err == nil {
+					if true || intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+			}
+			// Octals as introduced in version 1.2 of the spec.
+			// Octals from the 1.1 spec, spelled as 0777, are still
+			// decoded by default in v3 as well for compatibility.
+			// May be dropped in v4 depending on how usage evolves.
+			if strings.HasPrefix(plain, "0o") {
+				intv, err := strconv.ParseInt(plain[2:], 8, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+				if err == nil {
+					return intTag, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0o") {
+				intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+				if err == nil {
+					if true || intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+			}
+		default:
+			panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	return strTag, in
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
+	"2006-1-2",                        // date only
+	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+	// from the set of examples.
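+	// For reference, the spec's canonical example "2001-12-15T02:59:43.1Z"
+	// matches the first format above, and "2002-12-14" matches the
+	// date-only form.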
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/go.yaml.in/yaml/v3/scannerc.go b/vendor/go.yaml.in/yaml/v3/scannerc.go
new file mode 100644
index 000000000..30b1f0892
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/scannerc.go
@@ -0,0 +1,3040 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or,
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two aspects of Scanning that might be called "clever", the
+// rest is quite straightforward. The aspects are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//	STREAM-START(encoding)          # The stream start.
+//	STREAM-END                      # The stream end.
+//	VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//	TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//	DOCUMENT-START                  # '---'
+//	DOCUMENT-END                    # '...'
+//	BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//	BLOCK-MAPPING-START             # sequence or a block mapping.
+//	BLOCK-END                       # Indentation decrease.
+//	FLOW-SEQUENCE-START             # '['
+//	FLOW-SEQUENCE-END               # ']'
+//	FLOW-MAPPING-START              # '{'
+//	FLOW-MAPPING-END                # '}'
+//	BLOCK-ENTRY                     # '-'
+//	FLOW-ENTRY                      # ','
+//	KEY                             # '?' or nothing (simple keys).
+//	VALUE                           # ':'
+//	ALIAS(anchor)                   # '*anchor'
+//	ANCHOR(anchor)                  # '&anchor'
+//	TAG(handle,suffix)              # '!handle!suffix'
+//	SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//	STREAM-START(encoding)
+//	STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+//	VERSION-DIRECTIVE(major,minor)
+//	TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//	%YAML 1.1
+//	%TAG ! !foo
+//	%TAG !yaml! tag:yaml.org,2002:
+//	---
+//
+// The corresponding sequence of tokens:
+//
+//	STREAM-START(utf-8)
+//	VERSION-DIRECTIVE(1,1)
+//	TAG-DIRECTIVE("!","!foo")
+//	TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//	DOCUMENT-START
+//	STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//	DOCUMENT-START
+//	DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+//	'a scalar'
+//
+// Tokens:
+//
+//	STREAM-START(utf-8)
+//	SCALAR("a scalar",single-quoted)
+//	STREAM-END
+//
+// 2. An explicit document:
+//
+//	---
+//	'a scalar'
+//	...
+//
+// Tokens:
+//
+//	STREAM-START(utf-8)
+//	DOCUMENT-START
+//	SCALAR("a scalar",single-quoted)
+//	DOCUMENT-END
+//	STREAM-END
+//
+// 3. Several documents in a stream:
+//
+//	'a scalar'
+//	---
+//	'another scalar'
+//	---
+//	'yet another scalar'
+//
+// Tokens:
+//
+//	STREAM-START(utf-8)
+//	SCALAR("a scalar",single-quoted)
+//	DOCUMENT-START
+//	SCALAR("another scalar",single-quoted)
+//	DOCUMENT-START
+//	SCALAR("yet another scalar",single-quoted)
+//	STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//	ALIAS(anchor)
+//	ANCHOR(anchor)
+//	TAG(handle,suffix)
+//	SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+//	&A [ *A ]
+//
+// Tokens:
+//
+//	STREAM-START(utf-8)
+//	ANCHOR("A")
+//	FLOW-SEQUENCE-START
+//	ALIAS("A")
+//	FLOW-SEQUENCE-END
+//	STREAM-END
+//
+// 2. A tagged scalar:
+//
+//	!!float "3.14"  # A good approximation.
+//
+// Tokens:
+//
+//	STREAM-START(utf-8)
+//	TAG("!!","float")
+//	SCALAR("3.14",double-quoted)
+//	STREAM-END
+//
+// 3. Various scalar styles:
+//
+//	--- # Implicit empty plain scalars do not produce tokens.
+//	--- a plain scalar
+//	--- 'a single-quoted scalar'
+//	--- "a double-quoted scalar"
+//	--- |-
+//	  a literal scalar
+//	--- >-
+//	  a folded
+//	  scalar
+//
+// Tokens:
+//
+//	STREAM-START(utf-8)
+//	DOCUMENT-START
+//	DOCUMENT-START
+//	SCALAR("a plain scalar",plain)
+//	DOCUMENT-START
+//	SCALAR("a single-quoted scalar",single-quoted)
+//	DOCUMENT-START
+//	SCALAR("a double-quoted scalar",double-quoted)
+//	DOCUMENT-START
+//	SCALAR("a literal scalar",literal)
+//	DOCUMENT-START
+//	SCALAR("a folded scalar",folded)
+//	STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//	FLOW-SEQUENCE-START
+//	FLOW-SEQUENCE-END
+//	FLOW-MAPPING-START
+//	FLOW-MAPPING-END
+//	FLOW-ENTRY
+//	KEY
+//	VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+//	[item 1, item 2, item 3]
+//
+// Tokens:
+//
+//	STREAM-START(utf-8)
+//	FLOW-SEQUENCE-START
+//	SCALAR("item 1",plain)
+//	FLOW-ENTRY
+//	SCALAR("item 2",plain)
+//	FLOW-ENTRY
+//	SCALAR("item 3",plain)
+//	FLOW-SEQUENCE-END
+//	STREAM-END
+//
+// 2. A flow mapping:
+//
+//	{
+//	    a simple key: a value,  # Note that the KEY token is produced.
+//	    ? a complex key: another value,
+//	}
+//
+// Tokens:
+//
+//	STREAM-START(utf-8)
+//	FLOW-MAPPING-START
+//	KEY
+//	SCALAR("a simple key",plain)
+//	VALUE
+//	SCALAR("a value",plain)
+//	FLOW-ENTRY
+//	KEY
+//	SCALAR("a complex key",plain)
+//	VALUE
+//	SCALAR("another value",plain)
+//	FLOW-ENTRY
+//	FLOW-MAPPING-END
+//	STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//	BLOCK-SEQUENCE-START
+//	BLOCK-MAPPING-START
+//	BLOCK-END
+//	BLOCK-ENTRY
+//	KEY
+//	VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes the indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start on the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key",plain)
+// VALUE
+// SCALAR("complex value",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
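Reading the token walk-through above against live output makes the fetchers below much easier to follow. The sketch that follows is a minimal token dump; it assumes it is compiled inside this package (the scanner API is unexported), that the vendored package is named `yaml` as in upstream go-yaml, and that `dumpTokens` is a hypothetical helper name. Fed the last document above (`"key:\n- item 1\n- item 2\n"`), it prints the BLOCK-MAPPING token sequence without a BLOCK-SEQUENCE-START, as described.

```go
package yaml

import "fmt"

// dumpTokens prints the token stream for one YAML input, mirroring the
// examples in the header comment. Everything except dumpTokens itself is
// the package's own unexported scanner API.
func dumpTokens(input []byte) {
	var parser yaml_parser_t
	if !yaml_parser_initialize(&parser) {
		panic("cannot initialize parser")
	}
	defer yaml_parser_delete(&parser)
	yaml_parser_set_input_string(&parser, input)

	for {
		var token yaml_token_t
		if !yaml_parser_scan(&parser, &token) {
			fmt.Println("scanner error:", parser.problem)
			return
		}
		// Prints values such as yaml_BLOCK_MAPPING_START_TOKEN
		// (via yaml_token_type_t's String method, as in upstream).
		fmt.Println(token.typ)
		if token.typ == yaml_STREAM_END_TOKEN {
			return
		}
	}
}
```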
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+	// [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+	return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		parser.newlines = 0
+	}
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+	if is_crlf(parser.buffer, parser.buffer_pos) {
+		parser.mark.index += 2
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread -= 2
+		parser.buffer_pos += 2
+		parser.newlines++
+	} else if is_break(parser.buffer, parser.buffer_pos) {
+		parser.mark.index++
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread--
+		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+		parser.newlines++
+	}
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		parser.newlines = 0
+	}
+	w := width(parser.buffer[parser.buffer_pos])
+	if w == 0 {
+		panic("invalid character sequence")
+	}
+	if len(s) == 0 {
+		s = make([]byte, 0, 32)
+	}
+	if w == 1 && len(s)+w <= cap(s) {
+		s = s[:len(s)+1]
+		s[len(s)-1] = parser.buffer[parser.buffer_pos]
+		parser.buffer_pos++
+	} else {
+		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+		parser.buffer_pos += w
+	}
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+	buf := parser.buffer
+	pos := parser.buffer_pos
+	switch {
+	case buf[pos] == '\r' && buf[pos+1] == '\n':
+		// CR LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+		parser.mark.index++
+		parser.unread--
+	case buf[pos] == '\r' || buf[pos] == '\n':
+		// CR|LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 1
+	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+		// NEL . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+		// LS|PS . LS|PS
+		s = append(s, buf[parser.buffer_pos:pos+3]...)
+		parser.buffer_pos += 3
+	default:
+		return s
+	}
+	parser.mark.index++
+	parser.mark.column = 0
+	parser.mark.line++
+	parser.unread--
+	parser.newlines++
+	return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Erase the token object.
+	*token = yaml_token_t{} // [Go] Is this necessary?
+
+	// No tokens after STREAM-END or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+		return true
+	}
+
+	// Ensure that the tokens queue contains enough tokens.
+	if !parser.token_available {
+		if !yaml_parser_fetch_more_tokens(parser) {
+			return false
+		}
+	}
+
+	// Fetch the next token from the queue.
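+	// Note: tokens_head indexes into the live queue slice, while tokens_parsed
+	// counts tokens handed out over the whole stream; the simple-key map in
+	// yaml_parser_fetch_more_tokens is keyed on the latter.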
+ *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? 
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
+		}
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+	}
+	return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
+
+	// Increase the flow level.
+	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
+	}
+	return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
+
+		// Create a token and insert it into the queue.
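+		// The simple-key machinery records absolute token numbers, while
+		// yaml_insert_token takes an offset from tokens_head; the subtraction
+		// of tokens_parsed below converts between the two.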
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
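The '[' and '{' fetcher above pairs yaml_parser_increase_flow_level with the matching decrease in the ']' and '}' fetcher, and the max_flow_level constant caps how deep flow collections may nest. A minimal sketch of tripping that guard, again assuming in-package compilation with `deepFlow` as a hypothetical helper name:

```go
package yaml

import (
	"fmt"
	"strings"
)

// deepFlow feeds pathologically nested flow sequences to the scanner. For
// depth > 10000 the scan fails once flow_level exceeds max_flow_level.
func deepFlow(depth int) {
	var parser yaml_parser_t
	if !yaml_parser_initialize(&parser) {
		panic("cannot initialize parser")
	}
	defer yaml_parser_delete(&parser)
	yaml_parser_set_input_string(&parser, []byte(strings.Repeat("[", depth)))

	for {
		var token yaml_token_t
		if !yaml_parser_scan(&parser, &token) {
			// e.g. "exceeded max depth of 10000"
			fmt.Println(parser.problem)
			return
		}
		if token.typ == yaml_STREAM_END_TOKEN {
			return
		}
	}
}
```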
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it, because the Parser is
+		// able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. 
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_comments(parser, scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? 
+	if bytes.Equal(name, []byte("YAML")) {
+		// Scan the VERSION directive value.
+		var major, minor int8
+		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a VERSION-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			major:      major,
+			minor:      minor,
+		}
+
+		// Is it a TAG directive?
+	} else if bytes.Equal(name, []byte("TAG")) {
+		// Scan the TAG directive value.
+		var handle, prefix []byte
+		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a TAG-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		// [Go] Discard this inline comment for the time being.
+		//if !yaml_parser_scan_line_comment(parser, start_mark) {
+		//	return false
+		//}
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. 
+ if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. 
+ + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. 
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. 
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
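The escape switch just shown is only reachable for double-quoted scalars; \x, \u and \U consume 2, 4 and 8 hex digits respectively and are re-encoded as UTF-8 by the branches that follow. A small illustration, again assuming the vendored import path:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	// \u00E9 takes the code_length == 4 path and comes back as the
	// two-byte UTF-8 encoding of 'é'; \t and \n use the fixed cases.
	var s string
	if err := yaml.Unmarshal([]byte(`"caf\u00E9\tok\n"`), &s); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", s) // "café\tok\n"
}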
+ s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. 
+ if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
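One consequence of the plain-scalar loop above that is easy to trip over: in flow context the characters , ? [ ] { } terminate a plain scalar, while in block context they are ordinary content. A quick sketch (import path assumed as above):

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	// Block context: the comma is part of the scalar.
	var block struct{ V string }
	if err := yaml.Unmarshal([]byte("v: a, b"), &block); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", block.V) // "a, b"

	// Flow context: the same comma now separates two scalars.
	var flow struct{ V []string }
	if err := yaml.Unmarshal([]byte("v: [a, b]"), &flow); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", flow.V) // ["a" "b"]
}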
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line - parser.newlines + 1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. + seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/sorter.go b/vendor/go.yaml.in/yaml/v3/sorter.go new file mode 100644 index 000000000..9210ece7e --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
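Stepping back from the scanner internals: the head, line, and foot comments collected by yaml_parser_scan_comments above surface on the public Node type. A minimal probe follows; comment placement has some known quirks, so treat the exact attachment as indicative rather than guaranteed:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	src := "# head of a\na: 1 # beside a\n\n# head of b\nb: 2\n"
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte(src), &doc); err != nil {
		panic(err)
	}
	m := doc.Content[0] // mapping node; keys and values alternate in Content
	for i := 0; i < len(m.Content); i += 2 {
		k := m.Content[i]
		fmt.Printf("%s: head=%q line=%q foot=%q\n",
			k.Value, k.HeadComment, k.LineComment, k.FootComment)
	}
}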
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. 
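Before numLess below, it is worth noting what keyList.Less buys at the API level: Marshal sorts map keys with this comparison, so numeric keys sort numerically and digit runs inside strings compare by value rather than byte-wise. A sketch under the same import assumption:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	out, err := yaml.Marshal(map[string]int{"item10": 3, "item2": 2, "item1": 1})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // item1, item2, item10 (natural, not lexicographic, order)
}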
+func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/go.yaml.in/yaml/v3/writerc.go b/vendor/go.yaml.in/yaml/v3/writerc.go new file mode 100644 index 000000000..266d0b092 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/yaml.go b/vendor/go.yaml.in/yaml/v3/yaml.go new file mode 100644 index 000000000..0b101cd20 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/yaml.go @@ -0,0 +1,703 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. 
+//
+// Source code and other details for the project are available at GitHub:
+//
+//	https://github.com/yaml/go-yaml
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+	UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatch, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	var t T
+//	yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	parser      *parser
+	knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+	dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
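Decode, defined next, pairs with KnownFields above: with KnownFields(true), mapping keys that have no corresponding struct field produce a TypeError instead of being silently dropped. A minimal sketch (the misspelled key is deliberate):

package main

import (
	"fmt"
	"strings"

	yaml "go.yaml.in/yaml/v3"
)

type cfg struct {
	Host string
	Port int
}

func main() {
	dec := yaml.NewDecoder(strings.NewReader("host: example.org\nporrt: 8080\n"))
	dec.KnownFields(true)
	var c cfg
	if err := dec.Decode(&c); err != nil {
		fmt.Println(err) // reports that field porrt is not found in main.cfg
	}
}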
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	d.knownFields = dec.knownFields
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	defer handleErr(&err)
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(n, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//	`(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//	omitempty    Only include the field if it's not set to the zero
+//	             value for the type or to empty slices or maps.
+//	             Zero valued structs will be omitted if all their public
+//	             fields are zero, unless they implement an IsZero
+//	             method (see the IsZeroer interface type), in which
+//	             case the field will be excluded if IsZero returns true.
+//
+//	flow         Marshal using a flow style (useful for structs,
+//	             sequences and maps).
+//
+//	inline       Inline the field, which must be a struct or a map,
+//	             causing all of its fields or keys to be processed as if
+//	             they were part of the outer struct. For maps, keys must
+//	             not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//	yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
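Before moving on to the Encoder: to make the tag flags documented above concrete, a short sketch covering omitempty, flow, inline and the "-" key (the type and field names here are illustrative only):

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

type meta struct {
	Owner string `yaml:"owner"`
}

type app struct {
	Name   string   `yaml:"name"`
	Labels []string `yaml:"labels,flow,omitempty"`
	Meta   meta     `yaml:",inline"`
	Secret string   `yaml:"-"` // never emitted
}

func main() {
	out, err := yaml.Marshal(app{Name: "abra", Labels: []string{"a", "b"}, Meta: meta{Owner: "coop"}, Secret: "hidden"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// name: abra
	// labels: [a, b]
	// owner: coop
}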
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(v))
+	e.finish()
+	p := newParser(e.out)
+	p.textless = true
+	defer p.destroy()
+	doc := p.parse()
+	*n = *doc.Content[0]
+	return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+	if spaces < 0 {
+		panic("yaml: cannot indent to a negative number of spaces")
+	}
+	e.encoder.indent = spaces
+}
+
+// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
+func (e *Encoder) CompactSeqIndent() {
+	e.encoder.emitter.compact_sequence_indent = true
+}
+
+// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
+func (e *Encoder) DefaultSeqIndent() {
+	e.encoder.emitter.compact_sequence_indent = false
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+type Kind uint32
+
+const (
+	DocumentNode Kind = 1 << iota
+	SequenceNode
+	MappingNode
+	ScalarNode
+	AliasNode
+)
+
+type Style uint32
+
+const (
+	TaggedStyle Style = 1 << iota
+	DoubleQuotedStyle
+	SingleQuotedStyle
+	LiteralStyle
+	FoldedStyle
+	FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
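A usage note on the Encoder knobs above: SetIndent and CompactSeqIndent together control how nested sequences render; compact mode places the '- ' at the parent key's column, the style much YAML tooling emits. A sketch under the same import assumption:

package main

import (
	"os"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	enc := yaml.NewEncoder(os.Stdout)
	defer enc.Close() // flushes buffered output

	enc.SetIndent(2)
	enc.CompactSeqIndent() // '- ' counts as indentation

	if err := enc.Encode(map[string][]string{"steps": {"build", "test"}}); err != nil {
		panic(err)
	}
	// steps:
	// - build
	// - test
}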
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+//	var person struct {
+//		Name    string
+//		Address yaml.Node
+//	}
+//	err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+//	var person Node
+//	err := yaml.Unmarshal(data, &person)
+type Node struct {
+	// Kind defines whether the node is a document, a mapping, a sequence,
+	// a scalar value, or an alias to another node. The specific data type of
+	// scalar nodes may be obtained via the ShortTag and LongTag methods.
+	Kind Kind
+
+	// Style allows customizing the appearance of the node in the tree.
+	Style Style
+
+	// Tag holds the YAML tag defining the data type for the value.
+	// When decoding, this field will always be set to the resolved tag,
+	// even when it wasn't explicitly provided in the YAML content.
+	// When encoding, if this field is unset the value type will be
+	// implied from the node properties, and if it is set, it will only
+	// be serialized into the representation if TaggedStyle is used or
+	// the implicit tag diverges from the provided one.
+	Tag string
+
+	// Value holds the unescaped and unquoted representation of the value.
+	Value string
+
+	// Anchor holds the anchor name for this node, which allows aliases to point to it.
+	Anchor string
+
+	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+	Alias *Node
+
+	// Content holds contained nodes for documents, mappings, and sequences.
+	Content []*Node
+
+	// HeadComment holds any comments in the lines preceding the node and
+	// not separated by an empty line.
+	HeadComment string
+
+	// LineComment holds any comments at the end of the line the node is in.
+	LineComment string
+
+	// FootComment holds any comments following the node and before empty lines.
+	FootComment string
+
+	// Line and Column hold the node position in the decoded YAML text.
+	// These fields are not respected when encoding the node.
+	Line   int
+	Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+	return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+		n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+	return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+	if n.indicatedString() {
+		return strTag
+	}
+	if n.Tag == "" || n.Tag == "!" {
+		switch n.Kind {
+		case MappingNode:
+			return mapTag
+		case SequenceNode:
+			return seqTag
+		case AliasNode:
+			if n.Alias != nil {
+				return n.Alias.ShortTag()
+			}
+		case ScalarNode:
+			tag, _ := resolve("", n.Value)
+			return tag
+		case 0:
+			// Special case to make the zero value convenient.
+ if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + 
inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. +type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/go.yaml.in/yaml/v3/yamlh.go b/vendor/go.yaml.in/yaml/v3/yamlh.go new file mode 100644 index 000000000..f59aa40f6 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/yamlh.go @@ -0,0 +1,811 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 + + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. 
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT // A STREAM-START event.
+	yaml_STREAM_END_EVENT   // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. + yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. 
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// +// yaml_parser_set_input(). +// +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. 
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
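+
+	// (These states mirror libyaml's pull-parser design: each call into the
+	// parser's state machine emits one event and moves between these states,
+	// pushing onto a state stack when it descends into nested collections.)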
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. 
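+
+	// (Input is buffered in two stages, as the fields above suggest: the read
+	// handler fills raw_buffer with undecoded bytes, and the reader converts
+	// them, according to the detected encoding, into the UTF-8 working buffer
+	// that the scanner consumes.)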
+
+	encoding yaml_encoding_t // The input encoding.
+
+	offset int         // The offset of the current position (in bytes).
+	mark   yaml_mark_t // The mark of the current position.
+
+	// Comments
+
+	head_comment []byte // The current head comments
+	line_comment []byte // The current line comments
+	foot_comment []byte // The current foot comments
+	tail_comment []byte // Foot comment that happens at the end of a block.
+	stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
+
+	comments      []yaml_comment_t // The folded comments for all parsed tokens
+	comments_head int
+
+	// Scanner stuff
+
+	stream_start_produced bool // Have we started to scan the input stream?
+	stream_end_produced   bool // Have we reached the end of the input stream?
+
+	flow_level int // The number of unclosed '[' and '{' indicators.
+
+	tokens          []yaml_token_t // The tokens queue.
+	tokens_head     int            // The head of the tokens queue.
+	tokens_parsed   int            // The number of tokens fetched from the queue.
+	token_available bool           // Does the tokens queue contain a token ready for dequeueing.
+
+	indent  int   // The current indentation level.
+	indents []int // The indentation levels stack.
+
+	simple_key_allowed bool                // May a simple key occur at the current position?
+	simple_keys        []yaml_simple_key_t // The stack of simple keys.
+	simple_keys_by_tok map[int]int         // possible simple_key indexes indexed by token_number
+
+	// Parser stuff
+
+	state          yaml_parser_state_t    // The current parser state.
+	states         []yaml_parser_state_t  // The parser states stack.
+	marks          []yaml_mark_t          // The stack of marks.
+	tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+	// Dumper stuff
+
+	aliases []yaml_alias_data_t // The alias data.
+
+	document *yaml_document_t // The currently parsed document.
+}
+
+type yaml_comment_t struct {
+	scan_mark  yaml_mark_t // Position where scanning for comments started
+	token_mark yaml_mark_t // Position after which tokens will be associated with this comment
+	start_mark yaml_mark_t // Position of '#' comment mark
+	end_mark   yaml_mark_t // Position where comment terminated
+
+	head []byte
+	line []byte
+	foot []byte
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write all of buffer to the
+// output destination configured on the emitter.
+//
+// On success, the handler returns nil; on failure, it returns a non-nil
+// error.
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+	// Expect STREAM-START.
+	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+	yaml_EMIT_FIRST_DOCUMENT_START_STATE     // Expect the first DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_START_STATE           // Expect DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_CONTENT_STATE         // Expect the content of a document.
+	yaml_EMIT_DOCUMENT_END_STATE             // Expect DOCUMENT-END.
+	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+	yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE   // Expect the next item of a flow sequence, with the comma already written out
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE     // Expect the next key of a flow mapping, with the comma already written out
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_writer io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // Is the output in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	compact_sequence_indent bool // Is '- ' considered part of the indentation for sequence elements?
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // Was the last character whitespace?
+	indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
+	open_ended bool // Is an explicit document end required?
+
+	space_above bool // Is there an empty line above?
+	foot_indent int  // The indent used to write the foot comment above, or -1 if none.
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Comments
+	head_comment []byte
+	line_comment []byte
+	foot_comment []byte
+	tail_comment []byte
+
+	key_line_comment []byte
+
+	// Dumper stuff
+
+	opened bool // Was the stream already opened?
+	closed bool // Was the stream already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // Has the node been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/go.yaml.in/yaml/v3/yamlprivateh.go b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
new file mode 100644
index 000000000..dea1ba961
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
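+// Equivalently: the byte must fall in the set [0-9A-Za-z_-]; the scanner
+// relies on this predicate when reading anchor, alias, and directive names.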
+func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. 
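+// (Like is_breakz above, this is is_space || is_breakz expanded by hand; the
+// commented-out short form below documents the intent, and the flat version
+// presumably keeps the function cheap enough for the compiler to inline.)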
+func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go index 29f0a2de4..2b65ec91a 100644 --- a/vendor/golang.org/x/crypto/argon2/argon2.go +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -6,7 +6,7 @@ // Argon2 was selected as the winner of the Password Hashing Competition and can // be used to derive cryptographic keys from passwords. // -// For a detailed specification of Argon2 see [1]. +// For a detailed specification of Argon2 see [argon2-specs.pdf]. // // If you aren't sure which function you need, use Argon2id (IDKey) and // the parameter recommendations for your scenario. @@ -17,7 +17,7 @@ // It uses data-independent memory access, which is preferred for password // hashing and password-based key derivation. Argon2i requires more passes over // memory than Argon2id to protect from trade-off attacks. The recommended -// parameters (taken from [2]) for non-interactive operations are time=3 and to +// parameters (taken from [RFC 9106 Section 7.3]) for non-interactive operations are time=3 and to // use the maximum available memory. // // # Argon2id @@ -27,11 +27,11 @@ // half of the first iteration over the memory and data-dependent memory access // for the rest. Argon2id is side-channel resistant and provides better brute- // force cost savings due to time-memory tradeoffs than Argon2i. The recommended -// parameters for non-interactive operations (taken from [2]) are time=1 and to +// parameters for non-interactive operations (taken from [RFC 9106 Section 7.3]) are time=1 and to // use the maximum available memory. // -// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf -// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3 +// [argon2-specs.pdf]: https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf +// [RFC 9106 Section 7.3]: https://www.rfc-editor.org/rfc/rfc9106.html#section-7.3 package argon2 import ( @@ -59,7 +59,7 @@ const ( // // key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) // -// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. +// [RFC 9106 Section 7.3] recommends time=3, and memory=32*1024 as a sensible number. 
// If using that amount of memory (32 MB) is not possible in some contexts then // the time parameter can be increased to compensate. // @@ -69,6 +69,8 @@ const ( // adjusted to the number of available CPUs. The cost parameters should be // increased as memory latency and CPU parallelism increases. Remember to get a // good random salt. +// +// [RFC 9106 Section 7.3]: https://www.rfc-editor.org/rfc/rfc9106.html#section-7.3 func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen) } @@ -83,7 +85,7 @@ func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint3 // // key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) // -// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. +// [RFC 9106 Section 7.3] recommends time=1, and memory=64*1024 as a sensible number. // If using that amount of memory (64 MB) is not possible in some contexts then // the time parameter can be increased to compensate. // @@ -93,6 +95,8 @@ func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint3 // adjusted to the numbers of available CPUs. The cost parameters should be // increased as memory latency and CPU parallelism increases. Remember to get a // good random salt. +// +// [RFC 9106 Section 7.3]: https://www.rfc-editor.org/rfc/rfc9106.html#section-7.3 func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) } diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s index 7dd2638e8..769af387e 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s @@ -29,7 +29,7 @@ loop: MOVD $NUM_ROUNDS, R21 VLD1 (R11), [V30.S4, V31.S4] - // load contants + // load constants // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] WORD $0x4D60E940 diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go index 8ff087df4..048faef3a 100644 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -3,11 +3,14 @@ // license that can be found in the LICENSE file. // Package curve25519 provides an implementation of the X25519 function, which -// performs scalar multiplication on the elliptic curve known as Curve25519. -// See RFC 7748. +// performs scalar multiplication on the elliptic curve known as Curve25519 +// according to [RFC 7748]. // -// This package is a wrapper for the X25519 implementation -// in the crypto/ecdh package. +// The curve25519 package is a wrapper for the X25519 implementation in the +// crypto/ecdh package. It is [frozen] and is not accepting new features. +// +// [RFC 7748]: https://datatracker.ietf.org/doc/html/rfc7748 +// [frozen]: https://go.dev/wiki/Frozen package curve25519 import "crypto/ecdh" diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go deleted file mode 100644 index bbf391fe6..000000000 --- a/vendor/golang.org/x/crypto/sha3/doc.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package sha3 implements the SHA-3 fixed-output-length hash functions and -// the SHAKE variable-output-length hash functions defined by FIPS-202. -// -// All types in this package also implement [encoding.BinaryMarshaler], -// [encoding.BinaryAppender] and [encoding.BinaryUnmarshaler] to marshal and -// unmarshal the internal state of the hash. -// -// Both types of hash function use the "sponge" construction and the Keccak -// permutation. For a detailed specification see http://keccak.noekeon.org/ -// -// # Guidance -// -// If you aren't sure what function you need, use SHAKE256 with at least 64 -// bytes of output. The SHAKE instances are faster than the SHA3 instances; -// the latter have to allocate memory to conform to the hash.Hash interface. -// -// If you need a secret-key MAC (message authentication code), prepend the -// secret key to the input, hash with SHAKE256 and read at least 32 bytes of -// output. -// -// # Security strengths -// -// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security -// strength against preimage attacks of x bits. Since they only produce "x" -// bits of output, their collision-resistance is only "x/2" bits. -// -// The SHAKE-256 and -128 functions have a generic security strength of 256 and -// 128 bits against all attacks, provided that at least 2x bits of their output -// is used. Requesting more than 64 or 32 bytes of output, respectively, does -// not increase the collision-resistance of the SHAKE functions. -// -// # The sponge construction -// -// A sponge builds a pseudo-random function from a public pseudo-random -// permutation, by applying the permutation to a state of "rate + capacity" -// bytes, but hiding "capacity" of the bytes. -// -// A sponge starts out with a zero state. To hash an input using a sponge, up -// to "rate" bytes of the input are XORed into the sponge's state. The sponge -// is then "full" and the permutation is applied to "empty" it. This process is -// repeated until all the input has been "absorbed". The input is then padded. -// The digest is "squeezed" from the sponge in the same way, except that output -// is copied out instead of input being XORed in. -// -// A sponge is parameterized by its generic security strength, which is equal -// to half its capacity; capacity + rate is equal to the permutation's width. -// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means -// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. -// -// # Recommendations -// -// The SHAKE functions are recommended for most new uses. They can produce -// output of arbitrary length. SHAKE256, with an output length of at least -// 64 bytes, provides 256-bit security against all attacks. The Keccak team -// recommends it for most applications upgrading from SHA2-512. (NIST chose a -// much stronger, but much slower, sponge instance for SHA3-512.) -// -// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. -// They produce output of the same length, with the same security strengths -// against all attacks. This means, in particular, that SHA3-256 only has -// 128-bit collision resistance, because its output length is 32 bytes. 
-package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go index 31fffbe04..a51269d91 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes.go +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -2,127 +2,94 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// Package sha3 implements the SHA-3 hash algorithms and the SHAKE extendable +// output functions defined in FIPS 202. +// +// Most of this package is a wrapper around the crypto/sha3 package in the +// standard library. The only exception is the legacy Keccak hash functions. package sha3 -// This file provides functions for creating instances of the SHA-3 -// and SHAKE hash functions, as well as utility functions for hashing -// bytes. - import ( - "crypto" + "crypto/sha3" "hash" ) // New224 creates a new SHA3-224 hash. // Its generic security strength is 224 bits against preimage attacks, // and 112 bits against collision attacks. +// +// It is a wrapper for the [sha3.New224] function in the standard library. +// +//go:fix inline func New224() hash.Hash { - return new224() + return sha3.New224() } // New256 creates a new SHA3-256 hash. // Its generic security strength is 256 bits against preimage attacks, // and 128 bits against collision attacks. +// +// It is a wrapper for the [sha3.New256] function in the standard library. +// +//go:fix inline func New256() hash.Hash { - return new256() + return sha3.New256() } // New384 creates a new SHA3-384 hash. // Its generic security strength is 384 bits against preimage attacks, // and 192 bits against collision attacks. +// +// It is a wrapper for the [sha3.New384] function in the standard library. +// +//go:fix inline func New384() hash.Hash { - return new384() + return sha3.New384() } // New512 creates a new SHA3-512 hash. // Its generic security strength is 512 bits against preimage attacks, // and 256 bits against collision attacks. +// +// It is a wrapper for the [sha3.New512] function in the standard library. +// +//go:fix inline func New512() hash.Hash { - return new512() -} - -func init() { - crypto.RegisterHash(crypto.SHA3_224, New224) - crypto.RegisterHash(crypto.SHA3_256, New256) - crypto.RegisterHash(crypto.SHA3_384, New384) - crypto.RegisterHash(crypto.SHA3_512, New512) -} - -const ( - dsbyteSHA3 = 0b00000110 - dsbyteKeccak = 0b00000001 - dsbyteShake = 0b00011111 - dsbyteCShake = 0b00000100 - - // rateK[c] is the rate in bytes for Keccak[c] where c is the capacity in - // bits. Given the sponge size is 1600 bits, the rate is 1600 - c bits. - rateK256 = (1600 - 256) / 8 - rateK448 = (1600 - 448) / 8 - rateK512 = (1600 - 512) / 8 - rateK768 = (1600 - 768) / 8 - rateK1024 = (1600 - 1024) / 8 -) - -func new224Generic() *state { - return &state{rate: rateK448, outputLen: 28, dsbyte: dsbyteSHA3} -} - -func new256Generic() *state { - return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteSHA3} -} - -func new384Generic() *state { - return &state{rate: rateK768, outputLen: 48, dsbyte: dsbyteSHA3} -} - -func new512Generic() *state { - return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteSHA3} -} - -// NewLegacyKeccak256 creates a new Keccak-256 hash. -// -// Only use this function if you require compatibility with an existing cryptosystem -// that uses non-standard padding. All other users should use New256 instead. 
-func NewLegacyKeccak256() hash.Hash { - return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteKeccak} -} - -// NewLegacyKeccak512 creates a new Keccak-512 hash. -// -// Only use this function if you require compatibility with an existing cryptosystem -// that uses non-standard padding. All other users should use New512 instead. -func NewLegacyKeccak512() hash.Hash { - return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteKeccak} + return sha3.New512() } // Sum224 returns the SHA3-224 digest of the data. -func Sum224(data []byte) (digest [28]byte) { - h := New224() - h.Write(data) - h.Sum(digest[:0]) - return +// +// It is a wrapper for the [sha3.Sum224] function in the standard library. +// +//go:fix inline +func Sum224(data []byte) [28]byte { + return sha3.Sum224(data) } // Sum256 returns the SHA3-256 digest of the data. -func Sum256(data []byte) (digest [32]byte) { - h := New256() - h.Write(data) - h.Sum(digest[:0]) - return +// +// It is a wrapper for the [sha3.Sum256] function in the standard library. +// +//go:fix inline +func Sum256(data []byte) [32]byte { + return sha3.Sum256(data) } // Sum384 returns the SHA3-384 digest of the data. -func Sum384(data []byte) (digest [48]byte) { - h := New384() - h.Write(data) - h.Sum(digest[:0]) - return +// +// It is a wrapper for the [sha3.Sum384] function in the standard library. +// +//go:fix inline +func Sum384(data []byte) [48]byte { + return sha3.Sum384(data) } // Sum512 returns the SHA3-512 digest of the data. -func Sum512(data []byte) (digest [64]byte) { - h := New512() - h.Write(data) - h.Sum(digest[:0]) - return +// +// It is a wrapper for the [sha3.Sum512] function in the standard library. +// +//go:fix inline +func Sum512(data []byte) [64]byte { + return sha3.Sum512(data) } diff --git a/vendor/golang.org/x/crypto/sha3/hashes_noasm.go b/vendor/golang.org/x/crypto/sha3/hashes_noasm.go deleted file mode 100644 index 9d85fb621..000000000 --- a/vendor/golang.org/x/crypto/sha3/hashes_noasm.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !gc || purego || !s390x - -package sha3 - -func new224() *state { - return new224Generic() -} - -func new256() *state { - return new256Generic() -} - -func new384() *state { - return new384Generic() -} - -func new512() *state { - return new512Generic() -} diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go deleted file mode 100644 index b908696be..000000000 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 && !purego && gc - -package sha3 - -// This function is implemented in keccakf_amd64.s. - -//go:noescape - -func keccakF1600(a *[25]uint64) diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s deleted file mode 100644 index 99e2f16e9..000000000 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s +++ /dev/null @@ -1,5419 +0,0 @@ -// Code generated by command: go run keccakf_amd64_asm.go -out ../keccakf_amd64.s -pkg sha3. DO NOT EDIT. 
- -//go:build amd64 && !purego && gc - -// func keccakF1600(a *[25]uint64) -TEXT ·keccakF1600(SB), $200-8 - MOVQ a+0(FP), DI - - // Convert the user state into an internal state - NOTQ 8(DI) - NOTQ 16(DI) - NOTQ 64(DI) - NOTQ 96(DI) - NOTQ 136(DI) - NOTQ 160(DI) - - // Execute the KeccakF permutation - MOVQ (DI), SI - MOVQ 8(DI), BP - MOVQ 32(DI), R15 - XORQ 40(DI), SI - XORQ 48(DI), BP - XORQ 72(DI), R15 - XORQ 80(DI), SI - XORQ 88(DI), BP - XORQ 112(DI), R15 - XORQ 120(DI), SI - XORQ 128(DI), BP - XORQ 152(DI), R15 - XORQ 160(DI), SI - XORQ 168(DI), BP - MOVQ 176(DI), DX - MOVQ 184(DI), R8 - XORQ 192(DI), R15 - - // Prepare round - MOVQ BP, BX - ROLQ $0x01, BX - MOVQ 16(DI), R12 - XORQ 56(DI), DX - XORQ R15, BX - XORQ 96(DI), R12 - XORQ 136(DI), DX - XORQ DX, R12 - MOVQ R12, CX - ROLQ $0x01, CX - MOVQ 24(DI), R13 - XORQ 64(DI), R8 - XORQ SI, CX - XORQ 104(DI), R13 - XORQ 144(DI), R8 - XORQ R8, R13 - MOVQ R13, DX - ROLQ $0x01, DX - MOVQ R15, R8 - XORQ BP, DX - ROLQ $0x01, R8 - MOVQ SI, R9 - XORQ R12, R8 - ROLQ $0x01, R9 - - // Result b - MOVQ (DI), R10 - MOVQ 48(DI), R11 - XORQ R13, R9 - MOVQ 96(DI), R12 - MOVQ 144(DI), R13 - MOVQ 192(DI), R14 - XORQ CX, R11 - ROLQ $0x2c, R11 - XORQ DX, R12 - XORQ BX, R10 - ROLQ $0x2b, R12 - MOVQ R11, SI - MOVQ $0x0000000000000001, AX - ORQ R12, SI - XORQ R10, AX - XORQ AX, SI - MOVQ SI, (SP) - XORQ R9, R14 - ROLQ $0x0e, R14 - MOVQ R10, R15 - ANDQ R11, R15 - XORQ R14, R15 - MOVQ R15, 32(SP) - XORQ R8, R13 - ROLQ $0x15, R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 16(SP) - NOTQ R12 - ORQ R10, R14 - ORQ R13, R12 - XORQ R13, R14 - XORQ R11, R12 - MOVQ R14, 24(SP) - MOVQ R12, 8(SP) - MOVQ R12, BP - - // Result g - MOVQ 72(DI), R11 - XORQ R9, R11 - MOVQ 80(DI), R12 - ROLQ $0x14, R11 - XORQ BX, R12 - ROLQ $0x03, R12 - MOVQ 24(DI), R10 - MOVQ R11, AX - ORQ R12, AX - XORQ R8, R10 - MOVQ 128(DI), R13 - MOVQ 176(DI), R14 - ROLQ $0x1c, R10 - XORQ R10, AX - MOVQ AX, 40(SP) - XORQ AX, SI - XORQ CX, R13 - ROLQ $0x2d, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 48(SP) - XORQ AX, BP - XORQ DX, R14 - ROLQ $0x3d, R14 - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 64(SP) - ANDQ R11, R10 - XORQ R14, R10 - MOVQ R10, 72(SP) - NOTQ R14 - XORQ R10, R15 - ORQ R14, R13 - XORQ R12, R13 - MOVQ R13, 56(SP) - - // Result k - MOVQ 8(DI), R10 - MOVQ 56(DI), R11 - MOVQ 104(DI), R12 - MOVQ 152(DI), R13 - MOVQ 160(DI), R14 - XORQ DX, R11 - ROLQ $0x06, R11 - XORQ R8, R12 - ROLQ $0x19, R12 - MOVQ R11, AX - ORQ R12, AX - XORQ CX, R10 - ROLQ $0x01, R10 - XORQ R10, AX - MOVQ AX, 80(SP) - XORQ AX, SI - XORQ R9, R13 - ROLQ $0x08, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 88(SP) - XORQ AX, BP - XORQ BX, R14 - ROLQ $0x12, R14 - NOTQ R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 96(SP) - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 104(SP) - ANDQ R11, R10 - XORQ R14, R10 - MOVQ R10, 112(SP) - XORQ R10, R15 - - // Result m - MOVQ 40(DI), R11 - XORQ BX, R11 - MOVQ 88(DI), R12 - ROLQ $0x24, R11 - XORQ CX, R12 - MOVQ 32(DI), R10 - ROLQ $0x0a, R12 - MOVQ R11, AX - MOVQ 136(DI), R13 - ANDQ R12, AX - XORQ R9, R10 - MOVQ 184(DI), R14 - ROLQ $0x1b, R10 - XORQ R10, AX - MOVQ AX, 120(SP) - XORQ AX, SI - XORQ DX, R13 - ROLQ $0x0f, R13 - MOVQ R12, AX - ORQ R13, AX - XORQ R11, AX - MOVQ AX, 128(SP) - XORQ AX, BP - XORQ R8, R14 - ROLQ $0x38, R14 - NOTQ R13 - MOVQ R13, AX - ORQ R14, AX - XORQ R12, AX - MOVQ AX, 136(SP) - ORQ R10, R11 - XORQ R14, R11 - MOVQ R11, 152(SP) - ANDQ R10, R14 - XORQ R13, R14 - MOVQ R14, 144(SP) - XORQ R11, R15 - 
- // Result s - MOVQ 16(DI), R10 - MOVQ 64(DI), R11 - MOVQ 112(DI), R12 - XORQ DX, R10 - MOVQ 120(DI), R13 - ROLQ $0x3e, R10 - XORQ R8, R11 - MOVQ 168(DI), R14 - ROLQ $0x37, R11 - XORQ R9, R12 - MOVQ R10, R9 - XORQ CX, R14 - ROLQ $0x02, R14 - ANDQ R11, R9 - XORQ R14, R9 - MOVQ R9, 192(SP) - ROLQ $0x27, R12 - XORQ R9, R15 - NOTQ R11 - XORQ BX, R13 - MOVQ R11, BX - ANDQ R12, BX - XORQ R10, BX - MOVQ BX, 160(SP) - XORQ BX, SI - ROLQ $0x29, R13 - MOVQ R12, CX - ORQ R13, CX - XORQ R11, CX - MOVQ CX, 168(SP) - XORQ CX, BP - MOVQ R13, DX - MOVQ R14, R8 - ANDQ R14, DX - ORQ R10, R8 - XORQ R12, DX - XORQ R13, R8 - MOVQ DX, 176(SP) - MOVQ R8, 184(SP) - - // Prepare round - MOVQ BP, BX - ROLQ $0x01, BX - MOVQ 16(SP), R12 - XORQ 56(SP), DX - XORQ R15, BX - XORQ 96(SP), R12 - XORQ 136(SP), DX - XORQ DX, R12 - MOVQ R12, CX - ROLQ $0x01, CX - MOVQ 24(SP), R13 - XORQ 64(SP), R8 - XORQ SI, CX - XORQ 104(SP), R13 - XORQ 144(SP), R8 - XORQ R8, R13 - MOVQ R13, DX - ROLQ $0x01, DX - MOVQ R15, R8 - XORQ BP, DX - ROLQ $0x01, R8 - MOVQ SI, R9 - XORQ R12, R8 - ROLQ $0x01, R9 - - // Result b - MOVQ (SP), R10 - MOVQ 48(SP), R11 - XORQ R13, R9 - MOVQ 96(SP), R12 - MOVQ 144(SP), R13 - MOVQ 192(SP), R14 - XORQ CX, R11 - ROLQ $0x2c, R11 - XORQ DX, R12 - XORQ BX, R10 - ROLQ $0x2b, R12 - MOVQ R11, SI - MOVQ $0x0000000000008082, AX - ORQ R12, SI - XORQ R10, AX - XORQ AX, SI - MOVQ SI, (DI) - XORQ R9, R14 - ROLQ $0x0e, R14 - MOVQ R10, R15 - ANDQ R11, R15 - XORQ R14, R15 - MOVQ R15, 32(DI) - XORQ R8, R13 - ROLQ $0x15, R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 16(DI) - NOTQ R12 - ORQ R10, R14 - ORQ R13, R12 - XORQ R13, R14 - XORQ R11, R12 - MOVQ R14, 24(DI) - MOVQ R12, 8(DI) - MOVQ R12, BP - - // Result g - MOVQ 72(SP), R11 - XORQ R9, R11 - MOVQ 80(SP), R12 - ROLQ $0x14, R11 - XORQ BX, R12 - ROLQ $0x03, R12 - MOVQ 24(SP), R10 - MOVQ R11, AX - ORQ R12, AX - XORQ R8, R10 - MOVQ 128(SP), R13 - MOVQ 176(SP), R14 - ROLQ $0x1c, R10 - XORQ R10, AX - MOVQ AX, 40(DI) - XORQ AX, SI - XORQ CX, R13 - ROLQ $0x2d, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 48(DI) - XORQ AX, BP - XORQ DX, R14 - ROLQ $0x3d, R14 - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 64(DI) - ANDQ R11, R10 - XORQ R14, R10 - MOVQ R10, 72(DI) - NOTQ R14 - XORQ R10, R15 - ORQ R14, R13 - XORQ R12, R13 - MOVQ R13, 56(DI) - - // Result k - MOVQ 8(SP), R10 - MOVQ 56(SP), R11 - MOVQ 104(SP), R12 - MOVQ 152(SP), R13 - MOVQ 160(SP), R14 - XORQ DX, R11 - ROLQ $0x06, R11 - XORQ R8, R12 - ROLQ $0x19, R12 - MOVQ R11, AX - ORQ R12, AX - XORQ CX, R10 - ROLQ $0x01, R10 - XORQ R10, AX - MOVQ AX, 80(DI) - XORQ AX, SI - XORQ R9, R13 - ROLQ $0x08, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 88(DI) - XORQ AX, BP - XORQ BX, R14 - ROLQ $0x12, R14 - NOTQ R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 96(DI) - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 104(DI) - ANDQ R11, R10 - XORQ R14, R10 - MOVQ R10, 112(DI) - XORQ R10, R15 - - // Result m - MOVQ 40(SP), R11 - XORQ BX, R11 - MOVQ 88(SP), R12 - ROLQ $0x24, R11 - XORQ CX, R12 - MOVQ 32(SP), R10 - ROLQ $0x0a, R12 - MOVQ R11, AX - MOVQ 136(SP), R13 - ANDQ R12, AX - XORQ R9, R10 - MOVQ 184(SP), R14 - ROLQ $0x1b, R10 - XORQ R10, AX - MOVQ AX, 120(DI) - XORQ AX, SI - XORQ DX, R13 - ROLQ $0x0f, R13 - MOVQ R12, AX - ORQ R13, AX - XORQ R11, AX - MOVQ AX, 128(DI) - XORQ AX, BP - XORQ R8, R14 - ROLQ $0x38, R14 - NOTQ R13 - MOVQ R13, AX - ORQ R14, AX - XORQ R12, AX - MOVQ AX, 136(DI) - ORQ R10, R11 - XORQ R14, R11 - MOVQ R11, 152(DI) - ANDQ R10, R14 - XORQ R13, R14 - MOVQ 
R14, 144(DI) - XORQ R11, R15 - - // Result s - MOVQ 16(SP), R10 - MOVQ 64(SP), R11 - MOVQ 112(SP), R12 - XORQ DX, R10 - MOVQ 120(SP), R13 - ROLQ $0x3e, R10 - XORQ R8, R11 - MOVQ 168(SP), R14 - ROLQ $0x37, R11 - XORQ R9, R12 - MOVQ R10, R9 - XORQ CX, R14 - ROLQ $0x02, R14 - ANDQ R11, R9 - XORQ R14, R9 - MOVQ R9, 192(DI) - ROLQ $0x27, R12 - XORQ R9, R15 - NOTQ R11 - XORQ BX, R13 - MOVQ R11, BX - ANDQ R12, BX - XORQ R10, BX - MOVQ BX, 160(DI) - XORQ BX, SI - ROLQ $0x29, R13 - MOVQ R12, CX - ORQ R13, CX - XORQ R11, CX - MOVQ CX, 168(DI) - XORQ CX, BP - MOVQ R13, DX - MOVQ R14, R8 - ANDQ R14, DX - ORQ R10, R8 - XORQ R12, DX - XORQ R13, R8 - MOVQ DX, 176(DI) - MOVQ R8, 184(DI) - - // Prepare round - MOVQ BP, BX - ROLQ $0x01, BX - MOVQ 16(DI), R12 - XORQ 56(DI), DX - XORQ R15, BX - XORQ 96(DI), R12 - XORQ 136(DI), DX - XORQ DX, R12 - MOVQ R12, CX - ROLQ $0x01, CX - MOVQ 24(DI), R13 - XORQ 64(DI), R8 - XORQ SI, CX - XORQ 104(DI), R13 - XORQ 144(DI), R8 - XORQ R8, R13 - MOVQ R13, DX - ROLQ $0x01, DX - MOVQ R15, R8 - XORQ BP, DX - ROLQ $0x01, R8 - MOVQ SI, R9 - XORQ R12, R8 - ROLQ $0x01, R9 - - // Result b - MOVQ (DI), R10 - MOVQ 48(DI), R11 - XORQ R13, R9 - MOVQ 96(DI), R12 - MOVQ 144(DI), R13 - MOVQ 192(DI), R14 - XORQ CX, R11 - ROLQ $0x2c, R11 - XORQ DX, R12 - XORQ BX, R10 - ROLQ $0x2b, R12 - MOVQ R11, SI - MOVQ $0x800000000000808a, AX - ORQ R12, SI - XORQ R10, AX - XORQ AX, SI - MOVQ SI, (SP) - XORQ R9, R14 - ROLQ $0x0e, R14 - MOVQ R10, R15 - ANDQ R11, R15 - XORQ R14, R15 - MOVQ R15, 32(SP) - XORQ R8, R13 - ROLQ $0x15, R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 16(SP) - NOTQ R12 - ORQ R10, R14 - ORQ R13, R12 - XORQ R13, R14 - XORQ R11, R12 - MOVQ R14, 24(SP) - MOVQ R12, 8(SP) - MOVQ R12, BP - - // Result g - MOVQ 72(DI), R11 - XORQ R9, R11 - MOVQ 80(DI), R12 - ROLQ $0x14, R11 - XORQ BX, R12 - ROLQ $0x03, R12 - MOVQ 24(DI), R10 - MOVQ R11, AX - ORQ R12, AX - XORQ R8, R10 - MOVQ 128(DI), R13 - MOVQ 176(DI), R14 - ROLQ $0x1c, R10 - XORQ R10, AX - MOVQ AX, 40(SP) - XORQ AX, SI - XORQ CX, R13 - ROLQ $0x2d, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 48(SP) - XORQ AX, BP - XORQ DX, R14 - ROLQ $0x3d, R14 - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 64(SP) - ANDQ R11, R10 - XORQ R14, R10 - MOVQ R10, 72(SP) - NOTQ R14 - XORQ R10, R15 - ORQ R14, R13 - XORQ R12, R13 - MOVQ R13, 56(SP) - - // Result k - MOVQ 8(DI), R10 - MOVQ 56(DI), R11 - MOVQ 104(DI), R12 - MOVQ 152(DI), R13 - MOVQ 160(DI), R14 - XORQ DX, R11 - ROLQ $0x06, R11 - XORQ R8, R12 - ROLQ $0x19, R12 - MOVQ R11, AX - ORQ R12, AX - XORQ CX, R10 - ROLQ $0x01, R10 - XORQ R10, AX - MOVQ AX, 80(SP) - XORQ AX, SI - XORQ R9, R13 - ROLQ $0x08, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 88(SP) - XORQ AX, BP - XORQ BX, R14 - ROLQ $0x12, R14 - NOTQ R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 96(SP) - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 104(SP) - ANDQ R11, R10 - XORQ R14, R10 - MOVQ R10, 112(SP) - XORQ R10, R15 - - // Result m - MOVQ 40(DI), R11 - XORQ BX, R11 - MOVQ 88(DI), R12 - ROLQ $0x24, R11 - XORQ CX, R12 - MOVQ 32(DI), R10 - ROLQ $0x0a, R12 - MOVQ R11, AX - MOVQ 136(DI), R13 - ANDQ R12, AX - XORQ R9, R10 - MOVQ 184(DI), R14 - ROLQ $0x1b, R10 - XORQ R10, AX - MOVQ AX, 120(SP) - XORQ AX, SI - XORQ DX, R13 - ROLQ $0x0f, R13 - MOVQ R12, AX - ORQ R13, AX - XORQ R11, AX - MOVQ AX, 128(SP) - XORQ AX, BP - XORQ R8, R14 - ROLQ $0x38, R14 - NOTQ R13 - MOVQ R13, AX - ORQ R14, AX - XORQ R12, AX - MOVQ AX, 136(SP) - ORQ R10, R11 - XORQ R14, R11 - MOVQ R11, 152(SP) - ANDQ 
R10, R14 - XORQ R13, R14 - MOVQ R14, 144(SP) - XORQ R11, R15 - - // Result s - MOVQ 16(DI), R10 - MOVQ 64(DI), R11 - MOVQ 112(DI), R12 - XORQ DX, R10 - MOVQ 120(DI), R13 - ROLQ $0x3e, R10 - XORQ R8, R11 - MOVQ 168(DI), R14 - ROLQ $0x37, R11 - XORQ R9, R12 - MOVQ R10, R9 - XORQ CX, R14 - ROLQ $0x02, R14 - ANDQ R11, R9 - XORQ R14, R9 - MOVQ R9, 192(SP) - ROLQ $0x27, R12 - XORQ R9, R15 - NOTQ R11 - XORQ BX, R13 - MOVQ R11, BX - ANDQ R12, BX - XORQ R10, BX - MOVQ BX, 160(SP) - XORQ BX, SI - ROLQ $0x29, R13 - MOVQ R12, CX - ORQ R13, CX - XORQ R11, CX - MOVQ CX, 168(SP) - XORQ CX, BP - MOVQ R13, DX - MOVQ R14, R8 - ANDQ R14, DX - ORQ R10, R8 - XORQ R12, DX - XORQ R13, R8 - MOVQ DX, 176(SP) - MOVQ R8, 184(SP) - - // Prepare round - MOVQ BP, BX - ROLQ $0x01, BX - MOVQ 16(SP), R12 - XORQ 56(SP), DX - XORQ R15, BX - XORQ 96(SP), R12 - XORQ 136(SP), DX - XORQ DX, R12 - MOVQ R12, CX - ROLQ $0x01, CX - MOVQ 24(SP), R13 - XORQ 64(SP), R8 - XORQ SI, CX - XORQ 104(SP), R13 - XORQ 144(SP), R8 - XORQ R8, R13 - MOVQ R13, DX - ROLQ $0x01, DX - MOVQ R15, R8 - XORQ BP, DX - ROLQ $0x01, R8 - MOVQ SI, R9 - XORQ R12, R8 - ROLQ $0x01, R9 - - // Result b - MOVQ (SP), R10 - MOVQ 48(SP), R11 - XORQ R13, R9 - MOVQ 96(SP), R12 - MOVQ 144(SP), R13 - MOVQ 192(SP), R14 - XORQ CX, R11 - ROLQ $0x2c, R11 - XORQ DX, R12 - XORQ BX, R10 - ROLQ $0x2b, R12 - MOVQ R11, SI - MOVQ $0x8000000080008000, AX - ORQ R12, SI - XORQ R10, AX - XORQ AX, SI - MOVQ SI, (DI) - XORQ R9, R14 - ROLQ $0x0e, R14 - MOVQ R10, R15 - ANDQ R11, R15 - XORQ R14, R15 - MOVQ R15, 32(DI) - XORQ R8, R13 - ROLQ $0x15, R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 16(DI) - NOTQ R12 - ORQ R10, R14 - ORQ R13, R12 - XORQ R13, R14 - XORQ R11, R12 - MOVQ R14, 24(DI) - MOVQ R12, 8(DI) - MOVQ R12, BP - - // Result g - MOVQ 72(SP), R11 - XORQ R9, R11 - MOVQ 80(SP), R12 - ROLQ $0x14, R11 - XORQ BX, R12 - ROLQ $0x03, R12 - MOVQ 24(SP), R10 - MOVQ R11, AX - ORQ R12, AX - XORQ R8, R10 - MOVQ 128(SP), R13 - MOVQ 176(SP), R14 - ROLQ $0x1c, R10 - XORQ R10, AX - MOVQ AX, 40(DI) - XORQ AX, SI - XORQ CX, R13 - ROLQ $0x2d, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 48(DI) - XORQ AX, BP - XORQ DX, R14 - ROLQ $0x3d, R14 - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 64(DI) - ANDQ R11, R10 - XORQ R14, R10 - MOVQ R10, 72(DI) - NOTQ R14 - XORQ R10, R15 - ORQ R14, R13 - XORQ R12, R13 - MOVQ R13, 56(DI) - - // Result k - MOVQ 8(SP), R10 - MOVQ 56(SP), R11 - MOVQ 104(SP), R12 - MOVQ 152(SP), R13 - MOVQ 160(SP), R14 - XORQ DX, R11 - ROLQ $0x06, R11 - XORQ R8, R12 - ROLQ $0x19, R12 - MOVQ R11, AX - ORQ R12, AX - XORQ CX, R10 - ROLQ $0x01, R10 - XORQ R10, AX - MOVQ AX, 80(DI) - XORQ AX, SI - XORQ R9, R13 - ROLQ $0x08, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 88(DI) - XORQ AX, BP - XORQ BX, R14 - ROLQ $0x12, R14 - NOTQ R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 96(DI) - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 104(DI) - ANDQ R11, R10 - XORQ R14, R10 - MOVQ R10, 112(DI) - XORQ R10, R15 - - // Result m - MOVQ 40(SP), R11 - XORQ BX, R11 - MOVQ 88(SP), R12 - ROLQ $0x24, R11 - XORQ CX, R12 - MOVQ 32(SP), R10 - ROLQ $0x0a, R12 - MOVQ R11, AX - MOVQ 136(SP), R13 - ANDQ R12, AX - XORQ R9, R10 - MOVQ 184(SP), R14 - ROLQ $0x1b, R10 - XORQ R10, AX - MOVQ AX, 120(DI) - XORQ AX, SI - XORQ DX, R13 - ROLQ $0x0f, R13 - MOVQ R12, AX - ORQ R13, AX - XORQ R11, AX - MOVQ AX, 128(DI) - XORQ AX, BP - XORQ R8, R14 - ROLQ $0x38, R14 - NOTQ R13 - MOVQ R13, AX - ORQ R14, AX - XORQ R12, AX - MOVQ AX, 136(DI) - ORQ R10, R11 - XORQ R14, 
R11

	[The rest of this hunk, flattened during extraction, deletes the remaining
	unrolled rounds of the vendored amd64 Keccak-f[1600] assembly (apparently
	x/crypto/sha3's keccakf_amd64.s). Every deleted round repeats the same five
	blocks — "Prepare round" (the theta column parities) followed by "Result b",
	"Result g", "Result k", "Result m" and "Result s" (the rho/pi rotations and
	chi row mix, one block per row of the 5x5 lane state) — while alternating the
	state buffer between DI and SP, and differs only in the iota round constant
	loaded into AX (0x000000000000808b through 0x8000000080008008). The final
	round, whose constant is 0x8000000080008008, substitutes NOPs for the theta
	preparation it no longer needs. A representative deleted round opens:]

-	// Prepare round
-	MOVQ BP, BX
-	ROLQ $0x01, BX
-	MOVQ 16(DI), R12
-	XORQ 56(DI), DX
-	XORQ R15, BX
-	XORQ 96(DI), R12
-	XORQ 136(DI), DX
-	XORQ DX, R12
-	MOVQ R12, CX
-	ROLQ $0x01, CX
-	MOVQ 24(DI), R13
-	XORQ 64(DI), R8
-	XORQ SI, CX
-	XORQ 104(DI), R13
-	XORQ 144(DI), R8
-	XORQ R8, R13
-	MOVQ R13, DX
-	ROLQ $0x01, DX
-	MOVQ R15, R8
-	XORQ BP, DX
-	ROLQ $0x01, R8
-	MOVQ SI, R9
-	XORQ R12, R8
-	ROLQ $0x01, R9

	[… remaining rounds elided; the deletion resumes mid-instruction below …]

-	ANDQ R11,
R10 - XORQ R14, R10 - MOVQ R10, 112(DI) - NOP - - // Result m - MOVQ 40(SP), R11 - XORQ BX, R11 - MOVQ 88(SP), R12 - ROLQ $0x24, R11 - XORQ CX, R12 - MOVQ 32(SP), R10 - ROLQ $0x0a, R12 - MOVQ R11, AX - MOVQ 136(SP), R13 - ANDQ R12, AX - XORQ R9, R10 - MOVQ 184(SP), R14 - ROLQ $0x1b, R10 - XORQ R10, AX - MOVQ AX, 120(DI) - NOP - XORQ DX, R13 - ROLQ $0x0f, R13 - MOVQ R12, AX - ORQ R13, AX - XORQ R11, AX - MOVQ AX, 128(DI) - NOP - XORQ R8, R14 - ROLQ $0x38, R14 - NOTQ R13 - MOVQ R13, AX - ORQ R14, AX - XORQ R12, AX - MOVQ AX, 136(DI) - ORQ R10, R11 - XORQ R14, R11 - MOVQ R11, 152(DI) - ANDQ R10, R14 - XORQ R13, R14 - MOVQ R14, 144(DI) - NOP - - // Result s - MOVQ 16(SP), R10 - MOVQ 64(SP), R11 - MOVQ 112(SP), R12 - XORQ DX, R10 - MOVQ 120(SP), R13 - ROLQ $0x3e, R10 - XORQ R8, R11 - MOVQ 168(SP), R14 - ROLQ $0x37, R11 - XORQ R9, R12 - MOVQ R10, R9 - XORQ CX, R14 - ROLQ $0x02, R14 - ANDQ R11, R9 - XORQ R14, R9 - MOVQ R9, 192(DI) - ROLQ $0x27, R12 - NOP - NOTQ R11 - XORQ BX, R13 - MOVQ R11, BX - ANDQ R12, BX - XORQ R10, BX - MOVQ BX, 160(DI) - NOP - ROLQ $0x29, R13 - MOVQ R12, CX - ORQ R13, CX - XORQ R11, CX - MOVQ CX, 168(DI) - NOP - MOVQ R13, DX - MOVQ R14, R8 - ANDQ R14, DX - ORQ R10, R8 - XORQ R12, DX - XORQ R13, R8 - MOVQ DX, 176(DI) - MOVQ R8, 184(DI) - - // Revert the internal state to the user state - NOTQ 8(DI) - NOTQ 16(DI) - NOTQ 64(DI) - NOTQ 96(DI) - NOTQ 136(DI) - NOTQ 160(DI) - RET diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/legacy_hash.go similarity index 83% rename from vendor/golang.org/x/crypto/sha3/sha3.go rename to vendor/golang.org/x/crypto/sha3/legacy_hash.go index 6658c4447..b8784536e 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3.go +++ b/vendor/golang.org/x/crypto/sha3/legacy_hash.go @@ -4,15 +4,46 @@ package sha3 +// This implementation is only used for NewLegacyKeccak256 and +// NewLegacyKeccak512, which are not implemented by crypto/sha3. +// All other functions in this package are wrappers around crypto/sha3. + import ( "crypto/subtle" "encoding/binary" "errors" + "hash" "unsafe" "golang.org/x/sys/cpu" ) +const ( + dsbyteKeccak = 0b00000001 + + // rateK[c] is the rate in bytes for Keccak[c] where c is the capacity in + // bits. Given the sponge size is 1600 bits, the rate is 1600 - c bits. + rateK256 = (1600 - 256) / 8 + rateK512 = (1600 - 512) / 8 + rateK1024 = (1600 - 1024) / 8 +) + +// NewLegacyKeccak256 creates a new Keccak-256 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New256 instead. +func NewLegacyKeccak256() hash.Hash { + return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteKeccak} +} + +// NewLegacyKeccak512 creates a new Keccak-512 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New512 instead. +func NewLegacyKeccak512() hash.Hash { + return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteKeccak} +} + // spongeDirection indicates the direction bytes are flowing through the sponge. 
type spongeDirection int @@ -173,12 +204,9 @@ func (d *state) Sum(in []byte) []byte { } const ( - magicSHA3 = "sha\x08" - magicShake = "sha\x09" - magicCShake = "sha\x0a" magicKeccak = "sha\x0b" // magic || rate || main state || n || sponge direction - marshaledSize = len(magicSHA3) + 1 + 200 + 1 + 1 + marshaledSize = len(magicKeccak) + 1 + 200 + 1 + 1 ) func (d *state) MarshalBinary() ([]byte, error) { @@ -187,12 +215,6 @@ func (d *state) MarshalBinary() ([]byte, error) { func (d *state) AppendBinary(b []byte) ([]byte, error) { switch d.dsbyte { - case dsbyteSHA3: - b = append(b, magicSHA3...) - case dsbyteShake: - b = append(b, magicShake...) - case dsbyteCShake: - b = append(b, magicCShake...) case dsbyteKeccak: b = append(b, magicKeccak...) default: @@ -210,12 +232,9 @@ func (d *state) UnmarshalBinary(b []byte) error { return errors.New("sha3: invalid hash state") } - magic := string(b[:len(magicSHA3)]) - b = b[len(magicSHA3):] + magic := string(b[:len(magicKeccak)]) + b = b[len(magicKeccak):] switch { - case magic == magicSHA3 && d.dsbyte == dsbyteSHA3: - case magic == magicShake && d.dsbyte == dsbyteShake: - case magic == magicCShake && d.dsbyte == dsbyteCShake: case magic == magicKeccak && d.dsbyte == dsbyteKeccak: default: return errors.New("sha3: invalid hash state identifier") diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/legacy_keccakf.go similarity index 98% rename from vendor/golang.org/x/crypto/sha3/keccakf.go rename to vendor/golang.org/x/crypto/sha3/legacy_keccakf.go index ce48b1dd3..101588c16 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf.go +++ b/vendor/golang.org/x/crypto/sha3/legacy_keccakf.go @@ -2,10 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !amd64 || purego || !gc - package sha3 +// This implementation is only used for NewLegacyKeccak256 and +// NewLegacyKeccak512, which are not implemented by crypto/sha3. +// All other functions in this package are wrappers around crypto/sha3. + import "math/bits" // rc stores the round constants for use in the ι step. diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go deleted file mode 100644 index 00d8034ae..000000000 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego - -package sha3 - -// This file contains code for using the 'compute intermediate -// message digest' (KIMD) and 'compute last message digest' (KLMD) -// instructions to compute SHA-3 and SHAKE hashes on IBM Z. - -import ( - "hash" - - "golang.org/x/sys/cpu" -) - -// codes represent 7-bit KIMD/KLMD function codes as defined in -// the Principles of Operation. -type code uint64 - -const ( - // function codes for KIMD/KLMD - sha3_224 code = 32 - sha3_256 = 33 - sha3_384 = 34 - sha3_512 = 35 - shake_128 = 36 - shake_256 = 37 - nopad = 0x100 -) - -// kimd is a wrapper for the 'compute intermediate message digest' instruction. -// src must be a multiple of the rate for the given function code. -// -//go:noescape -func kimd(function code, chain *[200]byte, src []byte) - -// klmd is a wrapper for the 'compute last message digest' instruction. -// src padding is handled by the instruction. 
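The rename above keeps a private sponge implementation in x/crypto/sha3 solely for the two legacy Keccak constructors, which crypto/sha3 does not provide; the rate arithmetic gives Keccak-256 a block size of rateK512 = (1600 - 512) / 8 = 136 bytes. A minimal sketch of the surviving code path, including the state round-trip that now only recognizes the "sha\x0b" magic; the type assertions are illustrative, not part of the exported API:

    package main

    import (
        "encoding"
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    func main() {
        h := sha3.NewLegacyKeccak256() // 136-byte rate, 32-byte output, dsbyte 0x01
        h.Write([]byte("hello "))

        // The retained state type still implements encoding.BinaryMarshaler,
        // but the serialized form now only carries the Keccak domain byte.
        saved, err := h.(encoding.BinaryMarshaler).MarshalBinary()
        if err != nil {
            panic(err)
        }

        h2 := sha3.NewLegacyKeccak256()
        if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(saved); err != nil {
            panic(err)
        }
        h2.Write([]byte("world"))
        fmt.Printf("%x\n", h2.Sum(nil)) // Keccak-256("hello world")
    }
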
-// -//go:noescape -func klmd(function code, chain *[200]byte, dst, src []byte) - -type asmState struct { - a [200]byte // 1600 bit state - buf []byte // care must be taken to ensure cap(buf) is a multiple of rate - rate int // equivalent to block size - storage [3072]byte // underlying storage for buf - outputLen int // output length for full security - function code // KIMD/KLMD function code - state spongeDirection // whether the sponge is absorbing or squeezing -} - -func newAsmState(function code) *asmState { - var s asmState - s.function = function - switch function { - case sha3_224: - s.rate = 144 - s.outputLen = 28 - case sha3_256: - s.rate = 136 - s.outputLen = 32 - case sha3_384: - s.rate = 104 - s.outputLen = 48 - case sha3_512: - s.rate = 72 - s.outputLen = 64 - case shake_128: - s.rate = 168 - s.outputLen = 32 - case shake_256: - s.rate = 136 - s.outputLen = 64 - default: - panic("sha3: unrecognized function code") - } - - // limit s.buf size to a multiple of s.rate - s.resetBuf() - return &s -} - -func (s *asmState) clone() *asmState { - c := *s - c.buf = c.storage[:len(s.buf):cap(s.buf)] - return &c -} - -// copyIntoBuf copies b into buf. It will panic if there is not enough space to -// store all of b. -func (s *asmState) copyIntoBuf(b []byte) { - bufLen := len(s.buf) - s.buf = s.buf[:len(s.buf)+len(b)] - copy(s.buf[bufLen:], b) -} - -// resetBuf points buf at storage, sets the length to 0 and sets cap to be a -// multiple of the rate. -func (s *asmState) resetBuf() { - max := (cap(s.storage) / s.rate) * s.rate - s.buf = s.storage[:0:max] -} - -// Write (via the embedded io.Writer interface) adds more data to the running hash. -// It never returns an error. -func (s *asmState) Write(b []byte) (int, error) { - if s.state != spongeAbsorbing { - panic("sha3: Write after Read") - } - length := len(b) - for len(b) > 0 { - if len(s.buf) == 0 && len(b) >= cap(s.buf) { - // Hash the data directly and push any remaining bytes - // into the buffer. - remainder := len(b) % s.rate - kimd(s.function, &s.a, b[:len(b)-remainder]) - if remainder != 0 { - s.copyIntoBuf(b[len(b)-remainder:]) - } - return length, nil - } - - if len(s.buf) == cap(s.buf) { - // flush the buffer - kimd(s.function, &s.a, s.buf) - s.buf = s.buf[:0] - } - - // copy as much as we can into the buffer - n := len(b) - if len(b) > cap(s.buf)-len(s.buf) { - n = cap(s.buf) - len(s.buf) - } - s.copyIntoBuf(b[:n]) - b = b[n:] - } - return length, nil -} - -// Read squeezes an arbitrary number of bytes from the sponge. -func (s *asmState) Read(out []byte) (n int, err error) { - // The 'compute last message digest' instruction only stores the digest - // at the first operand (dst) for SHAKE functions. 
- if s.function != shake_128 && s.function != shake_256 { - panic("sha3: can only call Read for SHAKE functions") - } - - n = len(out) - - // need to pad if we were absorbing - if s.state == spongeAbsorbing { - s.state = spongeSqueezing - - // write hash directly into out if possible - if len(out)%s.rate == 0 { - klmd(s.function, &s.a, out, s.buf) // len(out) may be 0 - s.buf = s.buf[:0] - return - } - - // write hash into buffer - max := cap(s.buf) - if max > len(out) { - max = (len(out)/s.rate)*s.rate + s.rate - } - klmd(s.function, &s.a, s.buf[:max], s.buf) - s.buf = s.buf[:max] - } - - for len(out) > 0 { - // flush the buffer - if len(s.buf) != 0 { - c := copy(out, s.buf) - out = out[c:] - s.buf = s.buf[c:] - continue - } - - // write hash directly into out if possible - if len(out)%s.rate == 0 { - klmd(s.function|nopad, &s.a, out, nil) - return - } - - // write hash into buffer - s.resetBuf() - if cap(s.buf) > len(out) { - s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate] - } - klmd(s.function|nopad, &s.a, s.buf, nil) - } - return -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. -func (s *asmState) Sum(b []byte) []byte { - if s.state != spongeAbsorbing { - panic("sha3: Sum after Read") - } - - // Copy the state to preserve the original. - a := s.a - - // Hash the buffer. Note that we don't clear it because we - // aren't updating the state. - switch s.function { - case sha3_224, sha3_256, sha3_384, sha3_512: - klmd(s.function, &a, nil, s.buf) - return append(b, a[:s.outputLen]...) - case shake_128, shake_256: - d := make([]byte, s.outputLen, 64) - klmd(s.function, &a, d, s.buf) - return append(b, d[:s.outputLen]...) - default: - panic("sha3: unknown function") - } -} - -// Reset resets the Hash to its initial state. -func (s *asmState) Reset() { - for i := range s.a { - s.a[i] = 0 - } - s.resetBuf() - s.state = spongeAbsorbing -} - -// Size returns the number of bytes Sum will return. -func (s *asmState) Size() int { - return s.outputLen -} - -// BlockSize returns the hash's underlying block size. -// The Write method must be able to accept any amount -// of data, but it may operate more efficiently if all writes -// are a multiple of the block size. -func (s *asmState) BlockSize() int { - return s.rate -} - -// Clone returns a copy of the ShakeHash in its current state. -func (s *asmState) Clone() ShakeHash { - return s.clone() -} - -// new224 returns an assembly implementation of SHA3-224 if available, -// otherwise it returns a generic implementation. -func new224() hash.Hash { - if cpu.S390X.HasSHA3 { - return newAsmState(sha3_224) - } - return new224Generic() -} - -// new256 returns an assembly implementation of SHA3-256 if available, -// otherwise it returns a generic implementation. -func new256() hash.Hash { - if cpu.S390X.HasSHA3 { - return newAsmState(sha3_256) - } - return new256Generic() -} - -// new384 returns an assembly implementation of SHA3-384 if available, -// otherwise it returns a generic implementation. -func new384() hash.Hash { - if cpu.S390X.HasSHA3 { - return newAsmState(sha3_384) - } - return new384Generic() -} - -// new512 returns an assembly implementation of SHA3-512 if available, -// otherwise it returns a generic implementation. -func new512() hash.Hash { - if cpu.S390X.HasSHA3 { - return newAsmState(sha3_512) - } - return new512Generic() -} - -// newShake128 returns an assembly implementation of SHAKE-128 if available, -// otherwise it returns a generic implementation. 
-func newShake128() ShakeHash { - if cpu.S390X.HasSHA3 { - return newAsmState(shake_128) - } - return newShake128Generic() -} - -// newShake256 returns an assembly implementation of SHAKE-256 if available, -// otherwise it returns a generic implementation. -func newShake256() ShakeHash { - if cpu.S390X.HasSHA3 { - return newAsmState(shake_256) - } - return newShake256Generic() -} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s deleted file mode 100644 index 826b862c7..000000000 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego - -#include "textflag.h" - -// func kimd(function code, chain *[200]byte, src []byte) -TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40 - MOVD function+0(FP), R0 - MOVD chain+8(FP), R1 - LMG src+16(FP), R2, R3 // R2=base, R3=len - -continue: - WORD $0xB93E0002 // KIMD --, R2 - BVS continue // continue if interrupted - MOVD $0, R0 // reset R0 for pre-go1.8 compilers - RET - -// func klmd(function code, chain *[200]byte, dst, src []byte) -TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64 - // TODO: SHAKE support - MOVD function+0(FP), R0 - MOVD chain+8(FP), R1 - LMG dst+16(FP), R2, R3 // R2=base, R3=len - LMG src+40(FP), R4, R5 // R4=base, R5=len - -continue: - WORD $0xB93F0024 // KLMD R2, R4 - BVS continue // continue if interrupted - MOVD $0, R0 // reset R0 for pre-go1.8 compilers - RET diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go index a6b3a4281..6f3f70c26 100644 --- a/vendor/golang.org/x/crypto/sha3/shake.go +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -4,24 +4,10 @@ package sha3 -// This file defines the ShakeHash interface, and provides -// functions for creating SHAKE and cSHAKE instances, as well as utility -// functions for hashing bytes to arbitrary-length output. -// -// -// SHAKE implementation is based on FIPS PUB 202 [1] -// cSHAKE implementations is based on NIST SP 800-185 [2] -// -// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf -// [2] https://doi.org/10.6028/NIST.SP.800-185 - import ( - "bytes" - "encoding/binary" - "errors" + "crypto/sha3" "hash" "io" - "math/bits" ) // ShakeHash defines the interface to hash functions that support @@ -32,7 +18,7 @@ type ShakeHash interface { hash.Hash // Read reads more output from the hash; reading affects the hash's - // state. (ShakeHash.Read is thus very different from Hash.Sum) + // state. (ShakeHash.Read is thus very different from Hash.Sum.) // It never returns an error, but subsequent calls to Write or Sum // will panic. io.Reader @@ -41,115 +27,18 @@ type ShakeHash interface { Clone() ShakeHash } -// cSHAKE specific context -type cshakeState struct { - *state // SHA-3 state context and Read/Write operations - - // initBlock is the cSHAKE specific initialization set of bytes. It is initialized - // by newCShake function and stores concatenation of N followed by S, encoded - // by the method specified in 3.3 of [1]. - // It is stored here in order for Reset() to be able to put context into - // initial state. - initBlock []byte -} - -func bytepad(data []byte, rate int) []byte { - out := make([]byte, 0, 9+len(data)+rate-1) - out = append(out, leftEncode(uint64(rate))...) - out = append(out, data...) 
- if padlen := rate - len(out)%rate; padlen < rate { - out = append(out, make([]byte, padlen)...) - } - return out -} - -func leftEncode(x uint64) []byte { - // Let n be the smallest positive integer for which 2^(8n) > x. - n := (bits.Len64(x) + 7) / 8 - if n == 0 { - n = 1 - } - // Return n || x with n as a byte and x an n bytes in big-endian order. - b := make([]byte, 9) - binary.BigEndian.PutUint64(b[1:], x) - b = b[9-n-1:] - b[0] = byte(n) - return b -} - -func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash { - c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}} - c.initBlock = make([]byte, 0, 9+len(N)+9+len(S)) // leftEncode returns max 9 bytes - c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...) - c.initBlock = append(c.initBlock, N...) - c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...) - c.initBlock = append(c.initBlock, S...) - c.Write(bytepad(c.initBlock, c.rate)) - return &c -} - -// Reset resets the hash to initial state. -func (c *cshakeState) Reset() { - c.state.Reset() - c.Write(bytepad(c.initBlock, c.rate)) -} - -// Clone returns copy of a cSHAKE context within its current state. -func (c *cshakeState) Clone() ShakeHash { - b := make([]byte, len(c.initBlock)) - copy(b, c.initBlock) - return &cshakeState{state: c.clone(), initBlock: b} -} - -// Clone returns copy of SHAKE context within its current state. -func (c *state) Clone() ShakeHash { - return c.clone() -} - -func (c *cshakeState) MarshalBinary() ([]byte, error) { - return c.AppendBinary(make([]byte, 0, marshaledSize+len(c.initBlock))) -} - -func (c *cshakeState) AppendBinary(b []byte) ([]byte, error) { - b, err := c.state.AppendBinary(b) - if err != nil { - return nil, err - } - b = append(b, c.initBlock...) - return b, nil -} - -func (c *cshakeState) UnmarshalBinary(b []byte) error { - if len(b) <= marshaledSize { - return errors.New("sha3: invalid hash state") - } - if err := c.state.UnmarshalBinary(b[:marshaledSize]); err != nil { - return err - } - c.initBlock = bytes.Clone(b[marshaledSize:]) - return nil -} - // NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. // Its generic security strength is 128 bits against all attacks if at // least 32 bytes of its output are used. func NewShake128() ShakeHash { - return newShake128() + return &shakeWrapper{sha3.NewSHAKE128(), 32, false, sha3.NewSHAKE128} } // NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. // Its generic security strength is 256 bits against all attacks if // at least 64 bytes of its output are used. func NewShake256() ShakeHash { - return newShake256() -} - -func newShake128Generic() *state { - return &state{rate: rateK256, outputLen: 32, dsbyte: dsbyteShake} -} - -func newShake256Generic() *state { - return &state{rate: rateK512, outputLen: 64, dsbyte: dsbyteShake} + return &shakeWrapper{sha3.NewSHAKE256(), 64, false, sha3.NewSHAKE256} } // NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash, @@ -159,10 +48,9 @@ func newShake256Generic() *state { // computations on same input with different S yield unrelated outputs. // When N and S are both empty, this is equivalent to NewShake128. 
 func NewCShake128(N, S []byte) ShakeHash {
-	if len(N) == 0 && len(S) == 0 {
-		return NewShake128()
-	}
-	return newCShake(N, S, rateK256, 32, dsbyteCShake)
+	return &shakeWrapper{sha3.NewCSHAKE128(N, S), 32, false, func() *sha3.SHAKE {
+		return sha3.NewCSHAKE128(N, S)
+	}}
 }
 
 // NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash,
@@ -172,10 +60,9 @@ func NewCShake128(N, S []byte) ShakeHash {
 // computations on same input with different S yield unrelated outputs.
 // When N and S are both empty, this is equivalent to NewShake256.
 func NewCShake256(N, S []byte) ShakeHash {
-	if len(N) == 0 && len(S) == 0 {
-		return NewShake256()
-	}
-	return newCShake(N, S, rateK512, 64, dsbyteCShake)
+	return &shakeWrapper{sha3.NewCSHAKE256(N, S), 64, false, func() *sha3.SHAKE {
+		return sha3.NewCSHAKE256(N, S)
+	}}
 }
 
 // ShakeSum128 writes an arbitrary-length digest of data into hash.
@@ -191,3 +78,42 @@ func ShakeSum256(hash, data []byte) {
 	h.Write(data)
 	h.Read(hash)
 }
+
+// shakeWrapper adds the Size, Sum, and Clone methods to a sha3.SHAKE
+// to implement the ShakeHash interface.
+type shakeWrapper struct {
+	*sha3.SHAKE
+	outputLen int
+	squeezing bool
+	newSHAKE  func() *sha3.SHAKE
+}
+
+func (w *shakeWrapper) Read(p []byte) (n int, err error) {
+	w.squeezing = true
+	return w.SHAKE.Read(p)
+}
+
+func (w *shakeWrapper) Clone() ShakeHash {
+	s := w.newSHAKE()
+	b, err := w.MarshalBinary()
+	if err != nil {
+		panic(err) // unreachable
+	}
+	if err := s.UnmarshalBinary(b); err != nil {
+		panic(err) // unreachable
+	}
+	return &shakeWrapper{s, w.outputLen, w.squeezing, w.newSHAKE}
+}
+
+func (w *shakeWrapper) Size() int { return w.outputLen }
+
+func (w *shakeWrapper) Sum(b []byte) []byte {
+	if w.squeezing {
+		panic("sha3: Sum after Read")
+	}
+	out := make([]byte, w.outputLen)
+	// Clone the state so that we don't affect future Write calls.
+	s := w.Clone()
+	s.Read(out)
+	return append(b, out...)
+}
diff --git a/vendor/golang.org/x/crypto/sha3/shake_noasm.go b/vendor/golang.org/x/crypto/sha3/shake_noasm.go
deleted file mode 100644
index 4276ba4ab..000000000
--- a/vendor/golang.org/x/crypto/sha3/shake_noasm.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
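The shakeWrapper above restores what the ShakeHash interface promises but crypto/sha3's SHAKE type lacks: Size, Sum (which squeezes a fixed-length digest from a clone so the original stays writable), and Clone (a MarshalBinary/UnmarshalBinary round-trip into a fresh instance). A short usage sketch of the resulting behavior:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    func main() {
        msg := []byte("attack at dawn")

        h := sha3.NewCShake128(nil, []byte("email signature"))
        h.Write(msg)

        c := h.Clone()                 // deep copy via the binary round-trip
        fmt.Printf("%x\n", h.Sum(nil)) // 32 bytes; clones internally, h stays writable

        out := make([]byte, 64)
        c.Read(out) // switches the clone to squeezing; Sum on c would now panic
        fmt.Printf("%x\n", out)

        // A different customization string S yields an unrelated output stream.
        h2 := sha3.NewCShake128(nil, []byte("key derivation"))
        h2.Write(msg)
        fmt.Printf("%x\n", h2.Sum(nil))
    }
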
- -//go:build !gc || purego || !s390x - -package sha3 - -func newShake128() *state { - return newShake128Generic() -} - -func newShake256() *state { - return newShake256Generic() -} diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go index 37525e1a1..b357e18b0 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/client.go +++ b/vendor/golang.org/x/crypto/ssh/agent/client.go @@ -430,8 +430,9 @@ func (c *client) List() ([]*Key, error) { return keys, nil case *failureAgentMsg: return nil, errors.New("agent: failed to list keys") + default: + return nil, fmt.Errorf("agent: failed to list keys, unexpected message type %T", msg) } - panic("unreachable") } // Sign has the agent sign the data using a protocol 2 key as defined @@ -462,8 +463,9 @@ func (c *client) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFl return &sig, nil case *failureAgentMsg: return nil, errors.New("agent: failed to sign challenge") + default: + return nil, fmt.Errorf("agent: failed to sign challenge, unexpected message type %T", msg) } - panic("unreachable") } // unmarshal parses an agent message in packet, returning the parsed diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go index c1b436108..d12987551 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/keyring.go +++ b/vendor/golang.org/x/crypto/ssh/agent/keyring.go @@ -112,7 +112,7 @@ func (r *keyring) Unlock(passphrase []byte) error { } // expireKeysLocked removes expired keys from the keyring. If a key was added -// with a lifetimesecs contraint and seconds >= lifetimesecs seconds have +// with a lifetimesecs constraint and seconds >= lifetimesecs seconds have // elapsed, it is removed. The caller *must* be holding the keyring mutex. func (r *keyring) expireKeysLocked() { for _, k := range r.keys { diff --git a/vendor/golang.org/x/crypto/ssh/agent/server.go b/vendor/golang.org/x/crypto/ssh/agent/server.go index 88ce4da6c..4e8ff86b6 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/server.go +++ b/vendor/golang.org/x/crypto/ssh/agent/server.go @@ -203,6 +203,9 @@ func parseConstraints(constraints []byte) (lifetimeSecs uint32, confirmBeforeUse for len(constraints) != 0 { switch constraints[0] { case agentConstrainLifetime: + if len(constraints) < 5 { + return 0, false, nil, io.ErrUnexpectedEOF + } lifetimeSecs = binary.BigEndian.Uint32(constraints[1:5]) constraints = constraints[5:] case agentConstrainConfirm: diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go index 6a5b582aa..7554ed57a 100644 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -8,6 +8,7 @@ import ( "crypto/aes" "crypto/cipher" "crypto/des" + "crypto/fips140" "crypto/rc4" "crypto/subtle" "encoding/binary" @@ -15,6 +16,7 @@ import ( "fmt" "hash" "io" + "slices" "golang.org/x/crypto/chacha20" "golang.org/x/crypto/internal/poly1305" @@ -93,41 +95,41 @@ func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, } // cipherModes documents properties of supported ciphers. Ciphers not included -// are not supported and will not be negotiated, even if explicitly requested in -// ClientConfig.Crypto.Ciphers. -var cipherModes = map[string]*cipherMode{ - // Ciphers from RFC 4344, which introduced many CTR-based ciphers. Algorithms - // are defined in the order specified in the RFC. 
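The agent/server.go hunk above closes an out-of-bounds slice: a client could previously send a lifetime constraint shorter than the five bytes the parser unconditionally read. A self-contained sketch of the pattern the fix applies (parseLifetime is a hypothetical helper, not the package's code): validate the remaining length before every fixed-width read of untrusted input.

    package main

    import (
        "encoding/binary"
        "fmt"
        "io"
    )

    func parseLifetime(constraints []byte) (secs uint32, rest []byte, err error) {
        if len(constraints) < 5 { // 1 tag byte + 4 big-endian length bytes
            return 0, nil, io.ErrUnexpectedEOF
        }
        return binary.BigEndian.Uint32(constraints[1:5]), constraints[5:], nil
    }

    func main() {
        _, _, err := parseLifetime([]byte{1, 0, 0}) // truncated: errors instead of panicking
        fmt.Println(err)
    }
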
- CipherAES128CTR: {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - CipherAES192CTR: {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - CipherAES256CTR: {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, +// are not supported and will not be negotiated, even if explicitly configured. +// When FIPS mode is enabled, only FIPS-approved algorithms are included. +var cipherModes = map[string]*cipherMode{} - // Ciphers from RFC 4345, which introduces security-improved arcfour ciphers. - // They are defined in the order specified in the RFC. - InsecureCipherRC4128: {16, 0, streamCipherMode(1536, newRC4)}, - InsecureCipherRC4256: {32, 0, streamCipherMode(1536, newRC4)}, +func init() { + cipherModes[CipherAES128CTR] = &cipherMode{16, aes.BlockSize, streamCipherMode(0, newAESCTR)} + cipherModes[CipherAES192CTR] = &cipherMode{24, aes.BlockSize, streamCipherMode(0, newAESCTR)} + cipherModes[CipherAES256CTR] = &cipherMode{32, aes.BlockSize, streamCipherMode(0, newAESCTR)} + // Use of GCM with arbitrary IVs is not allowed in FIPS 140-only mode, + // we'll wire it up to NewGCMForSSH in Go 1.26. + // + // For now it means we'll work with fips140=on but not fips140=only. + cipherModes[CipherAES128GCM] = &cipherMode{16, 12, newGCMCipher} + cipherModes[CipherAES256GCM] = &cipherMode{32, 12, newGCMCipher} - // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. - // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and - // RC4) has problems with weak keys, and should be used with caution." - // RFC 4345 introduces improved versions of Arcfour. - InsecureCipherRC4: {16, 0, streamCipherMode(0, newRC4)}, - - // AEAD ciphers - CipherAES128GCM: {16, 12, newGCMCipher}, - CipherAES256GCM: {32, 12, newGCMCipher}, - CipherChaCha20Poly1305: {64, 0, newChaCha20Cipher}, + if fips140.Enabled() { + defaultCiphers = slices.DeleteFunc(defaultCiphers, func(algo string) bool { + _, ok := cipherModes[algo] + return !ok + }) + return + } + cipherModes[CipherChaCha20Poly1305] = &cipherMode{64, 0, newChaCha20Cipher} + // Insecure ciphers not included in the default configuration. + cipherModes[InsecureCipherRC4128] = &cipherMode{16, 0, streamCipherMode(1536, newRC4)} + cipherModes[InsecureCipherRC4256] = &cipherMode{32, 0, streamCipherMode(1536, newRC4)} + cipherModes[InsecureCipherRC4] = &cipherMode{16, 0, streamCipherMode(0, newRC4)} // CBC mode is insecure and so is not included in the default config. // (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely // needed, it's possible to specify a custom Config to enable it. // You should expect that an active attacker can recover plaintext if // you do. - InsecureCipherAES128CBC: {16, aes.BlockSize, newAESCBCCipher}, - - // 3des-cbc is insecure and is not included in the default - // config. 
- InsecureCipherTripleDESCBC: {24, des.BlockSize, newTripleDESCBCCipher}, + cipherModes[InsecureCipherAES128CBC] = &cipherMode{16, aes.BlockSize, newAESCBCCipher} + cipherModes[InsecureCipherTripleDESCBC] = &cipherMode{24, des.BlockSize, newTripleDESCBCCipher} } // prefixLen is the length of the packet prefix that contains the packet length diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go index c12818fdc..3127e4990 100644 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "slices" "strings" ) @@ -83,7 +84,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { // success return nil } else if ok == authFailure { - if m := auth.method(); !contains(tried, m) { + if m := auth.method(); !slices.Contains(tried, m) { tried = append(tried, m) } } @@ -97,7 +98,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { findNext: for _, a := range config.Auth { candidateMethod := a.method() - if contains(tried, candidateMethod) { + if slices.Contains(tried, candidateMethod) { continue } for _, meth := range methods { @@ -117,15 +118,6 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) } -func contains(list []string, e string) bool { - for _, s := range list { - if s == e { - return true - } - } - return false -} - // An AuthMethod represents an instance of an RFC 4252 authentication method. type AuthMethod interface { // auth authenticates user over transport t. @@ -255,7 +247,7 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA // Fallback to use if there is no "server-sig-algs" extension or a // common algorithm cannot be found. We use the public key format if the // MultiAlgorithmSigner supports it, otherwise we return an error. - if !contains(as.Algorithms(), underlyingAlgo(keyFormat)) { + if !slices.Contains(as.Algorithms(), underlyingAlgo(keyFormat)) { return "", fmt.Errorf("ssh: no common public key signature algorithm, server only supports %q for key type %q, signer only supports %v", underlyingAlgo(keyFormat), keyFormat, as.Algorithms()) } @@ -284,7 +276,7 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA // Filter algorithms based on those supported by MultiAlgorithmSigner. var keyAlgos []string for _, algo := range algorithmsForKeyFormat(keyFormat) { - if contains(as.Algorithms(), underlyingAlgo(algo)) { + if slices.Contains(as.Algorithms(), underlyingAlgo(algo)) { keyAlgos = append(keyAlgos, algo) } } @@ -334,7 +326,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand // the key try to use the obtained algorithm as if "server-sig-algs" had // not been implemented if supported from the algorithm signer. if !ok && idx < origSignersLen && isRSACert(algo) && algo != CertAlgoRSAv01 { - if contains(as.Algorithms(), KeyAlgoRSA) { + if slices.Contains(as.Algorithms(), KeyAlgoRSA) { // We retry using the compat algorithm after all signers have // been tried normally. signers = append(signers, &multiAlgorithmSigner{ @@ -385,7 +377,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand // contain the "publickey" method, do not attempt to authenticate with any // other keys. 
According to RFC 4252 Section 7, the latter can occur when // additional authentication methods are required. - if success == authSuccess || !contains(methods, cb.method()) { + if success == authSuccess || !slices.Contains(methods, cb.method()) { return success, methods, err } } @@ -434,7 +426,7 @@ func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { // servers send the key type instead. OpenSSH allows any algorithm // that matches the public key, so we do the same. // https://github.com/openssh/openssh-portable/blob/86bdd385/sshconnect2.c#L709 - if !contains(algorithmsForKeyFormat(key.Type()), msg.Algo) { + if !slices.Contains(algorithmsForKeyFormat(key.Type()), msg.Algo) { return false, nil } if !bytes.Equal(msg.PubKey, pubKey) { diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go index 8bfad16c4..2e44e9c9e 100644 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -6,6 +6,7 @@ package ssh import ( "crypto" + "crypto/fips140" "crypto/rand" "fmt" "io" @@ -256,6 +257,40 @@ type Algorithms struct { PublicKeyAuths []string } +func init() { + if fips140.Enabled() { + defaultHostKeyAlgos = slices.DeleteFunc(defaultHostKeyAlgos, func(algo string) bool { + _, err := hashFunc(underlyingAlgo(algo)) + return err != nil + }) + defaultPubKeyAuthAlgos = slices.DeleteFunc(defaultPubKeyAuthAlgos, func(algo string) bool { + _, err := hashFunc(underlyingAlgo(algo)) + return err != nil + }) + } +} + +func hashFunc(format string) (crypto.Hash, error) { + switch format { + case KeyAlgoRSASHA256, KeyAlgoECDSA256, KeyAlgoSKED25519, KeyAlgoSKECDSA256: + return crypto.SHA256, nil + case KeyAlgoECDSA384: + return crypto.SHA384, nil + case KeyAlgoRSASHA512, KeyAlgoECDSA521: + return crypto.SHA512, nil + case KeyAlgoED25519: + // KeyAlgoED25519 doesn't pre-hash. + return 0, nil + case KeyAlgoRSA, InsecureKeyAlgoDSA: + if fips140.Enabled() { + return 0, fmt.Errorf("ssh: hash algorithm for format %q not allowed in FIPS 140 mode", format) + } + return crypto.SHA1, nil + default: + return 0, fmt.Errorf("ssh: hash algorithm for format %q not mapped", format) + } +} + // SupportedAlgorithms returns algorithms currently implemented by this package, // excluding those with security issues, which are returned by // InsecureAlgorithms. The algorithms listed here are in preference order. @@ -283,21 +318,6 @@ func InsecureAlgorithms() Algorithms { var supportedCompressions = []string{compressionNone} -// hashFuncs keeps the mapping of supported signature algorithms to their -// respective hashes needed for signing and verification. -var hashFuncs = map[string]crypto.Hash{ - KeyAlgoRSA: crypto.SHA1, - KeyAlgoRSASHA256: crypto.SHA256, - KeyAlgoRSASHA512: crypto.SHA512, - InsecureKeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - // KeyAlgoED25519 doesn't pre-hash. - KeyAlgoSKECDSA256: crypto.SHA256, - KeyAlgoSKED25519: crypto.SHA256, -} - // algorithmsForKeyFormat returns the supported signature algorithms for a given // public key format (PublicKey.Type), in order of preference. See RFC 8332, // Section 2. See also the note in sendKexInit on backwards compatibility. @@ -312,11 +332,40 @@ func algorithmsForKeyFormat(keyFormat string) []string { } } +// keyFormatForAlgorithm returns the key format corresponding to the given +// signature algorithm. It returns an empty string if the signature algorithm is +// invalid or unsupported. 
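Replacing the hashFuncs map with this hashFunc function turns two silent failure modes into explicit errors: an unmapped algorithm no longer comes back as the zero crypto.Hash, and the SHA-1-based ssh-rsa and ssh-dss are rejected outright under FIPS 140 mode. (keyFormatForAlgorithm, defined next, feeds the friendlier parsePubKey error further down.) Once the lookup succeeds, signing and verification digest data in the ordinary way, as in this standalone sketch:

    package main

    import (
        "crypto"
        _ "crypto/sha256" // registers SHA-256 so crypto.SHA256.New() works
        "fmt"
    )

    func main() {
        // What a successful lookup hands back for rsa-sha2-256,
        // ecdsa-sha2-nistp256, and the SK algorithms.
        h := crypto.SHA256.New()
        h.Write([]byte("data to be signed"))
        fmt.Printf("%x\n", h.Sum(nil))
    }
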
+func keyFormatForAlgorithm(sigAlgo string) string { + switch sigAlgo { + case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512: + return KeyAlgoRSA + case CertAlgoRSAv01, CertAlgoRSASHA256v01, CertAlgoRSASHA512v01: + return CertAlgoRSAv01 + case KeyAlgoED25519, + KeyAlgoSKED25519, + KeyAlgoSKECDSA256, + KeyAlgoECDSA256, + KeyAlgoECDSA384, + KeyAlgoECDSA521, + InsecureKeyAlgoDSA, + InsecureCertAlgoDSAv01, + CertAlgoECDSA256v01, + CertAlgoECDSA384v01, + CertAlgoECDSA521v01, + CertAlgoSKECDSA256v01, + CertAlgoED25519v01, + CertAlgoSKED25519v01: + return sigAlgo + default: + return "" + } +} + // isRSA returns whether algo is a supported RSA algorithm, including certificate // algorithms. func isRSA(algo string) bool { algos := algorithmsForKeyFormat(KeyAlgoRSA) - return contains(algos, underlyingAlgo(algo)) + return slices.Contains(algos, underlyingAlgo(algo)) } func isRSACert(algo string) bool { @@ -515,7 +564,7 @@ func (c *Config) SetDefaults() { if kexAlgoMap[k] != nil { // Ignore the KEX if we have no kexAlgoMap definition. kexs = append(kexs, k) - if k == KeyExchangeCurve25519 && !contains(c.KeyExchanges, keyExchangeCurve25519LibSSH) { + if k == KeyExchangeCurve25519 && !slices.Contains(c.KeyExchanges, keyExchangeCurve25519LibSSH) { kexs = append(kexs, keyExchangeCurve25519LibSSH) } } diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go index 04ccce346..5b4de9eff 100644 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -17,8 +17,18 @@ References: [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 [SSH-CERTS]: https://datatracker.ietf.org/doc/html/draft-miller-ssh-cert-01 + [FIPS 140-3 mode]: https://go.dev/doc/security/fips140 This package does not fall under the stability promise of the Go language itself, so its API may be changed when pressing needs arise. + +# FIPS 140-3 mode + +When the program is in [FIPS 140-3 mode], this package behaves as if only SP +800-140C and SP 800-140D approved cipher suites, signature algorithms, +certificate public key types and sizes, and key exchange and derivation +algorithms were implemented. Others are silently ignored and not negotiated, or +rejected. This set may depend on the algorithms supported by the FIPS 140-3 Go +Cryptographic Module selected with GOFIPS140, and may change across Go versions. 
 */
 package ssh
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
index a90bfe331..4be3cbb6d 100644
--- a/vendor/golang.org/x/crypto/ssh/handshake.go
+++ b/vendor/golang.org/x/crypto/ssh/handshake.go
@@ -10,6 +10,7 @@ import (
 	"io"
 	"log"
 	"net"
+	"slices"
 	"strings"
 	"sync"
 )
@@ -527,7 +528,7 @@ func (t *handshakeTransport) sendKexInit() error {
 		switch s := k.(type) {
 		case MultiAlgorithmSigner:
 			for _, algo := range algorithmsForKeyFormat(keyFormat) {
-				if contains(s.Algorithms(), underlyingAlgo(algo)) {
+				if slices.Contains(s.Algorithms(), underlyingAlgo(algo)) {
 					msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo)
 				}
 			}
@@ -679,7 +680,7 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
 		return err
 	}
 
-	if t.sessionID == nil && ((isClient && contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && contains(clientInit.KexAlgos, kexStrictClient))) {
+	if t.sessionID == nil && ((isClient && slices.Contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && slices.Contains(clientInit.KexAlgos, kexStrictClient))) {
 		t.strictMode = true
 		if err := t.conn.setStrictMode(); err != nil {
 			return err
@@ -736,7 +737,7 @@
 	// On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO
 	// message with the server-sig-algs extension if the client supports it. See
 	// RFC 8308, Sections 2.4 and 3.1, and [PROTOCOL], Section 1.9.
-	if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") {
+	if !isClient && firstKeyExchange && slices.Contains(clientInit.KexAlgos, "ext-info-c") {
 		supportedPubKeyAuthAlgosList := strings.Join(t.publicKeyAuthAlgorithms, ",")
 		extInfo := &extInfoMsg{
 			NumExtensions: 2,
@@ -790,7 +791,7 @@ func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, a
 func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner {
 	for _, k := range hostKeys {
 		if s, ok := k.(MultiAlgorithmSigner); ok {
-			if !contains(s.Algorithms(), underlyingAlgo(algo)) {
+			if !slices.Contains(s.Algorithms(), underlyingAlgo(algo)) {
 				continue
 			}
 		}
diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go
index 78aaf0310..5f7fdd851 100644
--- a/vendor/golang.org/x/crypto/ssh/kex.go
+++ b/vendor/golang.org/x/crypto/ssh/kex.go
@@ -8,12 +8,14 @@ import (
 	"crypto"
 	"crypto/ecdsa"
 	"crypto/elliptic"
+	"crypto/fips140"
 	"crypto/rand"
 	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
 	"math/big"
+	"slices"
 
 	"golang.org/x/crypto/curve25519"
 )
@@ -395,9 +397,27 @@ func ecHash(curve elliptic.Curve) crypto.Hash {
 	return crypto.SHA512
 }
 
+// kexAlgoMap defines the supported KEXs. KEXs not included are not supported
+// and will not be negotiated, even if explicitly configured. When FIPS mode is
+// enabled, only FIPS-approved algorithms are included.
 var kexAlgoMap = map[string]kexAlgorithm{}
 
 func init() {
+	// mlkem768x25519-sha256 will work with fips140=on but not fips140=only
+	// until Go 1.26.
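The registration that follows puts the FIPS-capable key exchanges first, and, per the doc.go note above, anything a given build does not implement is silently skipped during negotiation rather than causing an error. A hedged configuration sketch (auth details elided; InsecureIgnoreHostKey is only acceptable in a throwaway example):

    package main

    import "golang.org/x/crypto/ssh"

    func main() {
        cfg := &ssh.ClientConfig{
            User:            "git",
            HostKeyCallback: ssh.InsecureIgnoreHostKey(),
            Config: ssh.Config{
                // Preference order; entries this build doesn't support are ignored.
                KeyExchanges: []string{"mlkem768x25519-sha256", "ecdh-sha2-nistp256"},
            },
        }
        _ = cfg
    }
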
+ kexAlgoMap[KeyExchangeMLKEM768X25519] = &mlkem768WithCurve25519sha256{} + kexAlgoMap[KeyExchangeECDHP521] = &ecdh{elliptic.P521()} + kexAlgoMap[KeyExchangeECDHP384] = &ecdh{elliptic.P384()} + kexAlgoMap[KeyExchangeECDHP256] = &ecdh{elliptic.P256()} + + if fips140.Enabled() { + defaultKexAlgos = slices.DeleteFunc(defaultKexAlgos, func(algo string) bool { + _, ok := kexAlgoMap[algo] + return !ok + }) + return + } + p, _ := new(big.Int).SetString(oakleyGroup2, 16) kexAlgoMap[InsecureKeyExchangeDH1SHA1] = &dhGroup{ g: new(big.Int).SetInt64(2), @@ -431,14 +451,10 @@ func init() { hashFunc: crypto.SHA512, } - kexAlgoMap[KeyExchangeECDHP521] = &ecdh{elliptic.P521()} - kexAlgoMap[KeyExchangeECDHP384] = &ecdh{elliptic.P384()} - kexAlgoMap[KeyExchangeECDHP256] = &ecdh{elliptic.P256()} kexAlgoMap[KeyExchangeCurve25519] = &curve25519sha256{} kexAlgoMap[keyExchangeCurve25519LibSSH] = &curve25519sha256{} kexAlgoMap[InsecureKeyExchangeDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} kexAlgoMap[KeyExchangeDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} - kexAlgoMap[KeyExchangeMLKEM768X25519] = &mlkem768WithCurve25519sha256{} } // curve25519sha256 implements the curve25519-sha256 (formerly known as diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index a28c0de50..47a07539d 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -27,6 +27,7 @@ import ( "fmt" "io" "math/big" + "slices" "strings" "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" @@ -89,6 +90,11 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err } return cert, nil, nil } + if keyFormat := keyFormatForAlgorithm(algo); keyFormat != "" { + return nil, nil, fmt.Errorf("ssh: signature algorithm %q isn't a key format; key is malformed and should be re-encoded with type %q", + algo, keyFormat) + } + return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) } @@ -191,9 +197,10 @@ func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey return "", nil, nil, "", nil, io.EOF } -// ParseAuthorizedKey parses a public key from an authorized_keys -// file used in OpenSSH according to the sshd(8) manual page. +// ParseAuthorizedKey parses a public key from an authorized_keys file used in +// OpenSSH according to the sshd(8) manual page. Invalid lines are ignored. func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { + var lastErr error for len(in) > 0 { end := bytes.IndexByte(in, '\n') if end != -1 { @@ -222,6 +229,8 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { return out, comment, options, rest, nil + } else { + lastErr = err } // No key type recognised. 
Maybe there's an options field at @@ -264,12 +273,18 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { options = candidateOptions return out, comment, options, rest, nil + } else { + lastErr = err } in = rest continue } + if lastErr != nil { + return nil, "", nil, nil, fmt.Errorf("ssh: no key found; last parsing error for ignored line: %w", lastErr) + } + return nil, "", nil, nil, errors.New("ssh: no key found") } @@ -395,11 +410,11 @@ func NewSignerWithAlgorithms(signer AlgorithmSigner, algorithms []string) (Multi } for _, algo := range algorithms { - if !contains(supportedAlgos, algo) { + if !slices.Contains(supportedAlgos, algo) { return nil, fmt.Errorf("ssh: algorithm %q is not supported for key type %q", algo, signer.PublicKey().Type()) } - if !contains(signerAlgos, algo) { + if !slices.Contains(signerAlgos, algo) { return nil, fmt.Errorf("ssh: algorithm %q is restricted for the provided signer", algo) } } @@ -486,10 +501,13 @@ func (r *rsaPublicKey) Marshal() []byte { func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { supportedAlgos := algorithmsForKeyFormat(r.Type()) - if !contains(supportedAlgos, sig.Format) { + if !slices.Contains(supportedAlgos, sig.Format) { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) } - hash := hashFuncs[sig.Format] + hash, err := hashFunc(sig.Format) + if err != nil { + return err + } h := hash.New() h.Write(data) digest := h.Sum(nil) @@ -606,7 +624,11 @@ func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { if sig.Format != k.Type() { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - h := hashFuncs[sig.Format].New() + hash, err := hashFunc(sig.Format) + if err != nil { + return err + } + h := hash.New() h.Write(data) digest := h.Sum(nil) @@ -651,7 +673,11 @@ func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) } - h := hashFuncs[k.PublicKey().Type()].New() + hash, err := hashFunc(k.PublicKey().Type()) + if err != nil { + return nil, err + } + h := hash.New() h.Write(data) digest := h.Sum(nil) r, s, err := dsa.Sign(rand, k.PrivateKey, digest) @@ -801,8 +827,11 @@ func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { if sig.Format != k.Type() { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - - h := hashFuncs[sig.Format].New() + hash, err := hashFunc(sig.Format) + if err != nil { + return err + } + h := hash.New() h.Write(data) digest := h.Sum(nil) @@ -905,8 +934,11 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { if sig.Format != k.Type() { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - - h := hashFuncs[sig.Format].New() + hash, err := hashFunc(sig.Format) + if err != nil { + return err + } + h := hash.New() h.Write([]byte(k.application)) appDigest := h.Sum(nil) @@ -1009,7 +1041,11 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { return fmt.Errorf("invalid size %d for Ed25519 public key", l) } - h := hashFuncs[sig.Format].New() + hash, err := hashFunc(sig.Format) + if err != nil { + return err + } + h := hash.New() h.Write([]byte(k.application)) appDigest := h.Sum(nil) @@ -1112,11 +1148,14 @@ func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm algorithm = s.pubKey.Type() } - if 
!contains(s.Algorithms(), algorithm) { + if !slices.Contains(s.Algorithms(), algorithm) { return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type()) } - hashFunc := hashFuncs[algorithm] + hashFunc, err := hashFunc(algorithm) + if err != nil { + return nil, err + } var digest []byte if hashFunc != 0 { h := hashFunc.New() @@ -1451,6 +1490,7 @@ type openSSHEncryptedPrivateKey struct { NumKeys uint32 PubKey []byte PrivKeyBlock []byte + Rest []byte `ssh:"rest"` } type openSSHPrivateKey struct { diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go index de2639d57..87d626fbb 100644 --- a/vendor/golang.org/x/crypto/ssh/mac.go +++ b/vendor/golang.org/x/crypto/ssh/mac.go @@ -7,11 +7,13 @@ package ssh // Message authentication support import ( + "crypto/fips140" "crypto/hmac" "crypto/sha1" "crypto/sha256" "crypto/sha512" "hash" + "slices" ) type macMode struct { @@ -46,23 +48,37 @@ func (t truncatingMAC) Size() int { func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } -var macModes = map[string]*macMode{ - HMACSHA512ETM: {64, true, func(key []byte) hash.Hash { +// macModes defines the supported MACs. MACs not included are not supported +// and will not be negotiated, even if explicitly configured. When FIPS mode is +// enabled, only FIPS-approved algorithms are included. +var macModes = map[string]*macMode{} + +func init() { + macModes[HMACSHA512ETM] = &macMode{64, true, func(key []byte) hash.Hash { return hmac.New(sha512.New, key) - }}, - HMACSHA256ETM: {32, true, func(key []byte) hash.Hash { + }} + macModes[HMACSHA256ETM] = &macMode{32, true, func(key []byte) hash.Hash { return hmac.New(sha256.New, key) - }}, - HMACSHA512: {64, false, func(key []byte) hash.Hash { + }} + macModes[HMACSHA512] = &macMode{64, false, func(key []byte) hash.Hash { return hmac.New(sha512.New, key) - }}, - HMACSHA256: {32, false, func(key []byte) hash.Hash { + }} + macModes[HMACSHA256] = &macMode{32, false, func(key []byte) hash.Hash { return hmac.New(sha256.New, key) - }}, - HMACSHA1: {20, false, func(key []byte) hash.Hash { + }} + + if fips140.Enabled() { + defaultMACs = slices.DeleteFunc(defaultMACs, func(algo string) bool { + _, ok := macModes[algo] + return !ok + }) + return + } + + macModes[HMACSHA1] = &macMode{20, false, func(key []byte) hash.Hash { return hmac.New(sha1.New, key) - }}, - InsecureHMACSHA196: {20, false, func(key []byte) hash.Hash { + }} + macModes[InsecureHMACSHA196] = &macMode{20, false, func(key []byte) hash.Hash { return truncatingMAC{12, hmac.New(sha1.New, key)} - }}, + }} } diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go index 251b9d06a..ab22c3d38 100644 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -792,7 +792,7 @@ func marshalString(to []byte, s []byte) []byte { return to[len(s):] } -var bigIntType = reflect.TypeOf((*big.Int)(nil)) +var bigIntType = reflect.TypeFor[*big.Int]() // Decode a packet into its corresponding message. 
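A small modernization in messages.go above: the reflect.Type of *big.Int now comes from the generic reflect.TypeFor (Go 1.22+) rather than the typed-nil idiom. Both yield the identical Type consumed by decode, which continues below; a quick demonstration:

    package main

    import (
        "fmt"
        "math/big"
        "reflect"
    )

    func main() {
        before := reflect.TypeOf((*big.Int)(nil)) // typed-nil idiom
        after := reflect.TypeFor[*big.Int]()      // generic form, no value needed
        fmt.Println(before == after)              // true
    }
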
 func decode(packet []byte) (interface{}, error) {
diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
index 98679ba5b..064dcbaf5 100644
--- a/vendor/golang.org/x/crypto/ssh/server.go
+++ b/vendor/golang.org/x/crypto/ssh/server.go
@@ -10,6 +10,7 @@ import (
 	"fmt"
 	"io"
 	"net"
+	"slices"
 	"strings"
 )
@@ -43,6 +44,9 @@ type Permissions struct {
 	// pass data from the authentication callbacks to the server
 	// application layer.
 	Extensions map[string]string
+
+	// ExtraData allows storing user-defined data.
+	ExtraData map[any]any
 }
 
 type GSSAPIWithMICConfig struct {
@@ -126,6 +130,21 @@ type ServerConfig struct {
 	// Permissions.Extensions entry.
 	PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
 
+	// VerifiedPublicKeyCallback, if non-nil, is called after a client
+	// successfully confirms having control over a key that was previously
+	// approved by PublicKeyCallback. The permissions object passed to the
+	// callback is the one returned by PublicKeyCallback for the given public
+	// key and its ownership is transferred to the callback. The returned
+	// Permissions object can be the same object, optionally modified, or a
+	// completely new object. If VerifiedPublicKeyCallback is non-nil,
+	// PublicKeyCallback is not allowed to return a PartialSuccessError, which
+	// can instead be returned by VerifiedPublicKeyCallback.
+	//
+	// VerifiedPublicKeyCallback does not affect which authentication methods
+	// are included in the list of methods that can be attempted by the client.
+	VerifiedPublicKeyCallback func(conn ConnMetadata, key PublicKey, permissions *Permissions,
+		signatureAlgorithm string) (*Permissions, error)
+
 	// KeyboardInteractiveCallback, if non-nil, is called when
 	// keyboard-interactive authentication is selected (RFC
 	// 4256). The client object's Challenge function should be
@@ -246,7 +265,7 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha
 		fullConf.PublicKeyAuthAlgorithms = defaultPubKeyAuthAlgos
 	} else {
 		for _, algo := range fullConf.PublicKeyAuthAlgorithms {
-			if !contains(SupportedAlgorithms().PublicKeyAuths, algo) && !contains(InsecureAlgorithms().PublicKeyAuths, algo) {
+			if !slices.Contains(SupportedAlgorithms().PublicKeyAuths, algo) && !slices.Contains(InsecureAlgorithms().PublicKeyAuths, algo) {
 				c.Close()
 				return nil, nil, nil, fmt.Errorf("ssh: unsupported public key authentication algorithm %s", algo)
 			}
@@ -631,7 +650,7 @@ userAuthLoop:
 				return nil, parseError(msgUserAuthRequest)
 			}
 			algo := string(algoBytes)
-			if !contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) {
+			if !slices.Contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) {
 				authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
 				break
 			}
@@ -652,6 +671,9 @@ userAuthLoop:
 				candidate.pubKeyData = pubKeyData
 				candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey)
 				_, isPartialSuccessError := candidate.result.(*PartialSuccessError)
+				if isPartialSuccessError && config.VerifiedPublicKeyCallback != nil {
+					return nil, errors.New("ssh: invalid library usage: PublicKeyCallback must not return partial success when VerifiedPublicKeyCallback is defined")
+				}
 
 				if (candidate.result == nil || isPartialSuccessError) &&
 					candidate.perms != nil &&
@@ -695,7 +717,7 @@ userAuthLoop:
 			// ssh-rsa-cert-v01@openssh.com algorithm with ssh-rsa public
 			// key type. The algorithm and public key type must be
 			// consistent: both must be certificate algorithms, or neither.
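VerifiedPublicKeyCallback separates authorization from proof of possession: PublicKeyCallback may run for a mere "is this key acceptable?" query, while the new callback fires only after a signature by that key has been verified, receiving the negotiated signature algorithm and ownership of the Permissions. A sketch of wiring the two together (authorizedKeys is a placeholder store); the hunk resumes right after with the algorithm/key-type consistency check:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/ssh"
    )

    // authorizedKeys is a placeholder store keyed by a key's wire encoding.
    var authorizedKeys = map[string]bool{}

    func serverConfig() *ssh.ServerConfig {
        return &ssh.ServerConfig{
            PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
                if !authorizedKeys[string(key.Marshal())] {
                    return nil, fmt.Errorf("ssh: unknown public key for %q", conn.User())
                }
                return &ssh.Permissions{
                    Extensions: map[string]string{"pubkey-fp": ssh.FingerprintSHA256(key)},
                }, nil
            },
            // Runs only after the client has proven possession of the key; it
            // may amend or replace the Permissions from PublicKeyCallback.
            VerifiedPublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey,
                perms *ssh.Permissions, signatureAlgorithm string) (*ssh.Permissions, error) {
                perms.Extensions["sig-algo"] = signatureAlgorithm
                return perms, nil
            },
        }
    }

    func main() { _ = serverConfig() }
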
- if !contains(algorithmsForKeyFormat(pubKey.Type()), algo) { + if !slices.Contains(algorithmsForKeyFormat(pubKey.Type()), algo) { authErr = fmt.Errorf("ssh: public key type %q not compatible with selected algorithm %q", pubKey.Type(), algo) break @@ -705,7 +727,7 @@ userAuthLoop: // algorithm name that corresponds to algo with // sig.Format. This is usually the same, but // for certs, the names differ. - if !contains(config.PublicKeyAuthAlgorithms, sig.Format) { + if !slices.Contains(config.PublicKeyAuthAlgorithms, sig.Format) { authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) break } @@ -722,6 +744,12 @@ userAuthLoop: authErr = candidate.result perms = candidate.perms + if authErr == nil && config.VerifiedPublicKeyCallback != nil { + // Only call VerifiedPublicKeyCallback after the key has been accepted + // and successfully verified. If authErr is non-nil, the key is not + // considered verified and the callback must not run. + perms, authErr = config.VerifiedPublicKeyCallback(s, pubKey, perms, algo) + } } case "gssapi-with-mic": if authConfig.GSSAPIWithMICConfig == nil { diff --git a/vendor/golang.org/x/crypto/ssh/ssh_gss.go b/vendor/golang.org/x/crypto/ssh/ssh_gss.go index 24bd7c8e8..a6249a122 100644 --- a/vendor/golang.org/x/crypto/ssh/ssh_gss.go +++ b/vendor/golang.org/x/crypto/ssh/ssh_gss.go @@ -106,6 +106,13 @@ func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) { if !ok { return nil, errors.New("parse uint32 failed") } + // Each ASN.1 encoded OID must have a minimum + // of 2 bytes; 64 maximum mechanisms is an + // arbitrary, but reasonable ceiling. + const maxMechs = 64 + if n > maxMechs || int(n)*2 > len(rest) { + return nil, errors.New("invalid mechanism count") + } s := &userAuthRequestGSSAPI{ N: n, OIDS: make([]asn1.ObjectIdentifier, n), @@ -122,7 +129,6 @@ func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) { if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil { return nil, err } - } return s, nil } diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go index b171b330b..152470fcb 100644 --- a/vendor/golang.org/x/crypto/ssh/streamlocal.go +++ b/vendor/golang.org/x/crypto/ssh/streamlocal.go @@ -44,7 +44,7 @@ func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { if !ok { return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") } - ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) + ch := c.forwards.add("unix", socketPath) return &unixListener{socketPath, c, ch}, nil } @@ -96,7 +96,7 @@ func (l *unixListener) Accept() (net.Conn, error) { // Close closes the listener. func (l *unixListener) Close() error { // this also closes the listener. - l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) + l.conn.forwards.remove("unix", l.socketPath) m := streamLocalChannelForwardMsg{ l.socketPath, } diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go index 93d844f03..78c41fe5a 100644 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -11,6 +11,7 @@ import ( "io" "math/rand" "net" + "net/netip" "strconv" "strings" "sync" @@ -22,14 +23,21 @@ import ( // the returned net.Listener. The listener must be serviced, or the // SSH connection may hang. // N must be "tcp", "tcp4", "tcp6", or "unix". 
+// +// If the address is a hostname, it is sent to the remote peer as-is, without +// being resolved locally, and the Listener Addr method will return a zero IP. func (c *Client) Listen(n, addr string) (net.Listener, error) { switch n { case "tcp", "tcp4", "tcp6": - laddr, err := net.ResolveTCPAddr(n, addr) + host, portStr, err := net.SplitHostPort(addr) if err != nil { return nil, err } - return c.ListenTCP(laddr) + port, err := strconv.ParseInt(portStr, 10, 32) + if err != nil { + return nil, err + } + return c.listenTCPInternal(host, int(port)) case "unix": return c.ListenUnix(addr) default: @@ -102,15 +110,24 @@ func (c *Client) handleForwards() { // ListenTCP requests the remote peer open a listening socket // on laddr. Incoming connections will be available by calling // Accept on the returned net.Listener. +// +// ListenTCP accepts an IP address, to provide a hostname use [Client.Listen] +// with "tcp", "tcp4", or "tcp6" network instead. func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { c.handleForwardsOnce.Do(c.handleForwards) if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { return c.autoPortListenWorkaround(laddr) } + return c.listenTCPInternal(laddr.IP.String(), laddr.Port) +} + +func (c *Client) listenTCPInternal(host string, port int) (net.Listener, error) { + c.handleForwardsOnce.Do(c.handleForwards) + m := channelForwardMsg{ - laddr.IP.String(), - uint32(laddr.Port), + host, + uint32(port), } // send message ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) @@ -123,20 +140,33 @@ func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { // If the original port was 0, then the remote side will // supply a real port number in the response. - if laddr.Port == 0 { + if port == 0 { var p struct { Port uint32 } if err := Unmarshal(resp, &p); err != nil { return nil, err } - laddr.Port = int(p.Port) + port = int(p.Port) } + // Construct a local address placeholder for the remote listener. If the + // original host is an IP address, preserve it so that Listener.Addr() + // reports the same IP. If the host is a hostname or cannot be parsed as an + // IP, fall back to IPv4zero. The port field is always set, even if the + // original port was 0, because in that case the remote server will assign + // one, allowing callers to determine which port was selected. + ip := net.IPv4zero + if parsed, err := netip.ParseAddr(host); err == nil { + ip = net.IP(parsed.AsSlice()) + } + laddr := &net.TCPAddr{ + IP: ip, + Port: port, + } + addr := net.JoinHostPort(host, strconv.FormatInt(int64(port), 10)) + ch := c.forwards.add("tcp", addr) - // Register this forward, using the port number we obtained. - ch := c.forwards.add(laddr) - - return &tcpListener{laddr, c, ch}, nil + return &tcpListener{laddr, addr, c, ch}, nil } // forwardList stores a mapping between remote @@ -149,8 +179,9 @@ type forwardList struct { // forwardEntry represents an established mapping of a laddr on a // remote ssh server to a channel connected to a tcpListener. type forwardEntry struct { - laddr net.Addr - c chan forward + addr string // host:port or socket path + network string // tcp or unix + c chan forward } // forward represents an incoming forwarded tcpip connection. 
The @@ -161,12 +192,13 @@ type forward struct { raddr net.Addr // the raddr of the incoming connection } -func (l *forwardList) add(addr net.Addr) chan forward { +func (l *forwardList) add(n, addr string) chan forward { l.Lock() defer l.Unlock() f := forwardEntry{ - laddr: addr, - c: make(chan forward, 1), + addr: addr, + network: n, + c: make(chan forward, 1), } l.entries = append(l.entries, f) return f.c @@ -185,19 +217,20 @@ func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { if port == 0 || port > 65535 { return nil, fmt.Errorf("ssh: port number out of range: %d", port) } - ip := net.ParseIP(string(addr)) - if ip == nil { + ip, err := netip.ParseAddr(addr) + if err != nil { return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) } - return &net.TCPAddr{IP: ip, Port: int(port)}, nil + return &net.TCPAddr{IP: net.IP(ip.AsSlice()), Port: int(port)}, nil } func (l *forwardList) handleChannels(in <-chan NewChannel) { for ch := range in { var ( - laddr net.Addr - raddr net.Addr - err error + addr string + network string + raddr net.Addr + err error ) switch channelType := ch.ChannelType(); channelType { case "forwarded-tcpip": @@ -207,40 +240,34 @@ func (l *forwardList) handleChannels(in <-chan NewChannel) { continue } - // RFC 4254 section 7.2 specifies that incoming - // addresses should list the address, in string - // format. It is implied that this should be an IP - // address, as it would be impossible to connect to it - // otherwise. - laddr, err = parseTCPAddr(payload.Addr, payload.Port) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } + // RFC 4254 section 7.2 specifies that incoming addresses should + // list the address that was connected, in string format. It is the + // same address used in the tcpip-forward request. The originator + // address is an IP address instead. + addr = net.JoinHostPort(payload.Addr, strconv.FormatUint(uint64(payload.Port), 10)) + raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) if err != nil { ch.Reject(ConnectionFailed, err.Error()) continue } - + network = "tcp" case "forwarded-streamlocal@openssh.com": var payload forwardedStreamLocalPayload if err = Unmarshal(ch.ExtraData(), &payload); err != nil { ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) continue } - laddr = &net.UnixAddr{ - Name: payload.SocketPath, - Net: "unix", - } + addr = payload.SocketPath raddr = &net.UnixAddr{ Name: "@", Net: "unix", } + network = "unix" default: panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) } - if ok := l.forward(laddr, raddr, ch); !ok { + if ok := l.forward(network, addr, raddr, ch); !ok { // Section 7.2, implementations MUST reject spurious incoming // connections. ch.Reject(Prohibited, "no forward for address") @@ -252,11 +279,11 @@ func (l *forwardList) handleChannels(in <-chan NewChannel) { // remove removes the forward entry, and the channel feeding its // listener. -func (l *forwardList) remove(addr net.Addr) { +func (l *forwardList) remove(n, addr string) { l.Lock() defer l.Unlock() for i, f := range l.entries { - if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { + if n == f.network && addr == f.addr { l.entries = append(l.entries[:i], l.entries[i+1:]...) 
close(f.c) return @@ -274,11 +301,11 @@ func (l *forwardList) closeAll() { l.entries = nil } -func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { +func (l *forwardList) forward(n, addr string, raddr net.Addr, ch NewChannel) bool { l.Lock() defer l.Unlock() for _, f := range l.entries { - if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { + if n == f.network && addr == f.addr { f.c <- forward{newCh: ch, raddr: raddr} return true } @@ -288,6 +315,7 @@ func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { type tcpListener struct { laddr *net.TCPAddr + addr string conn *Client in <-chan forward @@ -314,13 +342,21 @@ func (l *tcpListener) Accept() (net.Conn, error) { // Close closes the listener. func (l *tcpListener) Close() error { + host, port, err := net.SplitHostPort(l.addr) + if err != nil { + return err + } + rport, err := strconv.ParseUint(port, 10, 32) + if err != nil { + return err + } m := channelForwardMsg{ - l.laddr.IP.String(), - uint32(l.laddr.Port), + host, + uint32(rport), } // this also closes the listener. - l.conn.forwards.remove(l.laddr) + l.conn.forwards.remove("tcp", l.addr) ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) if err == nil && !ok { err = errors.New("ssh: cancel-tcpip-forward failed") diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go index 663619845..fa3dd6a42 100644 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -8,6 +8,7 @@ import ( "bufio" "bytes" "errors" + "fmt" "io" "log" ) @@ -254,6 +255,9 @@ var ( // (to setup server->client keys) or clientKeys (for client->server keys). func newPacketCipher(d direction, algs DirectionAlgorithms, kex *kexResult) (packetCipher, error) { cipherMode := cipherModes[algs.Cipher] + if cipherMode == nil { + return nil, fmt.Errorf("ssh: unsupported cipher %v", algs.Cipher) + } iv := make([]byte, cipherMode.ivSize) key := make([]byte, cipherMode.keySize) diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index d3cb95175..24cea6882 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -2,42 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package context defines the Context type, which carries deadlines, -// cancellation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name [context]. +// Package context has been superseded by the standard library [context] package. // -// Incoming requests to a server should create a [Context], and outgoing -// calls to servers should accept a Context. The chain of function -// calls between them must propagate the Context, optionally replacing -// it with a derived Context created using [WithCancel], [WithDeadline], -// [WithTimeout], or [WithValue]. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. This is discussed further in -// https://go.dev/blog/context-and-structs. 
The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See https://go.dev/blog/context for example code for a server that uses -// Contexts. +// Deprecated: Use the standard library context package instead. package context import ( diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go index 02fe0c2d4..8a7a89d01 100644 --- a/vendor/golang.org/x/net/http2/config.go +++ b/vendor/golang.org/x/net/http2/config.go @@ -27,6 +27,7 @@ import ( // - If the resulting value is zero or out of range, use a default. type http2Config struct { MaxConcurrentStreams uint32 + StrictMaxConcurrentRequests bool MaxDecoderHeaderTableSize uint32 MaxEncoderHeaderTableSize uint32 MaxReadFrameSize uint32 @@ -64,12 +65,13 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { // (the net/http Transport). func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ - MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, - MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, - MaxReadFrameSize: h2.MaxReadFrameSize, - SendPingTimeout: h2.ReadIdleTimeout, - PingTimeout: h2.PingTimeout, - WriteByteTimeout: h2.WriteByteTimeout, + StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, } // Unlike most config fields, where out-of-range values revert to the default, @@ -128,6 +130,9 @@ func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { if h2.MaxConcurrentStreams != 0 { conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) } + if http2ConfigStrictMaxConcurrentRequests(h2) { + conf.StrictMaxConcurrentRequests = true + } if h2.MaxEncoderHeaderTableSize != 0 { conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) } diff --git a/vendor/golang.org/x/net/http2/config_go125.go b/vendor/golang.org/x/net/http2/config_go125.go new file mode 100644 index 000000000..b4373fe33 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go125.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.26 + +package http2 + +import ( + "net/http" +) + +func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool { + return false +} diff --git a/vendor/golang.org/x/net/http2/config_go126.go b/vendor/golang.org/x/net/http2/config_go126.go new file mode 100644 index 000000000..6b071c149 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go126.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.26 + +package http2 + +import ( + "net/http" +) + +func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool { + return h2.StrictMaxConcurrentRequests +} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index db3264da8..9a4bd123c 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -280,6 +280,8 @@ type Framer struct { // lastHeaderStream is non-zero if the last frame was an // unfinished HEADERS/CONTINUATION. lastHeaderStream uint32 + // lastFrameType holds the type of the last frame for verifying frame order. + lastFrameType FrameType maxReadSize uint32 headerBuf [frameHeaderLen]byte @@ -347,7 +349,7 @@ func (fr *Framer) maxHeaderListSize() uint32 { func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) { // Write the FrameHeader. f.wbuf = append(f.wbuf[:0], - 0, // 3 bytes of length, filled in in endWrite + 0, // 3 bytes of length, filled in endWrite 0, 0, byte(ftype), @@ -488,30 +490,41 @@ func terminalReadFrameError(err error) bool { return err != nil } -// ReadFrame reads a single frame. The returned Frame is only valid -// until the next call to ReadFrame. +// ReadFrameHeader reads the header of the next frame. +// It reads the 9-byte fixed frame header, and does not read any portion of the +// frame payload. The caller is responsible for consuming the payload, either +// with ReadFrameForHeader or directly from the Framer's io.Reader. // -// If the frame is larger than previously set with SetMaxReadFrameSize, the -// returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from the underlying -// reader. +// If the frame is larger than previously set with SetMaxReadFrameSize, it +// returns the frame header and ErrFrameTooLarge. // -// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID -// indicates the stream responsible for the error. -func (fr *Framer) ReadFrame() (Frame, error) { +// If the returned FrameHeader.StreamID is non-zero, it indicates the stream +// responsible for the error. +func (fr *Framer) ReadFrameHeader() (FrameHeader, error) { fr.errDetail = nil - if fr.lastFrame != nil { - fr.lastFrame.invalidate() - } fh, err := readFrameHeader(fr.headerBuf[:], fr.r) if err != nil { - return nil, err + return fh, err } if fh.Length > fr.maxReadSize { if fh == invalidHTTP1LookingFrameHeader() { - return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) + return fh, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) } - return nil, ErrFrameTooLarge + return fh, ErrFrameTooLarge + } + if err := fr.checkFrameOrder(fh); err != nil { + return fh, err + } + return fh, nil +} + +// ReadFrameForHeader reads the payload for the frame with the given FrameHeader. +// +// It behaves identically to ReadFrame, other than not checking the maximum +// frame size. 
+func (fr *Framer) ReadFrameForHeader(fh FrameHeader) (Frame, error) {
+	if fr.lastFrame != nil {
+		fr.lastFrame.invalidate()
 	}
 	payload := fr.getReadBuf(fh.Length)
 	if _, err := io.ReadFull(fr.r, payload); err != nil {
@@ -527,9 +540,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
 		}
 		return nil, err
 	}
-	if err := fr.checkFrameOrder(f); err != nil {
-		return nil, err
-	}
+	fr.lastFrame = f
 	if fr.logReads {
 		fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
 	}
@@ -539,6 +550,24 @@ func (fr *Framer) ReadFrame() (Frame, error) {
 	return f, nil
 }
 
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame or ReadFrameForHeader.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+//
+// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
+// indicates the stream responsible for the error.
+func (fr *Framer) ReadFrame() (Frame, error) {
+	fh, err := fr.ReadFrameHeader()
+	if err != nil {
+		return nil, err
+	}
+	return fr.ReadFrameForHeader(fh)
+}
+
 // connError returns ConnectionError(code) but first
 // stashes away a public reason so the caller can optionally relay it
 // to the peer before hanging up on them. This might help others debug
@@ -551,20 +580,19 @@ func (fr *Framer) connError(code ErrCode, reason string) error {
 // checkFrameOrder reports an error if f is an invalid frame to return
 // next from ReadFrame. Mostly it checks whether HEADERS and
 // CONTINUATION frames are contiguous.
-func (fr *Framer) checkFrameOrder(f Frame) error {
-	last := fr.lastFrame
-	fr.lastFrame = f
+func (fr *Framer) checkFrameOrder(fh FrameHeader) error {
+	lastType := fr.lastFrameType
+	fr.lastFrameType = fh.Type
 	if fr.AllowIllegalReads {
 		return nil
 	}
-	fh := f.Header()
 	if fr.lastHeaderStream != 0 {
 		if fh.Type != FrameContinuation {
 			return fr.connError(ErrCodeProtocol,
 				fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
 					fh.Type, fh.StreamID,
-					last.Header().Type, fr.lastHeaderStream))
+					lastType, fr.lastHeaderStream))
 		}
 		if fh.StreamID != fr.lastHeaderStream {
 			return fr.connError(ErrCodeProtocol,
@@ -1152,7 +1180,16 @@ type PriorityFrame struct {
 	PriorityParam
 }
 
-// PriorityParam are the stream prioritzation parameters.
+var defaultRFC9218Priority = PriorityParam{
+	incremental: 0,
+	urgency:     3,
+}
+
+// Note that HTTP/2 has had two different prioritization schemes, and the
+// PriorityParam struct below is a superset of both schemes. The exported
+// symbols are from RFC 7540 and the non-exported ones are from RFC 9218.
+
+// PriorityParam are the stream prioritization parameters.
 type PriorityParam struct {
 	// StreamDep is a 31-bit stream identifier for the
 	// stream that this stream depends on. Zero means no
@@ -1167,6 +1204,20 @@ type PriorityParam struct {
 	// the spec, "Add one to the value to obtain a weight between
 	// 1 and 256."
 	Weight uint8
+
+	// "The urgency (u) parameter value is Integer (see Section 3.3.1 of
+	// [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of
+	// priority. The default is 3."
+	urgency uint8
+
+	// "The incremental (i) parameter value is Boolean (see Section 3.3.6 of
+	// [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed
+	// incrementally, i.e., provide some meaningful output as chunks of the
+	// response arrive."
+	//
+	// We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can
+	// avoid unnecessary type conversions and because either type takes 1 byte.
+	incremental uint8
 }
 
 func (p PriorityParam) IsZero() bool {
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 6878f8ecc..105fe12fe 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -34,7 +34,6 @@ var (
 	VerboseLogs    bool
 	logFrameWrites bool
 	logFrameReads  bool
-	inTests        bool
 
 	// Enabling extended CONNECT by default causes browsers to attempt to use
 	// WebSockets-over-HTTP/2. This results in problems when the server's websocket
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 64085f6e1..bdc5520eb 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -181,6 +181,10 @@ type Server struct {
 type serverInternalState struct {
 	mu          sync.Mutex
 	activeConns map[*serverConn]struct{}
+
+	// Pool of error channels. This is per-Server rather than global
+	// because channels can't be reused across synctest bubbles.
+	errChanPool sync.Pool
 }
 
 func (s *serverInternalState) registerConn(sc *serverConn) {
@@ -212,6 +216,27 @@ func (s *serverInternalState) startGracefulShutdown() {
 	s.mu.Unlock()
 }
 
+// Global error channel pool used for uninitialized Servers.
+// We use a per-Server pool when possible to avoid using channels across synctest bubbles.
+var errChanPool = sync.Pool{
+	New: func() any { return make(chan error, 1) },
+}
+
+func (s *serverInternalState) getErrChan() chan error {
+	if s == nil {
+		return errChanPool.Get().(chan error) // Server used without calling ConfigureServer
+	}
+	return s.errChanPool.Get().(chan error)
+}
+
+func (s *serverInternalState) putErrChan(ch chan error) {
+	if s == nil {
+		errChanPool.Put(ch) // Server used without calling ConfigureServer
+		return
+	}
+	s.errChanPool.Put(ch)
+}
+
 // ConfigureServer adds HTTP/2 support to a net/http Server.
 //
 // The configuration conf may be nil.
@@ -224,7 +249,10 @@ func ConfigureServer(s *http.Server, conf *Server) error {
 	if conf == nil {
 		conf = new(Server)
 	}
-	conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
+	conf.state = &serverInternalState{
+		activeConns: make(map[*serverConn]struct{}),
+		errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }},
+	}
 	if h1, h2 := s, conf; h2.IdleTimeout == 0 {
 		if h1.IdleTimeout != 0 {
 			h2.IdleTimeout = h1.IdleTimeout
@@ -1124,25 +1152,6 @@ func (sc *serverConn) readPreface() error {
 	}
 }
 
-var errChanPool = sync.Pool{
-	New: func() interface{} { return make(chan error, 1) },
-}
-
-func getErrChan() chan error {
-	if inTests {
-		// Channels cannot be reused across synctest tests.
-		return make(chan error, 1)
-	} else {
-		return errChanPool.Get().(chan error)
-	}
-}
-
-func putErrChan(ch chan error) {
-	if !inTests {
-		errChanPool.Put(ch)
-	}
-}
-
 var writeDataPool = sync.Pool{
 	New: func() interface{} { return new(writeData) },
 }
@@ -1150,7 +1159,7 @@ var writeDataPool = sync.Pool{
 // writeDataFromHandler writes DATA response frames from a handler on
 // the given stream.
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := getErrChan() + ch := sc.srv.state.getErrChan() writeArg := writeDataPool.Get().(*writeData) *writeArg = writeData{stream.id, data, endStream} err := sc.writeFrameFromHandler(FrameWriteRequest{ @@ -1182,7 +1191,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea return errStreamClosed } } - putErrChan(ch) + sc.srv.state.putErrChan(ch) if frameWriteDone { writeDataPool.Put(writeArg) } @@ -2436,7 +2445,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro // waiting for this frame to be written, so an http.Flush mid-handler // writes out the correct value of keys, before a handler later potentially // mutates it. - errc = getErrChan() + errc = sc.srv.state.getErrChan() } if err := sc.writeFrameFromHandler(FrameWriteRequest{ write: headerData, @@ -2448,7 +2457,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro if errc != nil { select { case err := <-errc: - putErrChan(errc) + sc.srv.state.putErrChan(errc) return err case <-sc.doneServing: return errClientDisconnected @@ -3129,7 +3138,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { method: opts.Method, url: u, header: cloneHeader(opts.Header), - done: getErrChan(), + done: sc.srv.state.getErrChan(), } select { @@ -3146,7 +3155,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { case <-st.cw: return errStreamClosed case err := <-msg.done: - putErrChan(msg.done) + sc.srv.state.putErrChan(msg.done) return err } } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 35e390251..ccb87e6da 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -9,6 +9,7 @@ package http2 import ( "bufio" "bytes" + "compress/flate" "compress/gzip" "context" "crypto/rand" @@ -355,6 +356,7 @@ type ClientConn struct { readIdleTimeout time.Duration pingTimeout time.Duration extendedConnectAllowed bool + strictMaxConcurrentStreams bool // rstStreamPingsBlocked works around an unfortunate gRPC behavior. // gRPC strictly limits the number of PING frames that it will receive. @@ -374,11 +376,24 @@ type ClientConn struct { // completely unresponsive connection. pendingResets int + // readBeforeStreamID is the smallest stream ID that has not been followed by + // a frame read from the peer. We use this to determine when a request may + // have been sent to a completely unresponsive connection: + // If the request ID is less than readBeforeStreamID, then we have had some + // indication of life on the connection since sending the request. + readBeforeStreamID uint32 + // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. // Lock reqmu BEFORE mu or wmu. reqHeaderMu chan struct{} + // internalStateHook reports state changes back to the net/http.ClientConn. + // Note that this is different from the user state hook registered by + // net/http.ClientConn.SetStateHook: The internal hook calls ClientConn, + // which calls the user hook. + internalStateHook func() + // wmu is held while writing. // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes. // Only acquire both at the same time when changing peer settings. 
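The Framer changes earlier in this diff split frame reading into two steps: ReadFrameHeader consumes only the fixed 9-byte header (and runs the frame-order check), while ReadFrameForHeader consumes the payload belonging to a header already read; ReadFrame is now a thin wrapper over both. A minimal sketch of driving the two-step API, assuming the updated x/net/http2 vendored by this diff (the SETTINGS round-trip below is illustrative, not code taken from the diff):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	// Use one buffer as both the write and read side of the Framer.
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf)

	// Write an empty SETTINGS frame: a 9-byte header, zero-length payload.
	if err := fr.WriteSettings(); err != nil {
		panic(err)
	}

	// Step 1: read just the fixed header and inspect it before buffering.
	fh, err := fr.ReadFrameHeader()
	if err != nil {
		panic(err)
	}
	fmt.Printf("type=%v length=%d stream=%d\n", fh.Type, fh.Length, fh.StreamID)

	// Step 2: consume the payload that belongs to that header.
	f, err := fr.ReadFrameForHeader(fh)
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed %T\n", f)
}

The split lets a caller reject or specially handle an oversized or unexpected frame from the header alone, before committing to buffering its payload.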
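Likewise, the server.go hunks above replace the global getErrChan/putErrChan helpers (and the inTests escape hatch) with a pool stored on serverInternalState, falling back to a package-level pool only when the Server never went through ConfigureServer and its state is nil. A small standalone model of that nil-receiver fallback pattern, with hypothetical names:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// fallbackPool serves instances that were never initialized.
var fallbackPool = sync.Pool{New: func() any { return make(chan error, 1) }}

type serverState struct {
	errChanPool sync.Pool
}

// getErrChan prefers the per-instance pool; a nil receiver falls back to
// the global pool, mirroring the unconfigured-Server case in the diff.
func (s *serverState) getErrChan() chan error {
	if s == nil {
		return fallbackPool.Get().(chan error)
	}
	return s.errChanPool.Get().(chan error)
}

func (s *serverState) putErrChan(ch chan error) {
	if s == nil {
		fallbackPool.Put(ch)
		return
	}
	s.errChanPool.Put(ch)
}

func main() {
	s := &serverState{errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }}}
	ch := s.getErrChan()
	ch <- errors.New("write done")
	fmt.Println(<-ch)
	s.putErrChan(ch)

	var unconfigured *serverState
	unconfigured.putErrChan(unconfigured.getErrChan()) // global-pool path
}

Per the diff's own comments, the pool is per-Server because channels cannot be reused across synctest bubbles, so a single process-wide pool shared between bubbled tests would misbehave.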
@@ -708,7 +723,7 @@ func canRetryError(err error) bool { func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { if t.transportTestHooks != nil { - return t.newClientConn(nil, singleUse) + return t.newClientConn(nil, singleUse, nil) } host, _, err := net.SplitHostPort(addr) if err != nil { @@ -718,7 +733,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse) + return t.newClientConn(tconn, singleUse, nil) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -770,10 +785,10 @@ func (t *Transport) expectContinueTimeout() time.Duration { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives()) + return t.newClientConn(c, t.disableKeepAlives(), nil) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool, internalStateHook func()) (*ClientConn, error) { conf := configFromTransport(t) cc := &ClientConn{ t: t, @@ -784,7 +799,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialWindowSize: 65535, // spec default initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests, + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, seenSettingsChan: make(chan struct{}), @@ -794,6 +810,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), lastActive: time.Now(), + internalStateHook: internalStateHook, } if t.transportTestHooks != nil { t.transportTestHooks.newclientconn(cc) @@ -1018,7 +1035,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { return } var maxConcurrentOkay bool - if cc.t.StrictMaxConcurrentStreams { + if cc.strictMaxConcurrentStreams { // We'll tell the caller we can take a new request to // prevent the caller from dialing a new TCP // connection, but then we'll block later before @@ -1034,10 +1051,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } - st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && - !cc.doNotReuse && - int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && - !cc.tooIdleLocked() + st.canTakeNewRequest = maxConcurrentOkay && cc.isUsableLocked() // If this connection has never been used for a request and is closed, // then let it take a request (which will fail). @@ -1053,6 +1067,31 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { return } +func (cc *ClientConn) isUsableLocked() bool { + return cc.goAway == nil && + !cc.closed && + !cc.closing && + !cc.doNotReuse && + int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && + !cc.tooIdleLocked() +} + +// canReserveLocked reports whether a net/http.ClientConn can reserve a slot on this conn. 
+//
+// This follows slightly different rules than clientConnIdleState.canTakeNewRequest.
+// We only permit reservations up to the conn's concurrency limit.
+// This differs from ClientConn.ReserveNewRequest, which permits reservations
+// past the limit when StrictMaxConcurrentStreams is set.
+func (cc *ClientConn) canReserveLocked() bool {
+	if cc.currentRequestCountLocked() >= int(cc.maxConcurrentStreams) {
+		return false
+	}
+	if !cc.isUsableLocked() {
+		return false
+	}
+	return true
+}
+
 // currentRequestCountLocked reports the number of concurrency slots currently in use,
 // including active streams, reserved slots, and reset streams waiting for acknowledgement.
 func (cc *ClientConn) currentRequestCountLocked() int {
@@ -1064,6 +1103,14 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
 	return st.canTakeNewRequest
 }
 
+// availableLocked reports the number of concurrency slots available.
+func (cc *ClientConn) availableLocked() int {
+	if !cc.canTakeNewRequestLocked() {
+		return 0
+	}
+	return max(0, int(cc.maxConcurrentStreams)-cc.currentRequestCountLocked())
+}
+
 // tooIdleLocked reports whether this connection has been sitting idle
 // for too much wall time.
 func (cc *ClientConn) tooIdleLocked() bool {
@@ -1088,6 +1135,7 @@ func (cc *ClientConn) closeConn() {
 	t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn)
 	defer t.Stop()
 	cc.tconn.Close()
+	cc.maybeCallStateHook()
 }
 
 // A tls.Conn.Close can hang for a long time if the peer is unresponsive.
@@ -1613,6 +1661,8 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
 	}
 	bodyClosed := cs.reqBodyClosed
 	closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
+	// Have we read any frames from the connection since sending this request?
+	readSinceStream := cc.readBeforeStreamID > cs.ID
 	cc.mu.Unlock()
 	if mustCloseBody {
 		cs.reqBody.Close()
@@ -1644,8 +1694,10 @@
 		//
 		// This could be due to the server becoming unresponsive.
 		// To avoid sending too many requests on a dead connection,
-		// we let the request continue to consume a concurrency slot
-		// until we can confirm the server is still responding.
+		// if we haven't read any frames from the connection since
+		// sending this request, we let it continue to consume
+		// a concurrency slot until we can confirm the server is
+		// still responding.
 		// We do this by sending a PING frame along with the RST_STREAM
 		// (unless a ping is already in flight).
 		//
@@ -1656,7 +1708,7 @@
 		// because it's short lived and will probably be closed before
 		// we get the ping response.
 		ping := false
-		if !closeOnIdle {
+		if !closeOnIdle && !readSinceStream {
 			cc.mu.Lock()
 			// rstStreamPingsBlocked works around a gRPC behavior:
 			// see comment on the field for details.
@@ -1690,6 +1742,7 @@
 	}
 
 	close(cs.donec)
+	cc.maybeCallStateHook()
}

// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams.
@@ -2742,6 +2795,7 @@ func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientSt
 		// See comment on ClientConn.rstStreamPingsBlocked for details.
rl.cc.rstStreamPingsBlocked = false } + rl.cc.readBeforeStreamID = rl.cc.nextStreamID cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -2792,6 +2846,7 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { cc := rl.cc + defer cc.maybeCallStateHook() cc.mu.Lock() defer cc.mu.Unlock() @@ -2972,6 +3027,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error { func (rl *clientConnReadLoop) processPing(f *PingFrame) error { if f.IsAck() { cc := rl.cc + defer cc.maybeCallStateHook() cc.mu.Lock() defer cc.mu.Unlock() // If ack, notify listener if any @@ -3074,35 +3130,102 @@ type erringRoundTripper struct{ err error } func (rt erringRoundTripper) RoundTripErr() error { return rt.err } func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } +var errConcurrentReadOnResBody = errors.New("http2: concurrent read on response body") + // gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read +// get gzip.Reader from the pool on the first call to Read. +// After Close is called it puts gzip.Reader to the pool immediately +// if there is no Read in progress or later when Read completes. type gzipReader struct { _ incomparable body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error + mu sync.Mutex // guards zr and zerr + zr *gzip.Reader // stores gzip reader from the pool between reads + zerr error // sticky gzip reader init error or sentinel value to detect concurrent read and read after close +} + +type eofReader struct{} + +func (eofReader) Read([]byte) (int, error) { return 0, io.EOF } +func (eofReader) ReadByte() (byte, error) { return 0, io.EOF } + +var gzipPool = sync.Pool{New: func() any { return new(gzip.Reader) }} + +// gzipPoolGet gets a gzip.Reader from the pool and resets it to read from r. +func gzipPoolGet(r io.Reader) (*gzip.Reader, error) { + zr := gzipPool.Get().(*gzip.Reader) + if err := zr.Reset(r); err != nil { + gzipPoolPut(zr) + return nil, err + } + return zr, nil +} + +// gzipPoolPut puts a gzip.Reader back into the pool. +func gzipPoolPut(zr *gzip.Reader) { + // Reset will allocate bufio.Reader if we pass it anything + // other than a flate.Reader, so ensure that it's getting one. + var r flate.Reader = eofReader{} + zr.Reset(r) + gzipPool.Put(zr) +} + +// acquire returns a gzip.Reader for reading response body. +// The reader must be released after use. +func (gz *gzipReader) acquire() (*gzip.Reader, error) { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr != nil { + return nil, gz.zerr + } + if gz.zr == nil { + gz.zr, gz.zerr = gzipPoolGet(gz.body) + if gz.zerr != nil { + return nil, gz.zerr + } + } + ret := gz.zr + gz.zr, gz.zerr = nil, errConcurrentReadOnResBody + return ret, nil +} + +// release returns the gzip.Reader to the pool if Close was called during Read. +func (gz *gzipReader) release(zr *gzip.Reader) { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == errConcurrentReadOnResBody { + gz.zr, gz.zerr = zr, nil + } else { // fs.ErrClosed + gzipPoolPut(zr) + } +} + +// close returns the gzip.Reader to the pool immediately or +// signals release to do so after Read completes. 
+func (gz *gzipReader) close() { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == nil && gz.zr != nil { + gzipPoolPut(gz.zr) + gz.zr = nil + } + gz.zerr = fs.ErrClosed } func (gz *gzipReader) Read(p []byte) (n int, err error) { - if gz.zerr != nil { - return 0, gz.zerr + zr, err := gz.acquire() + if err != nil { + return 0, err } - if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err - } - } - return gz.zr.Read(p) + defer gz.release(zr) + + return zr.Read(p) } func (gz *gzipReader) Close() error { - if err := gz.body.Close(); err != nil { - return err - } - gz.zerr = fs.ErrClosed - return nil + gz.close() + + return gz.body.Close() } type errorReader struct{ err error } @@ -3128,9 +3251,13 @@ func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err erro } // noDialH2RoundTripper is a RoundTripper which only tries to complete the request -// if there's already has a cached connection to the host. +// if there's already a cached connection to the host. // (The field is exported so it can be accessed via reflect from net/http; tested // by TestNoDialH2RoundTripperType) +// +// A noDialH2RoundTripper is registered with http1.Transport.RegisterProtocol, +// and the http1.Transport can use type assertions to call non-RoundTrip methods on it. +// This lets us expose, for example, NewClientConn to net/http. type noDialH2RoundTripper struct{ *Transport } func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -3141,6 +3268,85 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err return res, err } +func (rt noDialH2RoundTripper) NewClientConn(conn net.Conn, internalStateHook func()) (http.RoundTripper, error) { + tr := rt.Transport + cc, err := tr.newClientConn(conn, tr.disableKeepAlives(), internalStateHook) + if err != nil { + return nil, err + } + + // RoundTrip should block when the conn is at its concurrency limit, + // not return an error. Setting strictMaxConcurrentStreams enables this. + cc.strictMaxConcurrentStreams = true + + return netHTTPClientConn{cc}, nil +} + +// netHTTPClientConn wraps ClientConn and implements the interface net/http expects from +// the RoundTripper returned by NewClientConn. +type netHTTPClientConn struct { + cc *ClientConn +} + +func (cc netHTTPClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.cc.RoundTrip(req) +} + +func (cc netHTTPClientConn) Close() error { + return cc.cc.Close() +} + +func (cc netHTTPClientConn) Err() error { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + if cc.cc.closed { + return errors.New("connection closed") + } + return nil +} + +func (cc netHTTPClientConn) Reserve() error { + defer cc.cc.maybeCallStateHook() + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + if !cc.cc.canReserveLocked() { + return errors.New("connection is unavailable") + } + cc.cc.streamsReserved++ + return nil +} + +func (cc netHTTPClientConn) Release() { + defer cc.cc.maybeCallStateHook() + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + // We don't complain if streamsReserved is 0. + // + // This is consistent with RoundTrip: both Release and RoundTrip will + // consume a reservation iff one exists. 
+ if cc.cc.streamsReserved > 0 { + cc.cc.streamsReserved-- + } +} + +func (cc netHTTPClientConn) Available() int { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + return cc.cc.availableLocked() +} + +func (cc netHTTPClientConn) InFlight() int { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + return cc.cc.currentRequestCountLocked() +} + +func (cc *ClientConn) maybeCallStateHook() { + if cc.internalStateHook != nil { + cc.internalStateHook() + } +} + func (t *Transport) idleConnTimeout() time.Duration { // to keep things backwards compatible, we use non-zero values of // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index cc893adc2..7de27be52 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -42,6 +42,8 @@ type OpenStreamOptions struct { // PusherID is zero if the stream was initiated by the client. Otherwise, // PusherID names the stream that pushed the newly opened stream. PusherID uint32 + // priority is used to set the priority of the newly opened stream. + priority PriorityParam } // FrameWriteRequest is a request to write a frame. @@ -183,45 +185,75 @@ func (wr *FrameWriteRequest) replyToWriter(err error) { } // writeQueue is used by implementations of WriteScheduler. +// +// Each writeQueue contains a queue of FrameWriteRequests, meant to store all +// FrameWriteRequests associated with a given stream. This is implemented as a +// two-stage queue: currQueue[currPos:] and nextQueue. Removing an item is done +// by incrementing currPos of currQueue. Adding an item is done by appending it +// to the nextQueue. If currQueue is empty when trying to remove an item, we +// can swap currQueue and nextQueue to remedy the situation. +// This two-stage queue is analogous to the use of two lists in Okasaki's +// purely functional queue but without the overhead of reversing the list when +// swapping stages. +// +// writeQueue also contains prev and next, this can be used by implementations +// of WriteScheduler to construct data structures that represent the order of +// writing between different streams (e.g. circular linked list). type writeQueue struct { - s []FrameWriteRequest + currQueue []FrameWriteRequest + nextQueue []FrameWriteRequest + currPos int + prev, next *writeQueue } -func (q *writeQueue) empty() bool { return len(q.s) == 0 } +func (q *writeQueue) empty() bool { + return (len(q.currQueue) - q.currPos + len(q.nextQueue)) == 0 +} func (q *writeQueue) push(wr FrameWriteRequest) { - q.s = append(q.s, wr) + q.nextQueue = append(q.nextQueue, wr) } func (q *writeQueue) shift() FrameWriteRequest { - if len(q.s) == 0 { + if q.empty() { panic("invalid use of queue") } - wr := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = FrameWriteRequest{} - q.s = q.s[:len(q.s)-1] + if q.currPos >= len(q.currQueue) { + q.currQueue, q.currPos, q.nextQueue = q.nextQueue, 0, q.currQueue[:0] + } + wr := q.currQueue[q.currPos] + q.currQueue[q.currPos] = FrameWriteRequest{} + q.currPos++ return wr } +func (q *writeQueue) peek() *FrameWriteRequest { + if q.currPos < len(q.currQueue) { + return &q.currQueue[q.currPos] + } + if len(q.nextQueue) > 0 { + return &q.nextQueue[0] + } + return nil +} + // consume consumes up to n bytes from q.s[0]. If the frame is // entirely consumed, it is removed from the queue. 
If the frame // is partially consumed, the frame is kept with the consumed // bytes removed. Returns true iff any bytes were consumed. func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { - if len(q.s) == 0 { + if q.empty() { return FrameWriteRequest{}, false } - consumed, rest, numresult := q.s[0].Consume(n) + consumed, rest, numresult := q.peek().Consume(n) switch numresult { case 0: return FrameWriteRequest{}, false case 1: q.shift() case 2: - q.s[0] = rest + *q.peek() = rest } return consumed, true } @@ -230,10 +262,15 @@ type writeQueuePool []*writeQueue // put inserts an unused writeQueue into the pool. func (p *writeQueuePool) put(q *writeQueue) { - for i := range q.s { - q.s[i] = FrameWriteRequest{} + for i := range q.currQueue { + q.currQueue[i] = FrameWriteRequest{} } - q.s = q.s[:0] + for i := range q.nextQueue { + q.nextQueue[i] = FrameWriteRequest{} + } + q.currQueue = q.currQueue[:0] + q.nextQueue = q.nextQueue[:0] + q.currPos = 0 *p = append(*p, q) } diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go similarity index 77% rename from vendor/golang.org/x/net/http2/writesched_priority.go rename to vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go index f6783339d..4e33c29a2 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go @@ -11,7 +11,7 @@ import ( ) // RFC 7540, Section 5.3.5: the default weight is 16. -const priorityDefaultWeight = 15 // 16 = 15 + 1 +const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1 // PriorityWriteSchedulerConfig configures a priorityWriteScheduler. type PriorityWriteSchedulerConfig struct { @@ -66,8 +66,8 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler } } - ws := &priorityWriteScheduler{ - nodes: make(map[uint32]*priorityNode), + ws := &priorityWriteSchedulerRFC7540{ + nodes: make(map[uint32]*priorityNodeRFC7540), maxClosedNodesInTree: cfg.MaxClosedNodesInTree, maxIdleNodesInTree: cfg.MaxIdleNodesInTree, enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, @@ -81,32 +81,32 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler return ws } -type priorityNodeState int +type priorityNodeStateRFC7540 int const ( - priorityNodeOpen priorityNodeState = iota - priorityNodeClosed - priorityNodeIdle + priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota + priorityNodeClosedRFC7540 + priorityNodeIdleRFC7540 ) -// priorityNode is a node in an HTTP/2 priority tree. +// priorityNodeRFC7540 is a node in an HTTP/2 priority tree. // Each node is associated with a single stream ID. // See RFC 7540, Section 5.3. 
-type priorityNode struct { - q writeQueue // queue of pending frames to write - id uint32 // id of the stream, or 0 for the root of the tree - weight uint8 // the actual weight is weight+1, so the value is in [1,256] - state priorityNodeState // open | closed | idle - bytes int64 // number of bytes written by this node, or 0 if closed - subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree +type priorityNodeRFC7540 struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeStateRFC7540 // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree // These links form the priority tree. - parent *priorityNode - kids *priorityNode // start of the kids list - prev, next *priorityNode // doubly-linked list of siblings + parent *priorityNodeRFC7540 + kids *priorityNodeRFC7540 // start of the kids list + prev, next *priorityNodeRFC7540 // doubly-linked list of siblings } -func (n *priorityNode) setParent(parent *priorityNode) { +func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) { if n == parent { panic("setParent to self") } @@ -141,7 +141,7 @@ func (n *priorityNode) setParent(parent *priorityNode) { } } -func (n *priorityNode) addBytes(b int64) { +func (n *priorityNodeRFC7540) addBytes(b int64) { n.bytes += b for ; n != nil; n = n.parent { n.subtreeBytes += b @@ -154,7 +154,7 @@ func (n *priorityNode) addBytes(b int64) { // // f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true // if any ancestor p of n is still open (ignoring the root node). -func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { +func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool { if !n.q.empty() && f(n, openParent) { return true } @@ -165,7 +165,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f // Don't consider the root "open" when updating openParent since // we can't send data frames on the root stream (only control frames). if n.id != 0 { - openParent = openParent || (n.state == priorityNodeOpen) + openParent = openParent || (n.state == priorityNodeOpenRFC7540) } // Common case: only one kid or all kids have the same weight. 
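The writeQueue rework in writesched.go above drops the copy-on-shift slice (the old "TODO: less copy-happy queue") in favor of a two-stage queue: pushes append to nextQueue, pops advance currPos through currQueue, and the stages swap when the current one runs dry, reusing both backing arrays. A small generic sketch of the same amortized O(1) structure, independent of the http2 types (names here are illustrative):

package main

import "fmt"

// twoStageQueue is a FIFO in the style of the writeQueue change: pops
// advance a cursor in curr, pushes append to next, and when curr is
// exhausted the two slices swap, keeping their backing arrays.
type twoStageQueue[T any] struct {
	curr []T
	next []T
	pos  int
}

func (q *twoStageQueue[T]) empty() bool {
	return len(q.curr)-q.pos+len(q.next) == 0
}

func (q *twoStageQueue[T]) push(v T) {
	q.next = append(q.next, v)
}

func (q *twoStageQueue[T]) shift() T {
	if q.empty() {
		panic("shift on empty queue")
	}
	if q.pos >= len(q.curr) {
		// Current stage exhausted: swap stages and reuse the old array.
		q.curr, q.pos, q.next = q.next, 0, q.curr[:0]
	}
	v := q.curr[q.pos]
	var zero T
	q.curr[q.pos] = zero // clear the slot so the element can be collected
	q.pos++
	return v
}

func main() {
	var q twoStageQueue[int]
	for i := 1; i <= 3; i++ {
		q.push(i)
	}
	fmt.Println(q.shift(), q.shift()) // 1 2
	q.push(4)
	fmt.Println(q.shift(), q.shift()) // 3 4
}

As the diff's own comment notes, this is analogous to Okasaki's two-list purely functional queue, minus the list reversal on swap.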
@@ -195,7 +195,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f *tmp = append(*tmp, n.kids) n.kids.setParent(nil) } - sort.Sort(sortPriorityNodeSiblings(*tmp)) + sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp)) for i := len(*tmp) - 1; i >= 0; i-- { (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids } @@ -207,15 +207,15 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f return false } -type sortPriorityNodeSiblings []*priorityNode +type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540 -func (z sortPriorityNodeSiblings) Len() int { return len(z) } -func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } -func (z sortPriorityNodeSiblings) Less(i, k int) bool { +func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) } +func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool { // Prefer the subtree that has sent fewer bytes relative to its weight. // See sections 5.3.2 and 5.3.4. - wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) - wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes) if bi == 0 && bk == 0 { return wi >= wk } @@ -225,13 +225,13 @@ func (z sortPriorityNodeSiblings) Less(i, k int) bool { return bi/bk <= wi/wk } -type priorityWriteScheduler struct { +type priorityWriteSchedulerRFC7540 struct { // root is the root of the priority tree, where root.id = 0. // The root queues control frames that are not associated with any stream. - root priorityNode + root priorityNodeRFC7540 // nodes maps stream ids to priority tree nodes. - nodes map[uint32]*priorityNode + nodes map[uint32]*priorityNodeRFC7540 // maxID is the maximum stream id in nodes. maxID uint32 @@ -239,7 +239,7 @@ type priorityWriteScheduler struct { // lists of nodes that have been closed or are idle, but are kept in // the tree for improved prioritization. When the lengths exceed either // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. - closedNodes, idleNodes []*priorityNode + closedNodes, idleNodes []*priorityNodeRFC7540 // From the config. maxClosedNodesInTree int @@ -248,19 +248,19 @@ type priorityWriteScheduler struct { enableWriteThrottle bool // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. - tmp []*priorityNode + tmp []*priorityNodeRFC7540 // pool of empty queues for reuse. queuePool writeQueuePool } -func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { +func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) { // The stream may be currently idle but cannot be opened or closed. 
if curr := ws.nodes[streamID]; curr != nil { - if curr.state != priorityNodeIdle { + if curr.state != priorityNodeIdleRFC7540 { panic(fmt.Sprintf("stream %d already opened", streamID)) } - curr.state = priorityNodeOpen + curr.state = priorityNodeOpenRFC7540 return } @@ -272,11 +272,11 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream if parent == nil { parent = &ws.root } - n := &priorityNode{ + n := &priorityNodeRFC7540{ q: *ws.queuePool.get(), id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeOpen, + weight: priorityDefaultWeightRFC7540, + state: priorityNodeOpenRFC7540, } n.setParent(parent) ws.nodes[streamID] = n @@ -285,24 +285,23 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream } } -func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { +func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) { if streamID == 0 { panic("violation of WriteScheduler interface: cannot close stream 0") } if ws.nodes[streamID] == nil { panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) } - if ws.nodes[streamID].state != priorityNodeOpen { + if ws.nodes[streamID].state != priorityNodeOpenRFC7540 { panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) } n := ws.nodes[streamID] - n.state = priorityNodeClosed + n.state = priorityNodeClosedRFC7540 n.addBytes(-n.bytes) q := n.q ws.queuePool.put(&q) - n.q.s = nil if ws.maxClosedNodesInTree > 0 { ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) } else { @@ -310,7 +309,7 @@ func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { } } -func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { +func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) { if streamID == 0 { panic("adjustPriority on root") } @@ -324,11 +323,11 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit return } ws.maxID = streamID - n = &priorityNode{ + n = &priorityNodeRFC7540{ q: *ws.queuePool.get(), id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeIdle, + weight: priorityDefaultWeightRFC7540, + state: priorityNodeIdleRFC7540, } n.setParent(&ws.root) ws.nodes[streamID] = n @@ -340,7 +339,7 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit parent := ws.nodes[priority.StreamDep] if parent == nil { n.setParent(&ws.root) - n.weight = priorityDefaultWeight + n.weight = priorityDefaultWeightRFC7540 return } @@ -381,8 +380,8 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit n.weight = priority.Weight } -func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { - var n *priorityNode +func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) { + var n *priorityNodeRFC7540 if wr.isControl() { n = &ws.root } else { @@ -401,8 +400,8 @@ func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { n.q.push(wr) } -func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { - ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { +func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool { limit := int32(math.MaxInt32) if openParent { limit = ws.writeThrottleLimit @@ -428,7 +427,7 @@ func (ws *priorityWriteScheduler) Pop() (wr 
FrameWriteRequest, ok bool) { return wr, ok } -func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { +func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) { if maxSize == 0 { return } @@ -442,7 +441,7 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max *list = append(*list, n) } -func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { +func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) { for n.kids != nil { n.kids.setParent(n.parent) } diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go new file mode 100644 index 000000000..dfbfc1eb3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go @@ -0,0 +1,224 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" +) + +type streamMetadata struct { + location *writeQueue + priority PriorityParam +} + +type priorityWriteSchedulerRFC9218 struct { + // control contains control frames (SETTINGS, PING, etc.). + control writeQueue + + // heads contain the head of a circular list of streams. + // We put these heads within a nested array that represents urgency and + // incremental, as defined in + // https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters. + // 8 represents u=0 up to u=7, and 2 represents i=false and i=true. + heads [8][2]*writeQueue + + // streams contains a mapping between each stream ID and their metadata, so + // we can quickly locate them when needing to, for example, adjust their + // priority. + streams map[uint32]streamMetadata + + // queuePool are empty queues for reuse. + queuePool writeQueuePool + + // prioritizeIncremental is used to determine whether we should prioritize + // incremental streams or not, when urgency is the same in a given Pop() + // call. + prioritizeIncremental bool + + // priorityUpdateBuf is used to buffer the most recent PRIORITY_UPDATE we + // receive per https://www.rfc-editor.org/rfc/rfc9218.html#name-the-priority_update-frame. + priorityUpdateBuf struct { + // streamID being 0 means that the buffer is empty. This is a safe + // assumption as PRIORITY_UPDATE for stream 0 is a PROTOCOL_ERROR. + streamID uint32 + priority PriorityParam + } +} + +func newPriorityWriteSchedulerRFC9218() WriteScheduler { + ws := &priorityWriteSchedulerRFC9218{ + streams: make(map[uint32]streamMetadata), + } + return ws +} + +func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) { + if ws.streams[streamID].location != nil { + panic(fmt.Errorf("stream %d already opened", streamID)) + } + if streamID == ws.priorityUpdateBuf.streamID { + ws.priorityUpdateBuf.streamID = 0 + opt.priority = ws.priorityUpdateBuf.priority + } + q := ws.queuePool.get() + ws.streams[streamID] = streamMetadata{ + location: q, + priority: opt.priority, + } + + u, i := opt.priority.urgency, opt.priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. 
+ q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } +} + +func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + if q.next == q { + // This was the only open stream. + ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + delete(ws.streams, streamID) + ws.queuePool.put(q) +} + +func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + ws.priorityUpdateBuf.streamID = streamID + ws.priorityUpdateBuf.priority = priority + return + } + + // Remove stream from current location. + if q.next == q { + // This was the only open stream. + ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + + // Insert stream to the new queue. + u, i = priority.urgency, priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } + + // Update the metadata. + ws.streams[streamID] = streamMetadata{ + location: q, + priority: priority, + } +} + +func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) { + if wr.isControl() { + ws.control.push(wr) + return + } + q := ws.streams[wr.StreamID()].location + if q == nil { + // This is a closed stream. + // wr should not be a HEADERS or DATA frame. + // We push the request onto the control queue. + if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + ws.control.push(wr) + return + } + q.push(wr) +} + +func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) { + // Control and RST_STREAM frames first. + if !ws.control.empty() { + return ws.control.shift(), true + } + + // On the next Pop(), we want to prioritize incremental if we prioritized + // non-incremental request of the same urgency this time. Vice-versa. + // i.e. when there are incremental and non-incremental requests at the same + // priority, we give 50% of our bandwidth to the incremental ones in + // aggregate and 50% to the first non-incremental one (since + // non-incremental streams do not use round-robin writes). + ws.prioritizeIncremental = !ws.prioritizeIncremental + + // Always prioritize lowest u (i.e. highest urgency level). + for u := range ws.heads { + for i := range ws.heads[u] { + // When we want to prioritize incremental, we try to pop i=true + // first before i=false when u is the same. + if ws.prioritizeIncremental { + i = (i + 1) % 2 + } + q := ws.heads[u][i] + if q == nil { + continue + } + for { + if wr, ok := q.consume(math.MaxInt32); ok { + if i == 1 { + // For incremental streams, we update head to q.next so + // we can round-robin between multiple streams that can + // immediately benefit from partial writes. + ws.heads[u][i] = q.next + } else { + // For non-incremental streams, we try to finish one to + // completion rather than doing round-robin. However, + // we update head here so that if q.consume() is !ok + // (e.g. 
the stream has no more frame to consume), head + // is updated to the next q that has frames to consume + // on future iterations. This way, we do not prioritize + // writing to unavailable stream on next Pop() calls, + // preventing head-of-line blocking. + ws.heads[u][i] = q + } + return wr, true + } + q = q.next + if q == ws.heads[u][i] { + break + } + } + + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go index 54fe86322..737cff9ec 100644 --- a/vendor/golang.org/x/net/http2/writesched_roundrobin.go +++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go @@ -25,7 +25,7 @@ type roundRobinWriteScheduler struct { } // newRoundRobinWriteScheduler constructs a new write scheduler. -// The round robin scheduler priorizes control frames +// The round robin scheduler prioritizes control frames // like SETTINGS and PING over DATA frames. // When there are no control frames to send, it performs a round-robin // selection from the ready streams. diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go index 4b7055317..1e10f89eb 100644 --- a/vendor/golang.org/x/net/internal/httpcommon/request.go +++ b/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -51,7 +51,7 @@ type EncodeHeadersParam struct { DefaultUserAgent string } -// EncodeHeadersParam is the result of EncodeHeaders. +// EncodeHeadersResult is the result of EncodeHeaders. type EncodeHeadersResult struct { HasBody bool HasTrailers bool @@ -399,7 +399,7 @@ type ServerRequestResult struct { // If the request should be rejected, this is a short string suitable for passing // to the http2 package's CountError function. - // It might be a bit odd to return errors this way rather than returing an error, + // It might be a bit odd to return errors this way rather than returning an error, // but this ensures we don't forget to include a CountError reason. InvalidReason string } diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go index 84fcc32b6..8eedb84ce 100644 --- a/vendor/golang.org/x/net/internal/socks/socks.go +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -297,7 +297,7 @@ func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, b = append(b, up.Username...) b = append(b, byte(len(up.Password))) b = append(b, up.Password...) 
- // TODO(mikio): handle IO deadlines and cancelation if + // TODO(mikio): handle IO deadlines and cancellation if // necessary if _, err := rw.Write(b); err != nil { return err diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go index 3aaffdd1f..c2b3c0098 100644 --- a/vendor/golang.org/x/net/trace/events.go +++ b/vendor/golang.org/x/net/trace/events.go @@ -58,8 +58,8 @@ func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { Buckets: buckets, } - data.Families = make([]string, 0, len(families)) famMu.RLock() + data.Families = make([]string, 0, len(families)) for name := range families { data.Families = append(data.Families, name) } diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index 22cc99844..3b0450a06 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -9,31 +9,27 @@ // func getisar0() uint64 TEXT ·getisar0(SB),NOSPLIT,$0-8 // get Instruction Set Attributes 0 into x0 - // mrs x0, ID_AA64ISAR0_EL1 = d5380600 - WORD $0xd5380600 + MRS ID_AA64ISAR0_EL1, R0 MOVD R0, ret+0(FP) RET // func getisar1() uint64 TEXT ·getisar1(SB),NOSPLIT,$0-8 // get Instruction Set Attributes 1 into x0 - // mrs x0, ID_AA64ISAR1_EL1 = d5380620 - WORD $0xd5380620 + MRS ID_AA64ISAR1_EL1, R0 MOVD R0, ret+0(FP) RET // func getpfr0() uint64 TEXT ·getpfr0(SB),NOSPLIT,$0-8 // get Processor Feature Register 0 into x0 - // mrs x0, ID_AA64PFR0_EL1 = d5380400 - WORD $0xd5380400 + MRS ID_AA64PFR0_EL1, R0 MOVD R0, ret+0(FP) RET // func getzfr0() uint64 TEXT ·getzfr0(SB),NOSPLIT,$0-8 // get SVE Feature Register 0 into x0 - // mrs x0, ID_AA64ZFR0_EL1 = d5380480 - WORD $0xd5380480 + MRS ID_AA64ZFR0_EL1, R0 MOVD R0, ret+0(FP) RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 1e642f330..f5723d4f7 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -64,6 +64,80 @@ func initOptions() { func archInit() { + // From internal/cpu + const ( + // eax bits + cpuid_AVXVNNI = 1 << 4 + + // ecx bits + cpuid_SSE3 = 1 << 0 + cpuid_PCLMULQDQ = 1 << 1 + cpuid_AVX512VBMI = 1 << 1 + cpuid_AVX512VBMI2 = 1 << 6 + cpuid_SSSE3 = 1 << 9 + cpuid_AVX512GFNI = 1 << 8 + cpuid_AVX512VAES = 1 << 9 + cpuid_AVX512VNNI = 1 << 11 + cpuid_AVX512BITALG = 1 << 12 + cpuid_FMA = 1 << 12 + cpuid_AVX512VPOPCNTDQ = 1 << 14 + cpuid_SSE41 = 1 << 19 + cpuid_SSE42 = 1 << 20 + cpuid_POPCNT = 1 << 23 + cpuid_AES = 1 << 25 + cpuid_OSXSAVE = 1 << 27 + cpuid_AVX = 1 << 28 + + // "Extended Feature Flag" bits returned in EBX for CPUID EAX=0x7 ECX=0x0 + cpuid_BMI1 = 1 << 3 + cpuid_AVX2 = 1 << 5 + cpuid_BMI2 = 1 << 8 + cpuid_ERMS = 1 << 9 + cpuid_AVX512F = 1 << 16 + cpuid_AVX512DQ = 1 << 17 + cpuid_ADX = 1 << 19 + cpuid_AVX512CD = 1 << 28 + cpuid_SHA = 1 << 29 + cpuid_AVX512BW = 1 << 30 + cpuid_AVX512VL = 1 << 31 + + // "Extended Feature Flag" bits returned in ECX for CPUID EAX=0x7 ECX=0x0 + cpuid_AVX512_VBMI = 1 << 1 + cpuid_AVX512_VBMI2 = 1 << 6 + cpuid_GFNI = 1 << 8 + cpuid_AVX512VPCLMULQDQ = 1 << 10 + cpuid_AVX512_BITALG = 1 << 12 + + // edx bits + cpuid_FSRM = 1 << 4 + // edx bits for CPUID 0x80000001 + cpuid_RDTSCP = 1 << 27 + ) + // Additional constants not in internal/cpu + const ( + // eax=1: edx + cpuid_SSE2 = 1 << 26 + // eax=1: ecx + cpuid_CX16 = 1 << 13 + cpuid_RDRAND = 1 << 30 + // eax=7,ecx=0: ebx + cpuid_RDSEED = 1 << 18 + cpuid_AVX512IFMA = 1 << 21 + cpuid_AVX512PF = 1 << 26 + cpuid_AVX512ER = 1 << 27 + // 
eax=7,ecx=0: edx + cpuid_AVX5124VNNIW = 1 << 2 + cpuid_AVX5124FMAPS = 1 << 3 + cpuid_AMXBF16 = 1 << 22 + cpuid_AMXTile = 1 << 24 + cpuid_AMXInt8 = 1 << 25 + // eax=7,ecx=1: eax + cpuid_AVX512BF16 = 1 << 5 + cpuid_AVXIFMA = 1 << 23 + // eax=7,ecx=1: edx + cpuid_AVXVNNIInt8 = 1 << 4 + ) + Initialized = true maxID, _, _, _ := cpuid(0, 0) @@ -73,90 +147,90 @@ func archInit() { } _, _, ecx1, edx1 := cpuid(1, 0) - X86.HasSSE2 = isSet(26, edx1) + X86.HasSSE2 = isSet(edx1, cpuid_SSE2) - X86.HasSSE3 = isSet(0, ecx1) - X86.HasPCLMULQDQ = isSet(1, ecx1) - X86.HasSSSE3 = isSet(9, ecx1) - X86.HasFMA = isSet(12, ecx1) - X86.HasCX16 = isSet(13, ecx1) - X86.HasSSE41 = isSet(19, ecx1) - X86.HasSSE42 = isSet(20, ecx1) - X86.HasPOPCNT = isSet(23, ecx1) - X86.HasAES = isSet(25, ecx1) - X86.HasOSXSAVE = isSet(27, ecx1) - X86.HasRDRAND = isSet(30, ecx1) + X86.HasSSE3 = isSet(ecx1, cpuid_SSE3) + X86.HasPCLMULQDQ = isSet(ecx1, cpuid_PCLMULQDQ) + X86.HasSSSE3 = isSet(ecx1, cpuid_SSSE3) + X86.HasFMA = isSet(ecx1, cpuid_FMA) + X86.HasCX16 = isSet(ecx1, cpuid_CX16) + X86.HasSSE41 = isSet(ecx1, cpuid_SSE41) + X86.HasSSE42 = isSet(ecx1, cpuid_SSE42) + X86.HasPOPCNT = isSet(ecx1, cpuid_POPCNT) + X86.HasAES = isSet(ecx1, cpuid_AES) + X86.HasOSXSAVE = isSet(ecx1, cpuid_OSXSAVE) + X86.HasRDRAND = isSet(ecx1, cpuid_RDRAND) var osSupportsAVX, osSupportsAVX512 bool // For XGETBV, OSXSAVE bit is required and sufficient. if X86.HasOSXSAVE { eax, _ := xgetbv() // Check if XMM and YMM registers have OS support. - osSupportsAVX = isSet(1, eax) && isSet(2, eax) + osSupportsAVX = isSet(eax, 1<<1) && isSet(eax, 1<<2) if runtime.GOOS == "darwin" { // Darwin requires special AVX512 checks, see cpu_darwin_x86.go osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512() } else { // Check if OPMASK and ZMM registers have OS support. 
- osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) + osSupportsAVX512 = osSupportsAVX && isSet(eax, 1<<5) && isSet(eax, 1<<6) && isSet(eax, 1<<7) } } - X86.HasAVX = isSet(28, ecx1) && osSupportsAVX + X86.HasAVX = isSet(ecx1, cpuid_AVX) && osSupportsAVX if maxID < 7 { return } eax7, ebx7, ecx7, edx7 := cpuid(7, 0) - X86.HasBMI1 = isSet(3, ebx7) - X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX - X86.HasBMI2 = isSet(8, ebx7) - X86.HasERMS = isSet(9, ebx7) - X86.HasRDSEED = isSet(18, ebx7) - X86.HasADX = isSet(19, ebx7) + X86.HasBMI1 = isSet(ebx7, cpuid_BMI1) + X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX + X86.HasBMI2 = isSet(ebx7, cpuid_BMI2) + X86.HasERMS = isSet(ebx7, cpuid_ERMS) + X86.HasRDSEED = isSet(ebx7, cpuid_RDSEED) + X86.HasADX = isSet(ebx7, cpuid_ADX) - X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension + X86.HasAVX512 = isSet(ebx7, cpuid_AVX512F) && osSupportsAVX512 // Because avx-512 foundation is the core required extension if X86.HasAVX512 { X86.HasAVX512F = true - X86.HasAVX512CD = isSet(28, ebx7) - X86.HasAVX512ER = isSet(27, ebx7) - X86.HasAVX512PF = isSet(26, ebx7) - X86.HasAVX512VL = isSet(31, ebx7) - X86.HasAVX512BW = isSet(30, ebx7) - X86.HasAVX512DQ = isSet(17, ebx7) - X86.HasAVX512IFMA = isSet(21, ebx7) - X86.HasAVX512VBMI = isSet(1, ecx7) - X86.HasAVX5124VNNIW = isSet(2, edx7) - X86.HasAVX5124FMAPS = isSet(3, edx7) - X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7) - X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7) - X86.HasAVX512VNNI = isSet(11, ecx7) - X86.HasAVX512GFNI = isSet(8, ecx7) - X86.HasAVX512VAES = isSet(9, ecx7) - X86.HasAVX512VBMI2 = isSet(6, ecx7) - X86.HasAVX512BITALG = isSet(12, ecx7) + X86.HasAVX512CD = isSet(ebx7, cpuid_AVX512CD) + X86.HasAVX512ER = isSet(ebx7, cpuid_AVX512ER) + X86.HasAVX512PF = isSet(ebx7, cpuid_AVX512PF) + X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL) + X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW) + X86.HasAVX512DQ = isSet(ebx7, cpuid_AVX512DQ) + X86.HasAVX512IFMA = isSet(ebx7, cpuid_AVX512IFMA) + X86.HasAVX512VBMI = isSet(ecx7, cpuid_AVX512_VBMI) + X86.HasAVX5124VNNIW = isSet(edx7, cpuid_AVX5124VNNIW) + X86.HasAVX5124FMAPS = isSet(edx7, cpuid_AVX5124FMAPS) + X86.HasAVX512VPOPCNTDQ = isSet(ecx7, cpuid_AVX512VPOPCNTDQ) + X86.HasAVX512VPCLMULQDQ = isSet(ecx7, cpuid_AVX512VPCLMULQDQ) + X86.HasAVX512VNNI = isSet(ecx7, cpuid_AVX512VNNI) + X86.HasAVX512GFNI = isSet(ecx7, cpuid_AVX512GFNI) + X86.HasAVX512VAES = isSet(ecx7, cpuid_AVX512VAES) + X86.HasAVX512VBMI2 = isSet(ecx7, cpuid_AVX512VBMI2) + X86.HasAVX512BITALG = isSet(ecx7, cpuid_AVX512BITALG) } - X86.HasAMXTile = isSet(24, edx7) - X86.HasAMXInt8 = isSet(25, edx7) - X86.HasAMXBF16 = isSet(22, edx7) + X86.HasAMXTile = isSet(edx7, cpuid_AMXTile) + X86.HasAMXInt8 = isSet(edx7, cpuid_AMXInt8) + X86.HasAMXBF16 = isSet(edx7, cpuid_AMXBF16) // These features depend on the second level of extended features. 
if eax7 >= 1 { eax71, _, _, edx71 := cpuid(7, 1) if X86.HasAVX512 { - X86.HasAVX512BF16 = isSet(5, eax71) + X86.HasAVX512BF16 = isSet(eax71, cpuid_AVX512BF16) } if X86.HasAVX { - X86.HasAVXIFMA = isSet(23, eax71) - X86.HasAVXVNNI = isSet(4, eax71) - X86.HasAVXVNNIInt8 = isSet(4, edx71) + X86.HasAVXIFMA = isSet(eax71, cpuid_AVXIFMA) + X86.HasAVXVNNI = isSet(eax71, cpuid_AVXVNNI) + X86.HasAVXVNNIInt8 = isSet(edx71, cpuid_AVXVNNIInt8) } } } -func isSet(bitpos uint, value uint32) bool { - return value&(1<<bitpos) != 0 +func isSet(value, bit uint32) bool { + return value&bit != 0 } diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh #include #include +#include #include #include #include @@ -255,6 +256,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -529,6 +531,7 @@ ccflags="$@" $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || + $2 ~ /^(DT|EI|ELF|EV|NN|NT|PF|SHF|SHN|SHT|STB|STT|VER)_/ || $2 ~ /^O?XTABS$/ || $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || @@ -611,7 +614,7 @@ ccflags="$@" $2 !~ /IOC_MAGIC/ && $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || - $2 ~ /^IOCTL_VM_SOCKETS_/ || + $2 ~ /^(IOCTL_VM_SOCKETS_|IOCTL_MEI_)/ || $2 ~ /^(TASKSTATS|TS)_/ || $2 ~ /^CGROUPSTATS_/ || $2 ~ /^GENL_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 4958a6570..06c0eea6f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -801,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) { // one. The kernel expects SID to be in network byte order. binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID) copy(sa.raw[8:14], sa.Remote) - for i := 14; i < 14+IFNAMSIZ; i++ { - sa.raw[i] = 0 - } + clear(sa.raw[14 : 14+IFNAMSIZ]) copy(sa.raw[14:], sa.Dev) return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil } @@ -2645,3 +2643,9 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) //sys Mseal(b []byte, flags uint) (err error) + +//sys setMemPolicy(mode int, mask *CPUSet, size int) (err error) = SYS_SET_MEMPOLICY + +func SetMemPolicy(mode int, mask *CPUSet) error { + return setMemPolicy(mode, mask, _CPU_SETSIZE) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 88162099a..34a467697 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { return Statvfs1(path, buf, ST_WAIT) } +func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) { + var ( + _p0 unsafe.Pointer + bufsize uintptr + ) + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index b6db27d93..120a7b35d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -853,20 +853,86 @@ const ( DM_VERSION_MAJOR = 0x4 DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 + DT_ADDRRNGHI = 0x6ffffeff + DT_ADDRRNGLO = 0x6ffffe00 DT_BLK = 0x6 DT_CHR = 0x2 + DT_DEBUG = 0x15 DT_DIR = 0x4 + DT_ENCODING = 0x20 DT_FIFO = 0x1 + DT_FINI = 0xd + DT_FLAGS_1 = 0x6ffffffb + 
DT_GNU_HASH = 0x6ffffef5 + DT_HASH = 0x4 + DT_HIOS = 0x6ffff000 + DT_HIPROC = 0x7fffffff + DT_INIT = 0xc + DT_JMPREL = 0x17 DT_LNK = 0xa + DT_LOOS = 0x6000000d + DT_LOPROC = 0x70000000 + DT_NEEDED = 0x1 + DT_NULL = 0x0 + DT_PLTGOT = 0x3 + DT_PLTREL = 0x14 + DT_PLTRELSZ = 0x2 DT_REG = 0x8 + DT_REL = 0x11 + DT_RELA = 0x7 + DT_RELACOUNT = 0x6ffffff9 + DT_RELAENT = 0x9 + DT_RELASZ = 0x8 + DT_RELCOUNT = 0x6ffffffa + DT_RELENT = 0x13 + DT_RELSZ = 0x12 + DT_RPATH = 0xf DT_SOCK = 0xc + DT_SONAME = 0xe + DT_STRSZ = 0xa + DT_STRTAB = 0x5 + DT_SYMBOLIC = 0x10 + DT_SYMENT = 0xb + DT_SYMTAB = 0x6 + DT_TEXTREL = 0x16 DT_UNKNOWN = 0x0 + DT_VALRNGHI = 0x6ffffdff + DT_VALRNGLO = 0x6ffffd00 + DT_VERDEF = 0x6ffffffc + DT_VERDEFNUM = 0x6ffffffd + DT_VERNEED = 0x6ffffffe + DT_VERNEEDNUM = 0x6fffffff + DT_VERSYM = 0x6ffffff0 DT_WHT = 0xe ECHO = 0x8 ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_SEMAPHORE = 0x1 EFIVARFS_MAGIC = 0xde5e81e4 EFS_SUPER_MAGIC = 0x414a53 + EI_CLASS = 0x4 + EI_DATA = 0x5 + EI_MAG0 = 0x0 + EI_MAG1 = 0x1 + EI_MAG2 = 0x2 + EI_MAG3 = 0x3 + EI_NIDENT = 0x10 + EI_OSABI = 0x7 + EI_PAD = 0x8 + EI_VERSION = 0x6 + ELFCLASS32 = 0x1 + ELFCLASS64 = 0x2 + ELFCLASSNONE = 0x0 + ELFCLASSNUM = 0x3 + ELFDATA2LSB = 0x1 + ELFDATA2MSB = 0x2 + ELFDATANONE = 0x0 + ELFMAG = "\177ELF" + ELFMAG0 = 0x7f + ELFMAG1 = 'E' + ELFMAG2 = 'L' + ELFMAG3 = 'F' + ELFOSABI_LINUX = 0x3 + ELFOSABI_NONE = 0x0 EM_386 = 0x3 EM_486 = 0x6 EM_68K = 0x4 @@ -1152,14 +1218,24 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + ET_CORE = 0x4 + ET_DYN = 0x3 + ET_EXEC = 0x2 + ET_HIPROC = 0xffff + ET_LOPROC = 0xff00 + ET_NONE = 0x0 + ET_REL = 0x1 EV_ABS = 0x3 EV_CNT = 0x20 + EV_CURRENT = 0x1 EV_FF = 0x15 EV_FF_STATUS = 0x17 EV_KEY = 0x1 EV_LED = 0x11 EV_MAX = 0x1f EV_MSC = 0x4 + EV_NONE = 0x0 + EV_NUM = 0x2 EV_PWR = 0x16 EV_REL = 0x2 EV_REP = 0x14 @@ -1539,6 +1615,8 @@ const ( IN_OPEN = 0x20 IN_Q_OVERFLOW = 0x4000 IN_UNMOUNT = 0x2000 + IOCTL_MEI_CONNECT_CLIENT = 0xc0104801 + IOCTL_MEI_CONNECT_CLIENT_VTAG = 0xc0144804 IPPROTO_AH = 0x33 IPPROTO_BEETPH = 0x5e IPPROTO_COMP = 0x6c @@ -2276,7 +2354,167 @@ const ( NLM_F_REPLACE = 0x100 NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 + NN_386_IOPERM = "LINUX" + NN_386_TLS = "LINUX" + NN_ARC_V2 = "LINUX" + NN_ARM_FPMR = "LINUX" + NN_ARM_GCS = "LINUX" + NN_ARM_HW_BREAK = "LINUX" + NN_ARM_HW_WATCH = "LINUX" + NN_ARM_PACA_KEYS = "LINUX" + NN_ARM_PACG_KEYS = "LINUX" + NN_ARM_PAC_ENABLED_KEYS = "LINUX" + NN_ARM_PAC_MASK = "LINUX" + NN_ARM_POE = "LINUX" + NN_ARM_SSVE = "LINUX" + NN_ARM_SVE = "LINUX" + NN_ARM_SYSTEM_CALL = "LINUX" + NN_ARM_TAGGED_ADDR_CTRL = "LINUX" + NN_ARM_TLS = "LINUX" + NN_ARM_VFP = "LINUX" + NN_ARM_ZA = "LINUX" + NN_ARM_ZT = "LINUX" + NN_AUXV = "CORE" + NN_FILE = "CORE" + NN_GNU_PROPERTY_TYPE_0 = "GNU" + NN_LOONGARCH_CPUCFG = "LINUX" + NN_LOONGARCH_CSR = "LINUX" + NN_LOONGARCH_HW_BREAK = "LINUX" + NN_LOONGARCH_HW_WATCH = "LINUX" + NN_LOONGARCH_LASX = "LINUX" + NN_LOONGARCH_LBT = "LINUX" + NN_LOONGARCH_LSX = "LINUX" + NN_MIPS_DSP = "LINUX" + NN_MIPS_FP_MODE = "LINUX" + NN_MIPS_MSA = "LINUX" + NN_PPC_DEXCR = "LINUX" + NN_PPC_DSCR = "LINUX" + NN_PPC_EBB = "LINUX" + NN_PPC_HASHKEYR = "LINUX" + NN_PPC_PKEY = "LINUX" + NN_PPC_PMU = "LINUX" + NN_PPC_PPR = "LINUX" + NN_PPC_SPE = "LINUX" + NN_PPC_TAR = "LINUX" + NN_PPC_TM_CDSCR = "LINUX" + NN_PPC_TM_CFPR = "LINUX" + NN_PPC_TM_CGPR = "LINUX" + NN_PPC_TM_CPPR = "LINUX" + NN_PPC_TM_CTAR = "LINUX" + NN_PPC_TM_CVMX = "LINUX" + NN_PPC_TM_CVSX = "LINUX" + NN_PPC_TM_SPR = "LINUX" + NN_PPC_VMX = "LINUX" + NN_PPC_VSX = "LINUX" + NN_PRFPREG = "CORE" + 
NN_PRPSINFO = "CORE" + NN_PRSTATUS = "CORE" + NN_PRXFPREG = "LINUX" + NN_RISCV_CSR = "LINUX" + NN_RISCV_TAGGED_ADDR_CTRL = "LINUX" + NN_RISCV_VECTOR = "LINUX" + NN_S390_CTRS = "LINUX" + NN_S390_GS_BC = "LINUX" + NN_S390_GS_CB = "LINUX" + NN_S390_HIGH_GPRS = "LINUX" + NN_S390_LAST_BREAK = "LINUX" + NN_S390_PREFIX = "LINUX" + NN_S390_PV_CPU_DATA = "LINUX" + NN_S390_RI_CB = "LINUX" + NN_S390_SYSTEM_CALL = "LINUX" + NN_S390_TDB = "LINUX" + NN_S390_TIMER = "LINUX" + NN_S390_TODCMP = "LINUX" + NN_S390_TODPREG = "LINUX" + NN_S390_VXRS_HIGH = "LINUX" + NN_S390_VXRS_LOW = "LINUX" + NN_SIGINFO = "CORE" + NN_TASKSTRUCT = "CORE" + NN_VMCOREDD = "LINUX" + NN_X86_SHSTK = "LINUX" + NN_X86_XSAVE_LAYOUT = "LINUX" + NN_X86_XSTATE = "LINUX" NSFS_MAGIC = 0x6e736673 + NT_386_IOPERM = 0x201 + NT_386_TLS = 0x200 + NT_ARC_V2 = 0x600 + NT_ARM_FPMR = 0x40e + NT_ARM_GCS = 0x410 + NT_ARM_HW_BREAK = 0x402 + NT_ARM_HW_WATCH = 0x403 + NT_ARM_PACA_KEYS = 0x407 + NT_ARM_PACG_KEYS = 0x408 + NT_ARM_PAC_ENABLED_KEYS = 0x40a + NT_ARM_PAC_MASK = 0x406 + NT_ARM_POE = 0x40f + NT_ARM_SSVE = 0x40b + NT_ARM_SVE = 0x405 + NT_ARM_SYSTEM_CALL = 0x404 + NT_ARM_TAGGED_ADDR_CTRL = 0x409 + NT_ARM_TLS = 0x401 + NT_ARM_VFP = 0x400 + NT_ARM_ZA = 0x40c + NT_ARM_ZT = 0x40d + NT_AUXV = 0x6 + NT_FILE = 0x46494c45 + NT_GNU_PROPERTY_TYPE_0 = 0x5 + NT_LOONGARCH_CPUCFG = 0xa00 + NT_LOONGARCH_CSR = 0xa01 + NT_LOONGARCH_HW_BREAK = 0xa05 + NT_LOONGARCH_HW_WATCH = 0xa06 + NT_LOONGARCH_LASX = 0xa03 + NT_LOONGARCH_LBT = 0xa04 + NT_LOONGARCH_LSX = 0xa02 + NT_MIPS_DSP = 0x800 + NT_MIPS_FP_MODE = 0x801 + NT_MIPS_MSA = 0x802 + NT_PPC_DEXCR = 0x111 + NT_PPC_DSCR = 0x105 + NT_PPC_EBB = 0x106 + NT_PPC_HASHKEYR = 0x112 + NT_PPC_PKEY = 0x110 + NT_PPC_PMU = 0x107 + NT_PPC_PPR = 0x104 + NT_PPC_SPE = 0x101 + NT_PPC_TAR = 0x103 + NT_PPC_TM_CDSCR = 0x10f + NT_PPC_TM_CFPR = 0x109 + NT_PPC_TM_CGPR = 0x108 + NT_PPC_TM_CPPR = 0x10e + NT_PPC_TM_CTAR = 0x10d + NT_PPC_TM_CVMX = 0x10a + NT_PPC_TM_CVSX = 0x10b + NT_PPC_TM_SPR = 0x10c + NT_PPC_VMX = 0x100 + NT_PPC_VSX = 0x102 + NT_PRFPREG = 0x2 + NT_PRPSINFO = 0x3 + NT_PRSTATUS = 0x1 + NT_PRXFPREG = 0x46e62b7f + NT_RISCV_CSR = 0x900 + NT_RISCV_TAGGED_ADDR_CTRL = 0x902 + NT_RISCV_VECTOR = 0x901 + NT_S390_CTRS = 0x304 + NT_S390_GS_BC = 0x30c + NT_S390_GS_CB = 0x30b + NT_S390_HIGH_GPRS = 0x300 + NT_S390_LAST_BREAK = 0x306 + NT_S390_PREFIX = 0x305 + NT_S390_PV_CPU_DATA = 0x30e + NT_S390_RI_CB = 0x30d + NT_S390_SYSTEM_CALL = 0x307 + NT_S390_TDB = 0x308 + NT_S390_TIMER = 0x301 + NT_S390_TODCMP = 0x302 + NT_S390_TODPREG = 0x303 + NT_S390_VXRS_HIGH = 0x30a + NT_S390_VXRS_LOW = 0x309 + NT_SIGINFO = 0x53494749 + NT_TASKSTRUCT = 0x4 + NT_VMCOREDD = 0x700 + NT_X86_SHSTK = 0x204 + NT_X86_XSAVE_LAYOUT = 0x205 + NT_X86_XSTATE = 0x202 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -2463,6 +2701,59 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PF_ALG = 0x26 + PF_APPLETALK = 0x5 + PF_ASH = 0x12 + PF_ATMPVC = 0x8 + PF_ATMSVC = 0x14 + PF_AX25 = 0x3 + PF_BLUETOOTH = 0x1f + PF_BRIDGE = 0x7 + PF_CAIF = 0x25 + PF_CAN = 0x1d + PF_DECnet = 0xc + PF_ECONET = 0x13 + PF_FILE = 0x1 + PF_IB = 0x1b + PF_IEEE802154 = 0x24 + PF_INET = 0x2 + PF_INET6 = 0xa + PF_IPX = 0x4 + PF_IRDA = 0x17 + PF_ISDN = 0x22 + PF_IUCV = 0x20 + PF_KCM = 0x29 + PF_KEY = 0xf + PF_LLC = 0x1a + PF_LOCAL = 0x1 + PF_MAX = 0x2e + PF_MCTP = 0x2d + PF_MPLS = 0x1c + PF_NETBEUI = 0xd + PF_NETLINK = 0x10 + PF_NETROM = 0x6 + PF_NFC = 0x27 + PF_PACKET = 0x11 + PF_PHONET = 0x23 + PF_PPPOX = 0x18 + PF_QIPCRTR = 0x2a + PF_R = 
0x4 + PF_RDS = 0x15 + PF_ROSE = 0xb + PF_ROUTE = 0x10 + PF_RXRPC = 0x21 + PF_SECURITY = 0xe + PF_SMC = 0x2b + PF_SNA = 0x16 + PF_TIPC = 0x1e + PF_UNIX = 0x1 + PF_UNSPEC = 0x0 + PF_VSOCK = 0x28 + PF_W = 0x2 + PF_WANPIPE = 0x19 + PF_X = 0x1 + PF_X25 = 0x9 + PF_XDP = 0x2c PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c @@ -2758,6 +3049,23 @@ const ( PTRACE_SYSCALL_INFO_NONE = 0x0 PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 + PT_AARCH64_MEMTAG_MTE = 0x70000002 + PT_DYNAMIC = 0x2 + PT_GNU_EH_FRAME = 0x6474e550 + PT_GNU_PROPERTY = 0x6474e553 + PT_GNU_RELRO = 0x6474e552 + PT_GNU_STACK = 0x6474e551 + PT_HIOS = 0x6fffffff + PT_HIPROC = 0x7fffffff + PT_INTERP = 0x3 + PT_LOAD = 0x1 + PT_LOOS = 0x60000000 + PT_LOPROC = 0x70000000 + PT_NOTE = 0x4 + PT_NULL = 0x0 + PT_PHDR = 0x6 + PT_SHLIB = 0x5 + PT_TLS = 0x7 P_ALL = 0x0 P_PGID = 0x2 P_PID = 0x1 @@ -3091,6 +3399,47 @@ const ( SEEK_MAX = 0x4 SEEK_SET = 0x0 SELINUX_MAGIC = 0xf97cff8c + SHF_ALLOC = 0x2 + SHF_EXCLUDE = 0x8000000 + SHF_EXECINSTR = 0x4 + SHF_GROUP = 0x200 + SHF_INFO_LINK = 0x40 + SHF_LINK_ORDER = 0x80 + SHF_MASKOS = 0xff00000 + SHF_MASKPROC = 0xf0000000 + SHF_MERGE = 0x10 + SHF_ORDERED = 0x4000000 + SHF_OS_NONCONFORMING = 0x100 + SHF_RELA_LIVEPATCH = 0x100000 + SHF_RO_AFTER_INIT = 0x200000 + SHF_STRINGS = 0x20 + SHF_TLS = 0x400 + SHF_WRITE = 0x1 + SHN_ABS = 0xfff1 + SHN_COMMON = 0xfff2 + SHN_HIPROC = 0xff1f + SHN_HIRESERVE = 0xffff + SHN_LIVEPATCH = 0xff20 + SHN_LOPROC = 0xff00 + SHN_LORESERVE = 0xff00 + SHN_UNDEF = 0x0 + SHT_DYNAMIC = 0x6 + SHT_DYNSYM = 0xb + SHT_HASH = 0x5 + SHT_HIPROC = 0x7fffffff + SHT_HIUSER = 0xffffffff + SHT_LOPROC = 0x70000000 + SHT_LOUSER = 0x80000000 + SHT_NOBITS = 0x8 + SHT_NOTE = 0x7 + SHT_NULL = 0x0 + SHT_NUM = 0xc + SHT_PROGBITS = 0x1 + SHT_REL = 0x9 + SHT_RELA = 0x4 + SHT_SHLIB = 0xa + SHT_STRTAB = 0x3 + SHT_SYMTAB = 0x2 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -3317,6 +3666,16 @@ const ( STATX_UID = 0x8 STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 + STB_GLOBAL = 0x1 + STB_LOCAL = 0x0 + STB_WEAK = 0x2 + STT_COMMON = 0x5 + STT_FILE = 0x4 + STT_FUNC = 0x2 + STT_NOTYPE = 0x0 + STT_OBJECT = 0x1 + STT_SECTION = 0x3 + STT_TLS = 0x6 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 @@ -3553,6 +3912,8 @@ const ( UTIME_OMIT = 0x3ffffffe V9FS_MAGIC = 0x1021997 VERASE = 0x2 + VER_FLG_BASE = 0x1 + VER_FLG_WEAK = 0x2 VINTR = 0x0 VKILL = 0x3 VLNEXT = 0xf diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 1c37f9fbc..97a61fc5b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 6f54d34ae..a0d6d498c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 783ec5c12..dd9c903f9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index ca83d3ba1..384c61ca3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -120,6 +120,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 607e611c0..6384c9831 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index b9cb5bd3c..553c1c6f1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 65b078a63..b3339f209 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 5298a3033..177091d2b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7bc557c87..c5abf156d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + 
IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 152399bb0..f1f3fadf5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 1a1ce2409..203ad9c54 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 4231a1fb5..4b9abcb21 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 21c0e9526..f87983037 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index f00d1cd7c..64347eb35 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index bc8d539e6..7d7191171 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,6 +119,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 5cc1e8eb2..8935d10a3 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -2238,3 +2238,13 @@ func Mseal(b []byte, flags uint) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setMemPolicy(mode int, mask *CPUSet, size int) (err error) { + _, _, e1 := Syscall(SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), uintptr(size)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 944e75a11..c1a467017 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -3590,6 +3590,8 @@ type Nhmsg struct { Flags uint32 } +const SizeofNhmsg = 0x8 + type NexthopGrp struct { Id uint32 Weight uint8 @@ -3597,6 +3599,8 @@ type NexthopGrp struct { Resvd2 uint16 } +const SizeofNexthopGrp = 0x8 + const ( NHA_UNSPEC = 0x0 NHA_ID = 0x1 @@ -6332,3 +6336,30 @@ type SockDiagReq struct { } const RTM_NEWNVLAN = 0x70 + +const ( + MPOL_BIND = 0x2 + MPOL_DEFAULT = 0x0 + MPOL_F_ADDR = 0x2 + MPOL_F_MEMS_ALLOWED = 0x4 + MPOL_F_MOF = 0x8 + MPOL_F_MORON = 0x10 + MPOL_F_NODE = 0x1 + MPOL_F_NUMA_BALANCING = 0x2000 + MPOL_F_RELATIVE_NODES = 0x4000 + MPOL_F_SHARED = 0x1 + MPOL_F_STATIC_NODES = 0x8000 + MPOL_INTERLEAVE = 0x3 + MPOL_LOCAL = 0x4 + MPOL_MAX = 0x7 + MPOL_MF_INTERNAL = 0x10 + MPOL_MF_LAZY = 0x8 + MPOL_MF_MOVE_ALL = 0x4 + MPOL_MF_MOVE = 0x2 + MPOL_MF_STRICT = 0x1 + MPOL_MF_VALID = 0x7 + MPOL_MODE_FLAGS = 0xe000 + MPOL_PREFERRED = 0x1 + MPOL_PREFERRED_MANY = 0x5 + MPOL_WEIGHTED_INTERLEAVE = 0x6 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 439548ec9..50e8e6449 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -104,7 +104,7 @@ type Statvfs_t struct { Fsid uint32 Namemax uint32 Owner uint32 - Spare [4]uint32 + Spare [4]uint64 Fstypename [32]byte Mntonname [1024]byte Mntfromname [1024]byte diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 640f6b153..69439df2a 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -321,6 +321,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents +//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW @@ -890,8 +892,12 @@ const socket_error = uintptr(^uint32(0)) //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = 
kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx //sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) = iphlpapi.GetIpForwardEntry2 +//sys GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) = iphlpapi.GetIpForwardTable2 //sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys FreeMibTable(memory unsafe.Pointer) = iphlpapi.FreeMibTable //sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyRouteChange2 //sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange //sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 @@ -914,6 +920,17 @@ type RawSockaddrInet6 struct { Scope_id uint32 } +// RawSockaddrInet is a union that contains an IPv4, an IPv6 address, or an address family. See +// https://learn.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-sockaddr_inet. +// +// A [*RawSockaddrInet] may be converted to a [*RawSockaddrInet4] or [*RawSockaddrInet6] using +// unsafe, depending on the address family. +type RawSockaddrInet struct { + Family uint16 + Port uint16 + Data [6]uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 993a2297d..6e4f50eb4 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -65,6 +65,22 @@ var signals = [...]string{ 15: "terminated", } +// File flags for [os.OpenFile]. The O_ prefix is used to indicate +// that these flags are specific to the OpenFile function. +const ( + O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL + O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT + O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE + O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS + O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS + O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE + O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN + O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS + O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING + O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED + O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH +) + const ( FILE_READ_DATA = 0x00000001 FILE_READ_ATTRIBUTES = 0x00000080 @@ -2304,6 +2320,82 @@ type MibIfRow2 struct { OutQLen uint64 } +// IP_ADDRESS_PREFIX stores an IP address prefix. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-ip_address_prefix. +type IpAddressPrefix struct { + Prefix RawSockaddrInet + PrefixLength uint8 +} + +// NL_ROUTE_ORIGIN enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_origin. 
+const ( + NlroManual = 0 + NlroWellKnown = 1 + NlroDHCP = 2 + NlroRouterAdvertisement = 3 + Nlro6to4 = 4 +) + +// NL_ROUTE_PROTOCOL enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_protocol. +const ( + MIB_IPPROTO_OTHER = 1 + MIB_IPPROTO_LOCAL = 2 + MIB_IPPROTO_NETMGMT = 3 + MIB_IPPROTO_ICMP = 4 + MIB_IPPROTO_EGP = 5 + MIB_IPPROTO_GGP = 6 + MIB_IPPROTO_HELLO = 7 + MIB_IPPROTO_RIP = 8 + MIB_IPPROTO_IS_IS = 9 + MIB_IPPROTO_ES_IS = 10 + MIB_IPPROTO_CISCO = 11 + MIB_IPPROTO_BBN = 12 + MIB_IPPROTO_OSPF = 13 + MIB_IPPROTO_BGP = 14 + MIB_IPPROTO_IDPR = 15 + MIB_IPPROTO_EIGRP = 16 + MIB_IPPROTO_DVMRP = 17 + MIB_IPPROTO_RPL = 18 + MIB_IPPROTO_DHCP = 19 + MIB_IPPROTO_NT_AUTOSTATIC = 10002 + MIB_IPPROTO_NT_STATIC = 10006 + MIB_IPPROTO_NT_STATIC_NON_DOD = 10007 +) + +// MIB_IPFORWARD_ROW2 stores information about an IP route entry. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_row2. +type MibIpForwardRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + DestinationPrefix IpAddressPrefix + NextHop RawSockaddrInet + SitePrefixLength uint8 + ValidLifetime uint32 + PreferredLifetime uint32 + Metric uint32 + Protocol uint32 + Loopback uint8 + AutoconfigureAddress uint8 + Publish uint8 + Immortal uint8 + Age uint32 + Origin uint32 +} + +// MIB_IPFORWARD_TABLE2 contains a table of IP route entries. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_table2. +type MibIpForwardTable2 struct { + NumEntries uint32 + Table [1]MibIpForwardRow2 +} + +// Rows returns the IP route entries in the table. +func (t *MibIpForwardTable2) Rows() []MibIpForwardRow2 { + return unsafe.Slice(&t.Table[0], t.NumEntries) +} + // MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See // https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. 
type MibUnicastIpAddressRow struct { diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 641a5f4b7..f25b7308a 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -182,13 +182,17 @@ var ( procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") + procFreeMibTable = modiphlpapi.NewProc("FreeMibTable") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetIpForwardEntry2 = modiphlpapi.NewProc("GetIpForwardEntry2") + procGetIpForwardTable2 = modiphlpapi.NewProc("GetIpForwardTable2") procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyRouteChange2 = modiphlpapi.NewProc("NotifyRouteChange2") procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") @@ -238,6 +242,7 @@ var ( procFindResourceW = modkernel32.NewProc("FindResourceW") procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procFlushConsoleInputBuffer = modkernel32.NewProc("FlushConsoleInputBuffer") procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") procFormatMessageW = modkernel32.NewProc("FormatMessageW") @@ -284,6 +289,7 @@ var ( procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") + procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -1622,6 +1628,11 @@ func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { return } +func FreeMibTable(memory unsafe.Pointer) { + syscall.SyscallN(procFreeMibTable.Addr(), uintptr(memory)) + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer))) if r0 != 0 { @@ -1662,6 +1673,22 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { return } +func GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIpForwardEntry2.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) { + r0, _, _ := 
syscall.SyscallN(procGetIpForwardTable2.Addr(), uintptr(family), uintptr(unsafe.Pointer(table))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) if r0 != 0 { @@ -1682,6 +1709,18 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa return } +func NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyRouteChange2.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { var _p0 uint32 if initialNotification { @@ -2111,6 +2150,14 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { return } +func FlushConsoleInputBuffer(console Handle) (err error) { + r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func FlushFileBuffers(handle Handle) (err error) { r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle)) if r1 == 0 { @@ -2481,6 +2528,14 @@ func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err erro return } +func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { var _p0 uint32 if wait { diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go index bddb2e2ae..6ec537cdc 100644 --- a/vendor/golang.org/x/term/terminal.go +++ b/vendor/golang.org/x/term/terminal.go @@ -160,7 +160,9 @@ const ( keyEnd keyDeleteWord keyDeleteLine + keyDelete keyClearScreen + keyTranspose keyPasteStart keyPasteEnd ) @@ -194,6 +196,8 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { return keyDeleteLine, b[1:] case 12: // ^L return keyClearScreen, b[1:] + case 20: // ^T + return keyTranspose, b[1:] case 23: // ^W return keyDeleteWord, b[1:] case 14: // ^N @@ -228,6 +232,10 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { } } + if !pasteActive && len(b) >= 4 && b[0] == keyEscape && b[1] == '[' && b[2] == '3' && b[3] == '~' { + return keyDelete, b[4:] + } + if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { switch b[5] { case 'C': @@ -413,7 +421,7 @@ func (t *Terminal) eraseNPreviousChars(n int) { } } -// countToLeftWord returns then number of characters from the cursor to the +// countToLeftWord returns the number of characters from the cursor to the // start of the previous word. 
func (t *Terminal) countToLeftWord() int { if t.pos == 0 { @@ -438,7 +446,7 @@ func (t *Terminal) countToLeftWord() int { return t.pos - pos } -// countToRightWord returns then number of characters from the cursor to the +// countToRightWord returns the number of characters from the cursor to the // start of the next word. func (t *Terminal) countToRightWord() int { pos := t.pos @@ -478,7 +486,7 @@ func visualLength(runes []rune) int { return length } -// histroryAt unlocks the terminal and relocks it while calling History.At. +// historyAt unlocks the terminal and relocks it while calling History.At. func (t *Terminal) historyAt(idx int) (string, bool) { t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer. defer t.lock.Lock() // panic in At (or Len) protection. @@ -590,7 +598,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) { } t.line = t.line[:t.pos] t.moveCursorToPos(t.pos) - case keyCtrlD: + case keyCtrlD, keyDelete: // Erase the character under the current position. // The EOF case when the line is empty is handled in // readLine(). @@ -600,6 +608,24 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) { } case keyCtrlU: t.eraseNPreviousChars(t.pos) + case keyTranspose: + // This transposes the two characters around the cursor and advances the cursor. Best-effort. + if len(t.line) < 2 || t.pos < 1 { + return + } + swap := t.pos + if swap == len(t.line) { + swap-- // special: at end of line, swap previous two chars + } + t.line[swap-1], t.line[swap] = t.line[swap], t.line[swap-1] + if t.pos < len(t.line) { + t.pos++ + } + if t.echo { + t.moveCursorToPos(swap - 1) + t.writeLine(t.line[swap-1:]) + t.moveCursorToPos(t.pos) + } case keyClearScreen: // Erases the screen and moves the cursor to the home position. t.queue([]rune("\x1b[2J\x1b[H")) diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index 9d2ae547b..fb8273236 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -427,13 +427,6 @@ type isolatingRunSequence struct { func (i *isolatingRunSequence) Len() int { return len(i.indexes) } -func maxLevel(a, b level) level { - if a > b { - return a - } - return b -} - // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, // either L or R, for each isolating run sequence. 
func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { @@ -474,8 +467,8 @@ func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { indexes: indexes, types: types, level: level, - sos: typeForLevel(maxLevel(prevLevel, level)), - eos: typeForLevel(maxLevel(succLevel, level)), + sos: typeForLevel(max(prevLevel, level)), + eos: typeForLevel(max(succLevel, level)), } } diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 794b2e32b..563270c15 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -195,7 +195,7 @@ func (r *Reservation) CancelAt(t time.Time) { // update state r.lim.last = t r.lim.tokens = tokens - if r.timeToAct == r.lim.lastEvent { + if r.timeToAct.Equal(r.lim.lastEvent) { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 1de0ce666..2079de7b0 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -33,17 +33,21 @@ guidelines, there may be valid reasons to do so, but it should be rare. ## Guidelines for Pull Requests -How to get your contributions merged smoothly and quickly: +Please read the following carefully to ensure your contributions can be merged +smoothly and quickly. + +### PR Contents - Create **small PRs** that are narrowly focused on **addressing a single concern**. We often receive PRs that attempt to fix several things at the same time, and if one part of the PR has a problem, that will hold up the entire PR. -- For **speculative changes**, consider opening an issue and discussing it - first. If you are suggesting a behavioral or API change, consider starting - with a [gRFC proposal](https://github.com/grpc/proposal). Many new features - that are not bug fixes will require cross-language agreement. +- If your change does not address an **open issue** with an **agreed + resolution**, consider opening an issue and discussing it first. If you are + suggesting a behavioral or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). Many new features that are not + bug fixes will require cross-language agreement. - If you want to fix **formatting or style**, consider whether your changes are an obvious improvement or might be considered a personal preference. If a @@ -56,16 +60,6 @@ How to get your contributions merged smoothly and quickly: often written as "iff". Please do not make spelling correction changes unless you are certain they are misspellings. -- Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a GitHub issue if it exists. - -- Maintain a **clean commit history** and use **meaningful commit messages**. - PRs with messy commit histories are difficult to review and won't be merged. - Before sending your PR, ensure your changes are based on top of the latest - `upstream/master` commits, and avoid rebasing in the middle of a code review. - You should **never use `git push -f`** unless absolutely necessary during a - review, as it can interfere with GitHub's tracking of comments. - - **All tests need to be passing** before your change can be merged. 
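Stepping back to the x/time/rate hunk above: replacing `==` with `time.Time.Equal` when comparing `timeToAct` against `lastEvent` matters because `==` on `time.Time` compares every struct field, including the monotonic clock reading, so two representations of the same instant can compare unequal. A quick illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Now()         // carries a monotonic clock reading
	u := t.Round(0)         // same instant, monotonic reading stripped
	fmt.Println(t == u)     // false: the struct fields differ
	fmt.Println(t.Equal(u)) // true: same instant on the wall clock
}
```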
We recommend you run tests locally before creating your PR to catch breakages early on:

@@ -81,15 +75,80 @@ How to get your contributions merged smoothly and quickly:
   GitHub, which will trigger a GitHub Actions run that you can use to verify
   everything is passing.
 
-- If you are adding a new file, make sure it has the **copyright message**
+- Note that there are two GitHub actions checks that need not be green:
+
+  1. We test the freshness of the generated proto code we maintain via the
+     `vet-proto` check. If the source proto files are updated, but our repo is
+     not updated, an optional checker will fail. This will be fixed by our team
+     in a separate PR and will not prevent the merge of your PR.
+
+  2. We run a checker that will fail if there is any change in dependencies of
+     an exported package via the `dependencies` check. If new dependencies are
+     added that are not appropriate, we may not accept your PR (see below).
+
+- If you are adding a **new file**, make sure it has the **copyright message**
   template at the top as a comment. You can copy the message from an existing
   file and update the year.
 
 - The grpc package should only depend on standard Go packages and a small
   number of exceptions. **If your contribution introduces new dependencies**,
   you will
-  need a discussion with gRPC-Go maintainers. A GitHub action check will run on
-  every PR, and will flag any transitive dependency changes from any public
-  package.
+  need a discussion with gRPC-Go maintainers.
+
+### PR Descriptions
+
+- **PR titles** should start with the name of the component being addressed, or
+  the type of change. Examples: transport, client, server, round_robin, xds,
+  cleanup, deps.
+
+- Read and follow the **guidelines for PR titles and descriptions** here:
+  https://google.github.io/eng-practices/review/developer/cl-descriptions.html
+
+  *particularly* the sections "First Line" and "Body is Informative".
+
+  Note: your PR description will be used as the git commit message in a
+  squash-and-merge if your PR is approved. We may make changes to this as
+  necessary.
+
+- **Does this PR relate to an open issue?** On the first line, please use the
+  tag `Fixes #<issue>` to ensure the issue is closed when the PR is merged. Or
+  use `Updates #<issue>` if the PR is related to an open issue, but does not
+  fix it. Consider filing an issue if one does not already exist.
+
+- PR descriptions *must* conclude with **release notes** as follows:
+
+  ```
+  RELEASE NOTES:
+  * <component>: <summary>
+  ```
+
+  This need not match the PR title.
+
+  The summary must:
+
+  * be something that gRPC users will understand.
+
+  * clearly explain the feature being added, the issue being fixed, or the
+    behavior being changed, etc. If fixing a bug, be clear about how the bug
+    can be triggered by an end-user.
+
+  * begin with a capital letter and use complete sentences.
+
+  * be as short as possible to describe the change being made.
+
+  If a PR is *not* end-user visible -- e.g. a cleanup, testing change, or
+  GitHub-related, use `RELEASE NOTES: n/a`.
+
+### PR Process
+
+- Please **self-review** your code changes before sending your PR. This will
+  prevent simple, obvious errors from causing delays.
+
+- Maintain a **clean commit history** and use **meaningful commit messages**.
+  PRs with messy commit histories are difficult to review and won't be merged.
+  Before sending your PR, ensure your changes are based on top of the latest
+  `upstream/master` commits, and avoid rebasing in the middle of a code review.
+ You should **never use `git push -f`** unless absolutely necessary during a + review, as it can interfere with GitHub's tracking of comments. - Unless your PR is trivial, you should **expect reviewer comments** that you will need to address before merging. We'll label the PR as `Status: Requires @@ -98,5 +157,3 @@ How to get your contributions merged smoothly and quickly: `stale`, and we will automatically close it after 7 days if we don't hear back from you. Please feel free to ping issues or bugs if you do not get a response within a week. - -- Exceptions to the rules can be made if there's a compelling reason to do so. diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index ea8899818..b4bc3a2bf 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -16,55 +16,124 @@ * */ -// Package pickfirst contains the pick_first load balancing policy. +// Package pickfirst contains the pick_first load balancing policy which +// is the universal leaf policy. package pickfirst import ( "encoding/json" "errors" "fmt" - rand "math/rand/v2" + "net" + "net/netip" + "sync" + "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" + expstats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - - _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. ) func init() { - if envconfig.NewPickFirstEnabled { - return - } balancer.Register(pickfirstBuilder{}) } -var logger = grpclog.Component("pick-first-lb") +// Name is the name of the pick_first balancer. +const Name = "pick_first" + +// enableHealthListenerKeyType is a unique key type used in resolver +// attributes to indicate whether the health listener usage is enabled. +type enableHealthListenerKeyType struct{} + +var ( + logger = grpclog.Component("pick-first-leaf-lb") + disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.disconnections", + Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", + Unit: "{disconnection}", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_succeeded", + Description: "EXPERIMENTAL. Number of successful connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_failed", + Description: "EXPERIMENTAL. Number of failed connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + Default: false, + }) +) const ( - // Name is the name of the pick_first balancer. - Name = "pick_first" - logPrefix = "[pick-first-lb %p] " + // TODO: change to pick-first when this becomes the default pick_first policy. 
+ logPrefix = "[pick-first-leaf-lb %p] " + // connectionDelayInterval is the time to wait for during the happy eyeballs + // pass before starting the next connection attempt. + connectionDelayInterval = 250 * time.Millisecond +) + +type ipAddrFamily int + +const ( + // ipAddrFamilyUnknown represents strings that can't be parsed as an IP + // address. + ipAddrFamilyUnknown ipAddrFamily = iota + ipAddrFamilyV4 + ipAddrFamilyV6 ) type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { - b := &pickfirstBalancer{cc: cc} +func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + target: bo.Target.String(), + metricsRecorder: cc.MetricsRecorder(), + + subConns: resolver.NewAddressMapV2[*scData](), + state: connectivity.Connecting, + cancelConnectionTimer: func() {}, + } b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b } -func (pickfirstBuilder) Name() string { +func (b pickfirstBuilder) Name() string { return Name } +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +// EnableHealthListener updates the state to configure pickfirst for using a +// generic health listener. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func EnableHealthListener(state resolver.State) resolver.State { + state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) + return state +} + type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -74,90 +143,129 @@ type pfConfig struct { ShuffleAddressList bool `json:"shuffleAddressList"` } -func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg pfConfig - if err := json.Unmarshal(js, &cfg); err != nil { - return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) +// scData keeps track of the current state of the subConn. +// It is not safe for concurrent access. +type scData struct { + // The following fields are initialized at build time and read-only after + // that. + subConn balancer.SubConn + addr resolver.Address + + rawConnectivityState connectivity.State + // The effective connectivity state based on raw connectivity, health state + // and after following sticky TransientFailure behaviour defined in A62. 
+ effectiveState connectivity.State + lastErr error + connectionFailedInFirstPass bool +} + +func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + sd := &scData{ + rawConnectivityState: connectivity.Idle, + effectiveState: connectivity.Idle, + addr: addr, } - return cfg, nil + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sd, state) + }, + }) + if err != nil { + return nil, err + } + sd.subConn = sc + return sd, nil } type pickfirstBalancer struct { - logger *internalgrpclog.PrefixLogger - state connectivity.State - cc balancer.ClientConn - subConn balancer.SubConn + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + target string + metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil + + // The mutex is used to ensure synchronization of updates triggered + // from the idle picker and the already serialized resolver, + // SubConn state updates. + mu sync.Mutex + // State reported to the channel based on SubConn states and resolver + // updates. + state connectivity.State + // scData for active subonns mapped by address. + subConns *resolver.AddressMapV2[*scData] + addressList addressList + firstPass bool + numTF int + cancelConnectionTimer func() + healthCheckingEnabled bool } +// ResolverError is called by the ClientConn when the name resolver produces +// an error or when pickfirst determined the resolver update to be invalid. func (b *pickfirstBalancer) ResolverError(err error) { + b.mu.Lock() + defer b.mu.Unlock() + b.resolverErrorLocked(err) +} + +func (b *pickfirstBalancer) resolverErrorLocked(err error) { if b.logger.V(2) { b.logger.Infof("Received error from the name resolver: %v", err) } - if b.subConn == nil { - b.state = connectivity.TransientFailure - } - if b.state != connectivity.TransientFailure { - // The picker will not change since the balancer does not currently - // report an error. + // The picker will not change since the balancer does not currently + // report an error. If the balancer hasn't received a single good resolver + // update yet, transition to TRANSIENT_FAILURE. + if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { + if b.logger.V(2) { + b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") + } return } - b.cc.UpdateState(balancer.State{ + + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, }) } -// Shuffler is an interface for shuffling an address list. -type Shuffler interface { - ShuffleAddressListForTesting(n int, swap func(i, j int)) -} - -// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n -// is the number of elements. swap swaps the elements with indexes i and j. -func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } - func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + b.mu.Lock() + defer b.mu.Unlock() + b.cancelConnectionTimer() if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { - // The resolver reported an empty address list. Treat it like an error by - // calling b.ResolverError. - if b.subConn != nil { - // Shut down the old subConn. 
All addresses were removed, so it is - // no longer valid. - b.subConn.Shutdown() - b.subConn = nil - } - b.ResolverError(errors.New("produced zero addresses")) + // Cleanup state pertaining to the previous resolver state. + // Treat an empty address list like an error by calling b.ResolverError. + b.closeSubConnsLocked() + b.addressList.updateAddrs(nil) + b.resolverErrorLocked(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - // We don't have to guard this block with the env var because ParseConfig - // already does so. + b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { - return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) } if b.logger.V(2) { b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) } - var addrs []resolver.Address + var newAddrs []resolver.Address if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { - // Perform the optional shuffling described in gRFC A62. The shuffling will - // change the order of endpoints but not touch the order of the addresses - // within each endpoint. - A61 + // Perform the optional shuffling described in gRFC A62. The shuffling + // will change the order of endpoints but not touch the order of the + // addresses within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } - // "Flatten the list by concatenating the ordered list of addresses for each - // of the endpoints, in order." - A61 + // "Flatten the list by concatenating the ordered list of addresses for + // each of the endpoints, in order." - A61 for _, endpoint := range endpoints { - // "In the flattened list, interleave addresses from the two address - // families, as per RFC-8304 section 4." - A61 - // TODO: support the above language. - addrs = append(addrs, endpoint.Addresses...) + newAddrs = append(newAddrs, endpoint.Addresses...) } } else { // Endpoints not set, process addresses until we migrate resolver @@ -166,42 +274,53 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. - addrs = state.ResolverState.Addresses + newAddrs = state.ResolverState.Addresses if cfg.ShuffleAddressList { - addrs = append([]resolver.Address{}, addrs...) - rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + newAddrs = append([]resolver.Address{}, newAddrs...) + internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] }) } } - if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, addrs) + // If an address appears in multiple endpoints or in the same endpoint + // multiple times, we keep it only once. We will create only one SubConn + // for the address because an AddressMap is used to store SubConns. 
+ // Not de-duplicating would result in attempting to connect to the same + // SubConn multiple times in the same pass. We don't want this. + newAddrs = deDupAddresses(newAddrs) + newAddrs = interleaveAddresses(newAddrs) + + prevAddr := b.addressList.currentAddress() + prevSCData, found := b.subConns.Get(prevAddr) + prevAddrsCount := b.addressList.size() + isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready + b.addressList.updateAddrs(newAddrs) + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. + if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { return nil } - var subConn balancer.SubConn - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ - StateListener: func(state balancer.SubConnState) { - b.updateSubConnState(subConn, state) - }, - }) - if err != nil { - if b.logger.V(2) { - b.logger.Infof("Failed to create new SubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + b.reconcileSubConnsLocked(newAddrs) + // If it's the first resolver update or the balancer was already READY + // (but the new address list does not contain the ready SubConn) or + // CONNECTING, enter CONNECTING. + // We may be in TRANSIENT_FAILURE due to a previous empty address list, + // we should still enter CONNECTING because the sticky TF behaviour + // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported + // due to connectivity failures. + if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { + // Start connection attempt at first address. + b.forceUpdateConcludedStateLocked(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) - return balancer.ErrBadResolverState + b.startFirstPassLocked() + } else if b.state == connectivity.TransientFailure { + // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until + // we're READY. See A62. 
+ b.startFirstPassLocked() } - b.subConn = subConn - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - b.subConn.Connect() return nil } @@ -211,63 +330,484 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) } -func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if b.logger.V(2) { - b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) - } - if b.subConn != subConn { - if b.logger.V(2) { - b.logger.Infof("Ignored state change because subConn is not recognized") - } - return - } - if state.ConnectivityState == connectivity.Shutdown { - b.subConn = nil - return - } +func (b *pickfirstBalancer) Close() { + b.mu.Lock() + defer b.mu.Unlock() + b.closeSubConnsLocked() + b.cancelConnectionTimer() + b.state = connectivity.Shutdown +} - switch state.ConnectivityState { - case connectivity.Ready: - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, - }) - case connectivity.Connecting: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. See A62. - return - } - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, +// ExitIdle moves the balancer out of idle state. It can be called concurrently +// by the idlePicker and clientConn so access to variables should be +// synchronized. +func (b *pickfirstBalancer) ExitIdle() { + b.mu.Lock() + defer b.mu.Unlock() + if b.state == connectivity.Idle { + // Move the balancer into CONNECTING state immediately. This is done to + // avoid staying in IDLE if a resolver update arrives before the first + // SubConn reports CONNECTING. + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) - case connectivity.Idle: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. Also kick the - // subConn out of Idle into Connecting. See A62. - b.subConn.Connect() + b.startFirstPassLocked() + } +} + +func (b *pickfirstBalancer) startFirstPassLocked() { + b.firstPass = true + b.numTF = 0 + // Reset the connection attempt record for existing SubConns. + for _, sd := range b.subConns.Values() { + sd.connectionFailedInFirstPass = false + } + b.requestConnectionLocked() +} + +func (b *pickfirstBalancer) closeSubConnsLocked() { + for _, sd := range b.subConns.Values() { + sd.subConn.Shutdown() + } + b.subConns = resolver.NewAddressMapV2[*scData]() +} + +// deDupAddresses ensures that each address appears only once in the slice. +func deDupAddresses(addrs []resolver.Address) []resolver.Address { + seenAddrs := resolver.NewAddressMapV2[bool]() + retAddrs := []resolver.Address{} + + for _, addr := range addrs { + if _, ok := seenAddrs.Get(addr); ok { + continue + } + seenAddrs.Set(addr, true) + retAddrs = append(retAddrs, addr) + } + return retAddrs +} + +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) +// as per RFC-8305 section 4. 
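The de-duplication described above is first-occurrence-wins filtering keyed on the address. A hedged sketch of the same idea over plain strings, with a built-in map standing in for the vendored `resolver.AddressMapV2`:

```go
package main

import "fmt"

// deDup keeps the first occurrence of each key and drops later
// duplicates, preserving the remaining order, just like the vendored
// deDupAddresses does for resolver.Address values.
func deDup(addrs []string) []string {
	seen := make(map[string]bool, len(addrs))
	out := make([]string, 0, len(addrs))
	for _, a := range addrs {
		if seen[a] {
			continue
		}
		seen[a] = true
		out = append(out, a)
	}
	return out
}

func main() {
	fmt.Println(deDup([]string{"a:1", "b:1", "a:1"})) // [a:1 b:1]
}
```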
+// Whichever address family is first in the list is followed by an address of +// the other address family; that is, if the first address in the list is IPv6, +// then the first IPv4 address should be moved up in the list to be second in +// the list. It doesn't support configuring "First Address Family Count", i.e. +// there will always be a single member of the first address family at the +// beginning of the interleaved list. +// Addresses that are neither IPv4 nor IPv6 are treated as part of a third +// "unknown" family for interleaving. +// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 +func interleaveAddresses(addrs []resolver.Address) []resolver.Address { + familyAddrsMap := map[ipAddrFamily][]resolver.Address{} + interleavingOrder := []ipAddrFamily{} + for _, addr := range addrs { + family := addressFamily(addr.Addr) + if _, found := familyAddrsMap[family]; !found { + interleavingOrder = append(interleavingOrder, family) + } + familyAddrsMap[family] = append(familyAddrsMap[family], addr) + } + + interleavedAddrs := make([]resolver.Address, 0, len(addrs)) + + for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { + // Some IP types may have fewer addresses than others, so we look for + // the next type that has a remaining member to add to the interleaved + // list. + family := interleavingOrder[curFamilyIdx] + remainingMembers := familyAddrsMap[family] + if len(remainingMembers) > 0 { + interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) + familyAddrsMap[family] = remainingMembers[1:] + } + } + + return interleavedAddrs +} + +// addressFamily returns the ipAddrFamily after parsing the address string. +// If the address isn't of the format "ip-address:port", it returns +// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when +// using a resolver like passthrough where the address may be a hostname in +// some format that the dialer can resolve. +func addressFamily(address string) ipAddrFamily { + // Parse the IP after removing the port. + host, _, err := net.SplitHostPort(address) + if err != nil { + return ipAddrFamilyUnknown + } + ip, err := netip.ParseAddr(host) + if err != nil { + return ipAddrFamilyUnknown + } + switch { + case ip.Is4() || ip.Is4In6(): + return ipAddrFamilyV4 + case ip.Is6(): + return ipAddrFamilyV6 + default: + return ipAddrFamilyUnknown + } +} + +// reconcileSubConnsLocked updates the active subchannels based on a new address +// list from the resolver. It does this by: +// - closing subchannels: any existing subchannels associated with addresses +// that are no longer in the updated list are shut down. +// - removing subchannels: entries for these closed subchannels are removed +// from the subchannel map. +// +// This ensures that the subchannel map accurately reflects the current set of +// addresses received from the name resolver. +func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { + newAddrsMap := resolver.NewAddressMapV2[bool]() + for _, addr := range newAddrs { + newAddrsMap.Set(addr, true) + } + + for _, oldAddr := range b.subConns.Keys() { + if _, ok := newAddrsMap.Get(oldAddr); ok { + continue + } + val, _ := b.subConns.Get(oldAddr) + val.subConn.Shutdown() + b.subConns.Delete(oldAddr) + } +} + +// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn +// becomes ready, which means that all other subConn must be shutdown. 
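`interleaveAddresses` relies on `addressFamily` to bucket each address before round-robining across the buckets, so for example `[v6a, v6b, v4a]` interleaves to `[v6a, v4a, v6b]`. A standalone sketch of the classification step, using the same standard-library calls as the vendored code:

```go
package main

import (
	"fmt"
	"net"
	"net/netip"
)

// family buckets an "ip:port" string as IPv4 (including IPv4-mapped
// IPv6), IPv6, or unknown (e.g. hostnames a dialer resolves later),
// mirroring the vendored addressFamily.
func family(address string) string {
	host, _, err := net.SplitHostPort(address)
	if err != nil {
		return "unknown"
	}
	ip, err := netip.ParseAddr(host)
	if err != nil {
		return "unknown"
	}
	switch {
	case ip.Is4() || ip.Is4In6():
		return "v4"
	case ip.Is6():
		return "v6"
	default:
		return "unknown"
	}
}

func main() {
	for _, a := range []string{"10.0.0.1:443", "[2001:db8::1]:443", "example.com:443"} {
		fmt.Println(a, "->", family(a)) // v4, v6, unknown
	}
}
```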
+func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + b.cancelConnectionTimer() + for _, sd := range b.subConns.Values() { + if sd.subConn != selected.subConn { + sd.subConn.Shutdown() + } + } + b.subConns = resolver.NewAddressMapV2[*scData]() + b.subConns.Set(selected.addr, selected) +} + +// requestConnectionLocked starts connecting on the subchannel corresponding to +// the current address. If no subchannel exists, one is created. If the current +// subchannel is in TransientFailure, a connection to the next address is +// attempted until a subchannel is found. +func (b *pickfirstBalancer) requestConnectionLocked() { + if !b.addressList.isValid() { + return + } + var lastErr error + for valid := true; valid; valid = b.addressList.increment() { + curAddr := b.addressList.currentAddress() + sd, ok := b.subConns.Get(curAddr) + if !ok { + var err error + // We want to assign the new scData to sd from the outer scope, + // hence we can't use := below. + sd, err = b.newSCData(curAddr) + if err != nil { + // This should never happen, unless the clientConn is being shut + // down. + if b.logger.V(2) { + b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) + } + // Do nothing, the LB policy will be closed soon. + return + } + b.subConns.Set(curAddr, sd) + } + + switch sd.rawConnectivityState { + case connectivity.Idle: + sd.subConn.Connect() + b.scheduleNextConnectionLocked() + return + case connectivity.TransientFailure: + // The SubConn is being re-used and failed during a previous pass + // over the addressList. It has not completed backoff yet. + // Mark it as having failed and try the next address. + sd.connectionFailedInFirstPass = true + lastErr = sd.lastErr + continue + case connectivity.Connecting: + // Wait for the connection attempt to complete or the timer to fire + // before attempting the next address. + b.scheduleNextConnectionLocked() + return + default: + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState) + return + + } + } + + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the + // first pass if possible. + b.endFirstPassIfPossibleLocked(lastErr) +} + +func (b *pickfirstBalancer) scheduleNextConnectionLocked() { + b.cancelConnectionTimer() + if !b.addressList.hasNext() { + return + } + curAddr := b.addressList.currentAddress() + cancelled := false // Access to this is protected by the balancer's mutex. + closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { + b.mu.Lock() + defer b.mu.Unlock() + // If the scheduled task is cancelled while acquiring the mutex, return. + if cancelled { return } - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &idlePicker{subConn: subConn}, + if b.logger.V(2) { + b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) + } + if b.addressList.increment() { + b.requestConnectionLocked() + } + }) + // Access to the cancellation callback held by the balancer is guarded by + // the balancer's mutex, so it's safe to set the boolean from the callback. + b.cancelConnectionTimer = sync.OnceFunc(func() { + cancelled = true + closeFn() + }) +} + +func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + oldState := sd.rawConnectivityState + sd.rawConnectivityState = newState.ConnectivityState + // Previously relevant SubConns can still callback with state updates. 
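`scheduleNextConnectionLocked` above combines a delayed callback with a mutex-guarded `cancelled` flag, wrapping the canceller in `sync.OnceFunc` so it is idempotent. A hedged standalone sketch of that pattern, substituting `time.AfterFunc` for the internal `TimeAfterFunc` helper:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// schedule runs work after d unless the returned cancel is called first.
// The callback and the canceller coordinate through a flag guarded by mu.
func schedule(mu *sync.Mutex, d time.Duration, work func()) (cancel func()) {
	cancelled := false // guarded by mu
	t := time.AfterFunc(d, func() {
		mu.Lock()
		defer mu.Unlock()
		if cancelled { // cancel won the race while we waited for the lock
			return
		}
		work()
	})
	return sync.OnceFunc(func() {
		cancelled = true // caller holds mu, mirroring the balancer's usage
		t.Stop()
	})
}

func main() {
	var mu sync.Mutex
	cancel := schedule(&mu, 10*time.Millisecond, func() { fmt.Println("fired") })
	mu.Lock()
	cancel() // flip the flag before the timer body can run
	mu.Unlock()
	time.Sleep(50 * time.Millisecond) // nothing prints
}
```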
+ // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes this + // SubConn. + if !b.isActiveSCData(sd) { + return + } + if newState.ConnectivityState == connectivity.Shutdown { + sd.effectiveState = connectivity.Shutdown + return + } + + // Record a connection attempt when exiting CONNECTING. + if newState.ConnectivityState == connectivity.TransientFailure { + sd.connectionFailedInFirstPass = true + connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) + } + + if newState.ConnectivityState == connectivity.Ready { + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + b.shutdownRemainingLocked(sd) + if !b.addressList.seekTo(sd.addr) { + // This should not fail as we should have only one SubConn after + // entering READY. The SubConn should be present in the addressList. + b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) + return + } + if !b.healthCheckingEnabled { + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) + } + + sd.effectiveState = connectivity.Ready + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + return + } + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn) + } + // Send a CONNECTING update to take the SubConn out of sticky-TF if + // required. + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { + b.updateSubConnHealthState(sd, scs) + }) + return + } + + // If the LB policy is READY, and it receives a subchannel state change, + // it means that the READY subchannel has failed. + // A SubConn can also transition from CONNECTING directly to IDLE when + // a transport is successfully created, but the connection fails + // before the SubConn can send the notification for READY. We treat + // this as a successful connection and transition to IDLE. + // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second + // part of the if condition below once the issue is fixed. + if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // Once a transport fails, the balancer enters IDLE and starts from + // the first address when the picker is used. + b.shutdownRemainingLocked(sd) + sd.effectiveState = newState.ConnectivityState + // READY SubConn interspliced in between CONNECTING and IDLE, need to + // account for that. + if oldState == connectivity.Connecting { + // A known issue (https://github.com/grpc/grpc-go/issues/7862) + // causes a race that prevents the READY state change notification. + // This works around it. 
+ connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + } + disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) + b.addressList.reset() + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, + }) + return + } + + if b.firstPass { + switch newState.ConnectivityState { + case connectivity.Connecting: + // The effective state can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in + // TRANSIENT_FAILURE until it's READY. See A62. + if sd.effectiveState != connectivity.TransientFailure { + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + } + case connectivity.TransientFailure: + sd.lastErr = newState.ConnectionError + sd.effectiveState = connectivity.TransientFailure + // Since we're re-using common SubConns while handling resolver + // updates, we could receive an out of turn TRANSIENT_FAILURE from + // a pass over the previous address list. Happy Eyeballs will also + // cause out of order updates to arrive. + + if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + b.cancelConnectionTimer() + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + } + + // End the first pass if we've seen a TRANSIENT_FAILURE from all + // SubConns once. + b.endFirstPassIfPossibleLocked(newState.ConnectionError) + } + return + } + + // We have finished the first pass, keep re-connecting failing SubConns. + switch newState.ConnectivityState { + case connectivity.TransientFailure: + b.numTF = (b.numTF + 1) % b.subConns.Len() + sd.lastErr = newState.ConnectionError + if b.numTF%b.subConns.Len() == 0 { + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: newState.ConnectionError}, + }) + } + // We don't need to request re-resolution since the SubConn already + // does that before reporting TRANSIENT_FAILURE. + // TODO: #7534 - Move re-resolution requests from SubConn into + // pick_first. + case connectivity.Idle: + sd.subConn.Connect() + } +} + +// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the +// addresses are tried and their SubConns have reported a failure. +func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { + // An optimization to avoid iterating over the entire SubConn map. + if b.addressList.isValid() { + return + } + // Connect() has been called on all the SubConns. The first pass can be + // ended if all the SubConns have reported a failure. + for _, sd := range b.subConns.Values() { + if !sd.connectionFailedInFirstPass { + return + } + } + b.firstPass = false + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: lastErr}, + }) + // Start re-connecting all the SubConns that are already in IDLE. + for _, sd := range b.subConns.Values() { + if sd.rawConnectivityState == connectivity.Idle { + sd.subConn.Connect() + } + } +} + +func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { + activeSD, found := b.subConns.Get(sd.addr) + return found && activeSD == sd +} + +func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + // Previously relevant SubConns can still callback with state updates. 
+	// To prevent pickers from returning these obsolete SubConns, this logic
+	// is included to check if the current list of active SubConns includes
+	// this SubConn.
+	if !b.isActiveSCData(sd) {
+		return
+	}
+	sd.effectiveState = state.ConnectivityState
+	switch state.ConnectivityState {
+	case connectivity.Ready:
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Ready,
+			Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
 		})
 	case connectivity.TransientFailure:
-		b.cc.UpdateState(balancer.State{
-			ConnectivityState: state.ConnectivityState,
-			Picker:            &picker{err: state.ConnectionError},
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.TransientFailure,
+			Picker:            &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
 		})
+	case connectivity.Connecting:
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+	default:
+		b.logger.Errorf("Got unexpected health update for SubConn %p: %v", sd.subConn, state)
 	}
-	b.state = state.ConnectivityState
 }
 
-func (b *pickfirstBalancer) Close() {
+// updateBalancerState stores the state reported to the channel and calls
+// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
+// updates to the channel.
+func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
+	// In case of TransientFailures allow the picker to be updated to update
+	// the connectivity error, in all other cases don't send duplicate state
+	// updates.
+	if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
+		return
+	}
+	b.forceUpdateConcludedStateLocked(newState)
 }
 
-func (b *pickfirstBalancer) ExitIdle() {
-	if b.subConn != nil && b.state == connectivity.Idle {
-		b.subConn.Connect()
-	}
+// forceUpdateConcludedStateLocked stores the state reported to the channel and
+// calls ClientConn.UpdateState().
+// A separate function is defined to force update the ClientConn state since the
+// channel doesn't correctly assume that LB policies start in CONNECTING and
+// relies on LB policy to send an initial CONNECTING update.
+func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
+	b.state = newState.ConnectivityState
+	b.cc.UpdateState(newState)
 }
 
 type picker struct {
@@ -282,10 +822,87 @@ func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
 // idlePicker is used when the SubConn is IDLE and kicks the SubConn into
 // CONNECTING when Pick is called.
 type idlePicker struct {
-	subConn balancer.SubConn
+	exitIdle func()
 }
 
 func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
-	i.subConn.Connect()
+	i.exitIdle()
 	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
 }
+
+// addressList manages sequentially iterating over addresses present in a list
+// of endpoints. It provides a 1 dimensional view of the addresses present in
+// the endpoints.
+// This type is not safe for concurrent access.
+type addressList struct {
+	addresses []resolver.Address
+	idx       int
+}
+
+func (al *addressList) isValid() bool {
+	return al.idx < len(al.addresses)
+}
+
+func (al *addressList) size() int {
+	return len(al.addresses)
+}
+
+// increment moves to the next index in the address list.
+// This method returns false if it went off the list, true otherwise.
+func (al *addressList) increment() bool { + if !al.isValid() { + return false + } + al.idx++ + return al.idx < len(al.addresses) +} + +// currentAddress returns the current address pointed to in the addressList. +// If the list is in an invalid state, it returns an empty address instead. +func (al *addressList) currentAddress() resolver.Address { + if !al.isValid() { + return resolver.Address{} + } + return al.addresses[al.idx] +} + +func (al *addressList) reset() { + al.idx = 0 +} + +func (al *addressList) updateAddrs(addrs []resolver.Address) { + al.addresses = addrs + al.reset() +} + +// seekTo returns false if the needle was not found and the current index was +// left unchanged. +func (al *addressList) seekTo(needle resolver.Address) bool { + for ai, addr := range al.addresses { + if !equalAddressIgnoringBalAttributes(&addr, &needle) { + continue + } + al.idx = ai + return true + } + return false +} + +// hasNext returns whether incrementing the addressList will result in moving +// past the end of the list. If the list has already moved past the end, it +// returns false. +func (al *addressList) hasNext() bool { + if !al.isValid() { + return false + } + return al.idx+1 < len(al.addresses) +} + +// equalAddressIgnoringBalAttributes returns true is a and b are considered +// equal. This is different from the Equal method on the resolver.Address type +// which considers all fields to determine equality. Here, we only consider +// fields that are meaningful to the SubConn. +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) +} diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go deleted file mode 100644 index 67f315a0d..000000000 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ /dev/null @@ -1,906 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package pickfirstleaf contains the pick_first load balancing policy which -// will be the universal leaf policy after dualstack changes are implemented. -// -// # Experimental -// -// Notice: This package is EXPERIMENTAL and may be changed or removed in a -// later release. 
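The `addressList` methods above implement a simple cursor: `increment` reports whether the cursor is still on the list after advancing, and `seekTo` leaves the index untouched on a miss. A toy equivalent over strings (the names are illustrative, not the vendored API):

```go
package main

import "fmt"

// cursor mirrors the addressList semantics using plain strings in place
// of resolver.Address values.
type cursor struct {
	items []string
	idx   int
}

func (c *cursor) isValid() bool { return c.idx < len(c.items) }

// increment advances the cursor and reports whether it is still on the list.
func (c *cursor) increment() bool {
	if !c.isValid() {
		return false
	}
	c.idx++
	return c.idx < len(c.items)
}

// seekTo moves the cursor to needle, or leaves it unchanged on a miss.
func (c *cursor) seekTo(needle string) bool {
	for i, it := range c.items {
		if it == needle {
			c.idx = i
			return true
		}
	}
	return false
}

func main() {
	c := cursor{items: []string{"a", "b"}}
	fmt.Println(c.increment()) // true: now on "b"
	fmt.Println(c.increment()) // false: moved past the end
	fmt.Println(c.isValid())   // false
	fmt.Println(c.seekTo("a")) // true: cursor back on "a"
}
```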
-package pickfirstleaf - -import ( - "encoding/json" - "errors" - "fmt" - "net" - "net/netip" - "sync" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/pickfirst/internal" - "google.golang.org/grpc/connectivity" - expstats "google.golang.org/grpc/experimental/stats" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/envconfig" - internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -func init() { - if envconfig.NewPickFirstEnabled { - // Register as the default pick_first balancer. - Name = "pick_first" - } - balancer.Register(pickfirstBuilder{}) -} - -// enableHealthListenerKeyType is a unique key type used in resolver -// attributes to indicate whether the health listener usage is enabled. -type enableHealthListenerKeyType struct{} - -var ( - logger = grpclog.Component("pick-first-leaf-lb") - // Name is the name of the pick_first_leaf balancer. - // It is changed to "pick_first" in init() if this balancer is to be - // registered as the default pickfirst. - Name = "pick_first_leaf" - disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.disconnections", - Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", - Unit: "{disconnection}", - Labels: []string{"grpc.target"}, - Default: false, - }) - connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.connection_attempts_succeeded", - Description: "EXPERIMENTAL. Number of successful connection attempts.", - Unit: "{attempt}", - Labels: []string{"grpc.target"}, - Default: false, - }) - connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.connection_attempts_failed", - Description: "EXPERIMENTAL. Number of failed connection attempts.", - Unit: "{attempt}", - Labels: []string{"grpc.target"}, - Default: false, - }) -) - -const ( - // TODO: change to pick-first when this becomes the default pick_first policy. - logPrefix = "[pick-first-leaf-lb %p] " - // connectionDelayInterval is the time to wait for during the happy eyeballs - // pass before starting the next connection attempt. - connectionDelayInterval = 250 * time.Millisecond -) - -type ipAddrFamily int - -const ( - // ipAddrFamilyUnknown represents strings that can't be parsed as an IP - // address. - ipAddrFamilyUnknown ipAddrFamily = iota - ipAddrFamilyV4 - ipAddrFamilyV6 -) - -type pickfirstBuilder struct{} - -func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { - b := &pickfirstBalancer{ - cc: cc, - target: bo.Target.String(), - metricsRecorder: cc.MetricsRecorder(), - - subConns: resolver.NewAddressMapV2[*scData](), - state: connectivity.Connecting, - cancelConnectionTimer: func() {}, - } - b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) - return b -} - -func (b pickfirstBuilder) Name() string { - return Name -} - -func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg pfConfig - if err := json.Unmarshal(js, &cfg); err != nil { - return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) - } - return cfg, nil -} - -// EnableHealthListener updates the state to configure pickfirst for using a -// generic health listener. 
-func EnableHealthListener(state resolver.State) resolver.State { - state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) - return state -} - -type pfConfig struct { - serviceconfig.LoadBalancingConfig `json:"-"` - - // If set to true, instructs the LB policy to shuffle the order of the list - // of endpoints received from the name resolver before attempting to - // connect to them. - ShuffleAddressList bool `json:"shuffleAddressList"` -} - -// scData keeps track of the current state of the subConn. -// It is not safe for concurrent access. -type scData struct { - // The following fields are initialized at build time and read-only after - // that. - subConn balancer.SubConn - addr resolver.Address - - rawConnectivityState connectivity.State - // The effective connectivity state based on raw connectivity, health state - // and after following sticky TransientFailure behaviour defined in A62. - effectiveState connectivity.State - lastErr error - connectionFailedInFirstPass bool -} - -func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { - sd := &scData{ - rawConnectivityState: connectivity.Idle, - effectiveState: connectivity.Idle, - addr: addr, - } - sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ - StateListener: func(state balancer.SubConnState) { - b.updateSubConnState(sd, state) - }, - }) - if err != nil { - return nil, err - } - sd.subConn = sc - return sd, nil -} - -type pickfirstBalancer struct { - // The following fields are initialized at build time and read-only after - // that and therefore do not need to be guarded by a mutex. - logger *internalgrpclog.PrefixLogger - cc balancer.ClientConn - target string - metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil - - // The mutex is used to ensure synchronization of updates triggered - // from the idle picker and the already serialized resolver, - // SubConn state updates. - mu sync.Mutex - // State reported to the channel based on SubConn states and resolver - // updates. - state connectivity.State - // scData for active subonns mapped by address. - subConns *resolver.AddressMapV2[*scData] - addressList addressList - firstPass bool - numTF int - cancelConnectionTimer func() - healthCheckingEnabled bool -} - -// ResolverError is called by the ClientConn when the name resolver produces -// an error or when pickfirst determined the resolver update to be invalid. -func (b *pickfirstBalancer) ResolverError(err error) { - b.mu.Lock() - defer b.mu.Unlock() - b.resolverErrorLocked(err) -} - -func (b *pickfirstBalancer) resolverErrorLocked(err error) { - if b.logger.V(2) { - b.logger.Infof("Received error from the name resolver: %v", err) - } - - // The picker will not change since the balancer does not currently - // report an error. If the balancer hasn't received a single good resolver - // update yet, transition to TRANSIENT_FAILURE. 
- if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { - if b.logger.V(2) { - b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") - } - return - } - - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) -} - -func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - b.mu.Lock() - defer b.mu.Unlock() - b.cancelConnectionTimer() - if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { - // Cleanup state pertaining to the previous resolver state. - // Treat an empty address list like an error by calling b.ResolverError. - b.closeSubConnsLocked() - b.addressList.updateAddrs(nil) - b.resolverErrorLocked(errors.New("produced zero addresses")) - return balancer.ErrBadResolverState - } - b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil - cfg, ok := state.BalancerConfig.(pfConfig) - if state.BalancerConfig != nil && !ok { - return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) - } - - if b.logger.V(2) { - b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) - } - - var newAddrs []resolver.Address - if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { - // Perform the optional shuffling described in gRFC A62. The shuffling - // will change the order of endpoints but not touch the order of the - // addresses within each endpoint. - A61 - if cfg.ShuffleAddressList { - endpoints = append([]resolver.Endpoint{}, endpoints...) - internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) - } - - // "Flatten the list by concatenating the ordered list of addresses for - // each of the endpoints, in order." - A61 - for _, endpoint := range endpoints { - newAddrs = append(newAddrs, endpoint.Addresses...) - } - } else { - // Endpoints not set, process addresses until we migrate resolver - // emissions fully to Endpoints. The top channel does wrap emitted - // addresses with endpoints, however some balancers such as weighted - // target do not forward the corresponding correct endpoints down/split - // endpoints properly. Once all balancers correctly forward endpoints - // down, can delete this else conditional. - newAddrs = state.ResolverState.Addresses - if cfg.ShuffleAddressList { - newAddrs = append([]resolver.Address{}, newAddrs...) - internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) - } - } - - // If an address appears in multiple endpoints or in the same endpoint - // multiple times, we keep it only once. We will create only one SubConn - // for the address because an AddressMap is used to store SubConns. - // Not de-duplicating would result in attempting to connect to the same - // SubConn multiple times in the same pass. We don't want this. 
- newAddrs = deDupAddresses(newAddrs) - newAddrs = interleaveAddresses(newAddrs) - - prevAddr := b.addressList.currentAddress() - prevSCData, found := b.subConns.Get(prevAddr) - prevAddrsCount := b.addressList.size() - isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready - b.addressList.updateAddrs(newAddrs) - - // If the previous ready SubConn exists in new address list, - // keep this connection and don't create new SubConns. - if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { - return nil - } - - b.reconcileSubConnsLocked(newAddrs) - // If it's the first resolver update or the balancer was already READY - // (but the new address list does not contain the ready SubConn) or - // CONNECTING, enter CONNECTING. - // We may be in TRANSIENT_FAILURE due to a previous empty address list, - // we should still enter CONNECTING because the sticky TF behaviour - // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported - // due to connectivity failures. - if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { - // Start connection attempt at first address. - b.forceUpdateConcludedStateLocked(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - b.startFirstPassLocked() - } else if b.state == connectivity.TransientFailure { - // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until - // we're READY. See A62. - b.startFirstPassLocked() - } - return nil -} - -// UpdateSubConnState is unused as a StateListener is always registered when -// creating SubConns. -func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) -} - -func (b *pickfirstBalancer) Close() { - b.mu.Lock() - defer b.mu.Unlock() - b.closeSubConnsLocked() - b.cancelConnectionTimer() - b.state = connectivity.Shutdown -} - -// ExitIdle moves the balancer out of idle state. It can be called concurrently -// by the idlePicker and clientConn so access to variables should be -// synchronized. -func (b *pickfirstBalancer) ExitIdle() { - b.mu.Lock() - defer b.mu.Unlock() - if b.state == connectivity.Idle { - b.startFirstPassLocked() - } -} - -func (b *pickfirstBalancer) startFirstPassLocked() { - b.firstPass = true - b.numTF = 0 - // Reset the connection attempt record for existing SubConns. - for _, sd := range b.subConns.Values() { - sd.connectionFailedInFirstPass = false - } - b.requestConnectionLocked() -} - -func (b *pickfirstBalancer) closeSubConnsLocked() { - for _, sd := range b.subConns.Values() { - sd.subConn.Shutdown() - } - b.subConns = resolver.NewAddressMapV2[*scData]() -} - -// deDupAddresses ensures that each address appears only once in the slice. -func deDupAddresses(addrs []resolver.Address) []resolver.Address { - seenAddrs := resolver.NewAddressMapV2[*scData]() - retAddrs := []resolver.Address{} - - for _, addr := range addrs { - if _, ok := seenAddrs.Get(addr); ok { - continue - } - retAddrs = append(retAddrs, addr) - } - return retAddrs -} - -// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) -// as per RFC-8305 section 4. 
-// Whichever address family is first in the list is followed by an address of
-// the other address family; that is, if the first address in the list is IPv6,
-// then the first IPv4 address should be moved up in the list to be second in
-// the list. It doesn't support configuring "First Address Family Count", i.e.
-// there will always be a single member of the first address family at the
-// beginning of the interleaved list.
-// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
-// "unknown" family for interleaving.
-// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
-func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
-	familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
-	interleavingOrder := []ipAddrFamily{}
-	for _, addr := range addrs {
-		family := addressFamily(addr.Addr)
-		if _, found := familyAddrsMap[family]; !found {
-			interleavingOrder = append(interleavingOrder, family)
-		}
-		familyAddrsMap[family] = append(familyAddrsMap[family], addr)
-	}
-
-	interleavedAddrs := make([]resolver.Address, 0, len(addrs))
-
-	for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
-		// Some IP types may have fewer addresses than others, so we look for
-		// the next type that has a remaining member to add to the interleaved
-		// list.
-		family := interleavingOrder[curFamilyIdx]
-		remainingMembers := familyAddrsMap[family]
-		if len(remainingMembers) > 0 {
-			interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
-			familyAddrsMap[family] = remainingMembers[1:]
-		}
-	}
-
-	return interleavedAddrs
-}
-
-// addressFamily returns the ipAddrFamily after parsing the address string.
-// If the address isn't of the format "ip-address:port", it returns
-// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
-// using a resolver like passthrough where the address may be a hostname in
-// some format that the dialer can resolve.
-func addressFamily(address string) ipAddrFamily {
-	// Parse the IP after removing the port.
-	host, _, err := net.SplitHostPort(address)
-	if err != nil {
-		return ipAddrFamilyUnknown
-	}
-	ip, err := netip.ParseAddr(host)
-	if err != nil {
-		return ipAddrFamilyUnknown
-	}
-	switch {
-	case ip.Is4() || ip.Is4In6():
-		return ipAddrFamilyV4
-	case ip.Is6():
-		return ipAddrFamilyV6
-	default:
-		return ipAddrFamilyUnknown
-	}
-}
-
-// reconcileSubConnsLocked updates the active subchannels based on a new address
-// list from the resolver. It does this by:
-//   - closing subchannels: any existing subchannels associated with addresses
-//     that are no longer in the updated list are shut down.
-//   - removing subchannels: entries for these closed subchannels are removed
-//     from the subchannel map.
-//
-// This ensures that the subchannel map accurately reflects the current set of
-// addresses received from the name resolver.
-func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
-	newAddrsMap := resolver.NewAddressMapV2[bool]()
-	for _, addr := range newAddrs {
-		newAddrsMap.Set(addr, true)
-	}
-
-	for _, oldAddr := range b.subConns.Keys() {
-		if _, ok := newAddrsMap.Get(oldAddr); ok {
-			continue
-		}
-		val, _ := b.subConns.Get(oldAddr)
-		val.subConn.Shutdown()
-		b.subConns.Delete(oldAddr)
-	}
-}
-
-// shutdownRemainingLocked shuts down remaining SubConns. Called when a SubConn
-// becomes ready, which means that all other SubConns must be shut down.
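For reference, the classification performed by addressFamily above can be reproduced standalone with net and net/netip; a minimal sketch under the same rules (port split off first, IPv4-mapped IPv6 counts as IPv4), with a hypothetical helper name:

    package main

    import (
        "fmt"
        "net"
        "net/netip"
    )

    // classify mirrors the addressFamily logic: remove the port, then parse
    // the host with netip and inspect the family.
    func classify(hostport string) string {
        host, _, err := net.SplitHostPort(hostport)
        if err != nil {
            return "unknown"
        }
        ip, err := netip.ParseAddr(host)
        if err != nil {
            return "unknown" // e.g. a hostname under the passthrough resolver
        }
        switch {
        case ip.Is4() || ip.Is4In6():
            return "v4"
        case ip.Is6():
            return "v6"
        default:
            return "unknown"
        }
    }

    func main() {
        fmt.Println(classify("10.0.0.1:443"))          // v4
        fmt.Println(classify("[::ffff:10.0.0.1]:443")) // v4 (IPv4-mapped)
        fmt.Println(classify("[2001:db8::1]:443"))     // v6
        fmt.Println(classify("example.com:443"))       // unknown
    }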
-func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
-	b.cancelConnectionTimer()
-	for _, sd := range b.subConns.Values() {
-		if sd.subConn != selected.subConn {
-			sd.subConn.Shutdown()
-		}
-	}
-	b.subConns = resolver.NewAddressMapV2[*scData]()
-	b.subConns.Set(selected.addr, selected)
-}
-
-// requestConnectionLocked starts connecting on the subchannel corresponding to
-// the current address. If no subchannel exists, one is created. If the current
-// subchannel is in TransientFailure, a connection to the next address is
-// attempted until a subchannel is found.
-func (b *pickfirstBalancer) requestConnectionLocked() {
-	if !b.addressList.isValid() {
-		return
-	}
-	var lastErr error
-	for valid := true; valid; valid = b.addressList.increment() {
-		curAddr := b.addressList.currentAddress()
-		sd, ok := b.subConns.Get(curAddr)
-		if !ok {
-			var err error
-			// We want to assign the new scData to sd from the outer scope,
-			// hence we can't use := below.
-			sd, err = b.newSCData(curAddr)
-			if err != nil {
-				// This should never happen, unless the clientConn is being shut
-				// down.
-				if b.logger.V(2) {
-					b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
-				}
-				// Do nothing, the LB policy will be closed soon.
-				return
-			}
-			b.subConns.Set(curAddr, sd)
-		}
-
-		switch sd.rawConnectivityState {
-		case connectivity.Idle:
-			sd.subConn.Connect()
-			b.scheduleNextConnectionLocked()
-			return
-		case connectivity.TransientFailure:
-			// The SubConn is being re-used and failed during a previous pass
-			// over the addressList. It has not completed backoff yet.
-			// Mark it as having failed and try the next address.
-			sd.connectionFailedInFirstPass = true
-			lastErr = sd.lastErr
-			continue
-		case connectivity.Connecting:
-			// Wait for the connection attempt to complete or the timer to fire
-			// before attempting the next address.
-			b.scheduleNextConnectionLocked()
-			return
-		default:
-			b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState)
-			return
-		}
-	}
-
-	// All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
-	// first pass if possible.
-	b.endFirstPassIfPossibleLocked(lastErr)
-}
-
-func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
-	b.cancelConnectionTimer()
-	if !b.addressList.hasNext() {
-		return
-	}
-	curAddr := b.addressList.currentAddress()
-	cancelled := false // Access to this is protected by the balancer's mutex.
-	closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
-		b.mu.Lock()
-		defer b.mu.Unlock()
-		// If the scheduled task is cancelled while acquiring the mutex, return.
-		if cancelled {
-			return
-		}
-		if b.logger.V(2) {
-			b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
-		}
-		if b.addressList.increment() {
-			b.requestConnectionLocked()
-		}
-	})
-	// Access to the cancellation callback held by the balancer is guarded by
-	// the balancer's mutex, so it's safe to set the boolean from the callback.
-	b.cancelConnectionTimer = sync.OnceFunc(func() {
-		cancelled = true
-		closeFn()
-	})
-}
-
-func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	oldState := sd.rawConnectivityState
-	sd.rawConnectivityState = newState.ConnectivityState
-	// Previously relevant SubConns can still callback with state updates.
-	// To prevent pickers from returning these obsolete SubConns, this logic
-	// is included to check if the current list of active SubConns includes this
-	// SubConn.
-	if !b.isActiveSCData(sd) {
-		return
-	}
-	if newState.ConnectivityState == connectivity.Shutdown {
-		sd.effectiveState = connectivity.Shutdown
-		return
-	}
-
-	// Record a connection attempt when exiting CONNECTING.
-	if newState.ConnectivityState == connectivity.TransientFailure {
-		sd.connectionFailedInFirstPass = true
-		connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
-	}
-
-	if newState.ConnectivityState == connectivity.Ready {
-		connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
-		b.shutdownRemainingLocked(sd)
-		if !b.addressList.seekTo(sd.addr) {
-			// This should not fail as we should have only one SubConn after
-			// entering READY. The SubConn should be present in the addressList.
-			b.logger.Errorf("Address %q not found in address list %v", sd.addr, b.addressList.addresses)
-			return
-		}
-		if !b.healthCheckingEnabled {
-			if b.logger.V(2) {
-				b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
-			}
-
-			sd.effectiveState = connectivity.Ready
-			b.updateBalancerState(balancer.State{
-				ConnectivityState: connectivity.Ready,
-				Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
-			})
-			return
-		}
-		if b.logger.V(2) {
-			b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
-		}
-		// Send a CONNECTING update to take the SubConn out of sticky-TF if
-		// required.
-		sd.effectiveState = connectivity.Connecting
-		b.updateBalancerState(balancer.State{
-			ConnectivityState: connectivity.Connecting,
-			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
-		})
-		sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
-			b.updateSubConnHealthState(sd, scs)
-		})
-		return
-	}
-
-	// If the LB policy is READY, and it receives a subchannel state change,
-	// it means that the READY subchannel has failed.
-	// A SubConn can also transition from CONNECTING directly to IDLE when
-	// a transport is successfully created, but the connection fails
-	// before the SubConn can send the notification for READY. We treat
-	// this as a successful connection and transition to IDLE.
-	// TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
-	// part of the if condition below once the issue is fixed.
-	if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
-		// Once a transport fails, the balancer enters IDLE and starts from
-		// the first address when the picker is used.
-		b.shutdownRemainingLocked(sd)
-		sd.effectiveState = newState.ConnectivityState
-		// A READY SubConn may be interleaved between CONNECTING and IDLE;
-		// we need to account for that.
-		if oldState == connectivity.Connecting {
-			// A known issue (https://github.com/grpc/grpc-go/issues/7862)
-			// causes a race that prevents the READY state change notification.
-			// This works around it.
-			connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
-		}
-		disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
-		b.addressList.reset()
-		b.updateBalancerState(balancer.State{
-			ConnectivityState: connectivity.Idle,
-			Picker:            &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
-		})
-		return
-	}
-
-	if b.firstPass {
-		switch newState.ConnectivityState {
-		case connectivity.Connecting:
-			// The effective state can be in either IDLE, CONNECTING or
-			// TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in
-			// TRANSIENT_FAILURE until it's READY. See A62.
-			if sd.effectiveState != connectivity.TransientFailure {
-				sd.effectiveState = connectivity.Connecting
-				b.updateBalancerState(balancer.State{
-					ConnectivityState: connectivity.Connecting,
-					Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
-				})
-			}
-		case connectivity.TransientFailure:
-			sd.lastErr = newState.ConnectionError
-			sd.effectiveState = connectivity.TransientFailure
-			// Since we're re-using common SubConns while handling resolver
-			// updates, we could receive an out of turn TRANSIENT_FAILURE from
-			// a pass over the previous address list. Happy Eyeballs will also
-			// cause out of order updates to arrive.
-			if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
-				b.cancelConnectionTimer()
-				if b.addressList.increment() {
-					b.requestConnectionLocked()
-					return
-				}
-			}
-
-			// End the first pass if we've seen a TRANSIENT_FAILURE from all
-			// SubConns once.
-			b.endFirstPassIfPossibleLocked(newState.ConnectionError)
-		}
-		return
-	}
-
-	// We have finished the first pass, keep re-connecting failing SubConns.
-	switch newState.ConnectivityState {
-	case connectivity.TransientFailure:
-		b.numTF = (b.numTF + 1) % b.subConns.Len()
-		sd.lastErr = newState.ConnectionError
-		if b.numTF%b.subConns.Len() == 0 {
-			b.updateBalancerState(balancer.State{
-				ConnectivityState: connectivity.TransientFailure,
-				Picker:            &picker{err: newState.ConnectionError},
-			})
-		}
-		// We don't need to request re-resolution since the SubConn already
-		// does that before reporting TRANSIENT_FAILURE.
-		// TODO: #7534 - Move re-resolution requests from SubConn into
-		// pick_first.
-	case connectivity.Idle:
-		sd.subConn.Connect()
-	}
-}
-
-// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
-// addresses are tried and their SubConns have reported a failure.
-func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
-	// An optimization to avoid iterating over the entire SubConn map.
-	if b.addressList.isValid() {
-		return
-	}
-	// Connect() has been called on all the SubConns. The first pass can be
-	// ended if all the SubConns have reported a failure.
-	for _, sd := range b.subConns.Values() {
-		if !sd.connectionFailedInFirstPass {
-			return
-		}
-	}
-	b.firstPass = false
-	b.updateBalancerState(balancer.State{
-		ConnectivityState: connectivity.TransientFailure,
-		Picker:            &picker{err: lastErr},
-	})
-	// Start re-connecting all the SubConns that are already in IDLE.
-	for _, sd := range b.subConns.Values() {
-		if sd.rawConnectivityState == connectivity.Idle {
-			sd.subConn.Connect()
-		}
-	}
-}
-
-func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
-	activeSD, found := b.subConns.Get(sd.addr)
-	return found && activeSD == sd
-}
-
-func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	// Previously relevant SubConns can still callback with state updates.
-	// To prevent pickers from returning these obsolete SubConns, this logic
-	// is included to check if the current list of active SubConns includes
-	// this SubConn.
-	if !b.isActiveSCData(sd) {
-		return
-	}
-	sd.effectiveState = state.ConnectivityState
-	switch state.ConnectivityState {
-	case connectivity.Ready:
-		b.updateBalancerState(balancer.State{
-			ConnectivityState: connectivity.Ready,
-			Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
-		})
-	case connectivity.TransientFailure:
-		b.updateBalancerState(balancer.State{
-			ConnectivityState: connectivity.TransientFailure,
-			Picker:            &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
-		})
-	case connectivity.Connecting:
-		b.updateBalancerState(balancer.State{
-			ConnectivityState: connectivity.Connecting,
-			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
-		})
-	default:
-		b.logger.Errorf("Got unexpected health update for SubConn %p: %v", sd.subConn, state)
-	}
-}
-
-// updateBalancerState stores the state reported to the channel and calls
-// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
-// updates to the channel.
-func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
-	// In case of TransientFailures, allow the picker to be updated to refresh
-	// the connectivity error; in all other cases don't send duplicate state
-	// updates.
-	if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
-		return
-	}
-	b.forceUpdateConcludedStateLocked(newState)
-}
-
-// forceUpdateConcludedStateLocked stores the state reported to the channel and
-// calls ClientConn.UpdateState().
-// A separate function is defined to force update the ClientConn state since the
-// channel doesn't correctly assume that LB policies start in CONNECTING and
-// relies on the LB policy to send an initial CONNECTING update.
-func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
-	b.state = newState.ConnectivityState
-	b.cc.UpdateState(newState)
-}
-
-type picker struct {
-	result balancer.PickResult
-	err    error
-}
-
-func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
-	return p.result, p.err
-}
-
-// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
-// CONNECTING when Pick is called.
-type idlePicker struct {
-	exitIdle func()
-}
-
-func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
-	i.exitIdle()
-	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
-}
-
-// addressList manages sequentially iterating over addresses present in a list
-// of endpoints. It provides a 1-dimensional view of the addresses present in
-// the endpoints.
-// This type is not safe for concurrent access.
-type addressList struct {
-	addresses []resolver.Address
-	idx       int
-}
-
-func (al *addressList) isValid() bool {
-	return al.idx < len(al.addresses)
-}
-
-func (al *addressList) size() int {
-	return len(al.addresses)
-}
-
-// increment moves to the next index in the address list.
-// This method returns false if it went off the list, true otherwise.
-func (al *addressList) increment() bool {
-	if !al.isValid() {
-		return false
-	}
-	al.idx++
-	return al.idx < len(al.addresses)
-}
-
-// currentAddress returns the current address pointed to in the addressList.
-// If the list is in an invalid state, it returns an empty address instead.
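The addressList methods here and just below amount to a simple cursor over a flattened address slice. An isolated model of the same increment semantics, for illustration only (not the vendored type):

    package main

    import "fmt"

    // cursor mirrors addressList: an index that can move one past the end.
    type cursor struct {
        addrs []string
        idx   int
    }

    func (c *cursor) valid() bool { return c.idx < len(c.addrs) }

    // increment moves to the next index; it returns false once the cursor
    // has moved past the end of the list.
    func (c *cursor) increment() bool {
        if !c.valid() {
            return false
        }
        c.idx++
        return c.idx < len(c.addrs)
    }

    func main() {
        c := cursor{addrs: []string{"a:1", "b:2"}}
        fmt.Println(c.valid())     // true  (at "a:1")
        fmt.Println(c.increment()) // true  (now at "b:2")
        fmt.Println(c.increment()) // false (moved past the end)
        fmt.Println(c.valid())     // false
    }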
-func (al *addressList) currentAddress() resolver.Address {
-	if !al.isValid() {
-		return resolver.Address{}
-	}
-	return al.addresses[al.idx]
-}
-
-func (al *addressList) reset() {
-	al.idx = 0
-}
-
-func (al *addressList) updateAddrs(addrs []resolver.Address) {
-	al.addresses = addrs
-	al.reset()
-}
-
-// seekTo returns false if the needle was not found and the current index was
-// left unchanged.
-func (al *addressList) seekTo(needle resolver.Address) bool {
-	for ai, addr := range al.addresses {
-		if !equalAddressIgnoringBalAttributes(&addr, &needle) {
-			continue
-		}
-		al.idx = ai
-		return true
-	}
-	return false
-}
-
-// hasNext returns whether incrementing the addressList will result in moving
-// past the end of the list. If the list has already moved past the end, it
-// returns false.
-func (al *addressList) hasNext() bool {
-	if !al.isValid() {
-		return false
-	}
-	return al.idx+1 < len(al.addresses)
-}
-
-// equalAddressIgnoringBalAttributes returns true if a and b are considered
-// equal. This is different from the Equal method on the resolver.Address type,
-// which considers all fields to determine equality. Here, we only consider
-// fields that are meaningful to the SubConn.
-func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
-	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
-		a.Attributes.Equal(b.Attributes)
-}
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index 22045bf39..22e6e3267 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -26,7 +26,7 @@ import (
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer/endpointsharding"
-	"google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
+	"google.golang.org/grpc/balancer/pickfirst"
 	"google.golang.org/grpc/grpclog"
 	internalgrpclog "google.golang.org/grpc/internal/grpclog"
 )
@@ -47,7 +47,7 @@ func (bb builder) Name() string {
 }
 
 func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
-	childBuilder := balancer.Get(pickfirstleaf.Name).Build
+	childBuilder := balancer.Get(pickfirst.Name).Build
 	bal := &rrBalancer{
 		cc:       cc,
 		Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}),
@@ -67,6 +67,6 @@ func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
 	return b.Balancer.UpdateClientConnState(balancer.ClientConnState{
 		// Enable the health listener in pickfirst children for client side health
 		// checks and outlier detection, if configured.
-		ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState),
+		ResolverState: pickfirst.EnableHealthListener(ccs.ResolverState),
 	})
 }
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index 948a21ef6..2c760e623 100644
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -450,13 +450,14 @@ func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(
 	if acbw.ccb.cc.dopts.disableHealthCheck {
 		return noOpRegisterHealthListenerFn
 	}
+	cfg := acbw.ac.cc.healthCheckConfig()
+	if cfg == nil {
+		return noOpRegisterHealthListenerFn
+	}
 	regHealthLisFn := internal.RegisterClientHealthCheckListener
 	if regHealthLisFn == nil {
 		// The health package is not imported.
-		return noOpRegisterHealthListenerFn
-	}
-	cfg := acbw.ac.cc.healthCheckConfig()
-	if cfg == nil {
+		channelz.Error(logger, acbw.ac.channelz, "Health check is requested but health package is not imported.")
 		return noOpRegisterHealthListenerFn
 	}
 	return func(ctx context.Context, listener func(balancer.SubConnState)) func() {
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index b1364a032..42c61cf9f 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,7 +18,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.36.6
+// 	protoc-gen-go v1.36.10
 // 	protoc        v5.27.1
 // source: grpc/binlog/v1/binarylog.proto
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 3f762285d..b767d3e33 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -35,16 +35,19 @@ import (
 	"google.golang.org/grpc/balancer/pickfirst"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials"
+	expstats "google.golang.org/grpc/experimental/stats"
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/internal/idle"
 	iresolver "google.golang.org/grpc/internal/resolver"
-	"google.golang.org/grpc/internal/stats"
+	istats "google.golang.org/grpc/internal/stats"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/serviceconfig"
+	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
 
 	_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
@@ -97,6 +100,41 @@ var (
 	errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
 )
 
+var (
+	disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+		Name:           "grpc.subchannel.disconnections",
+		Description:    "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
+		Unit:           "{disconnection}",
+		Labels:         []string{"grpc.target"},
+		OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality", "grpc.disconnect_error"},
+		Default:        false,
+	})
+	connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+		Name:           "grpc.subchannel.connection_attempts_succeeded",
+		Description:    "EXPERIMENTAL. Number of successful connection attempts.",
+		Unit:           "{attempt}",
+		Labels:         []string{"grpc.target"},
+		OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality"},
+		Default:        false,
+	})
+	connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+		Name:           "grpc.subchannel.connection_attempts_failed",
+		Description:    "EXPERIMENTAL. Number of failed connection attempts.",
+		Unit:           "{attempt}",
+		Labels:         []string{"grpc.target"},
+		OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality"},
+		Default:        false,
+	})
+	openConnectionsMetric = expstats.RegisterInt64UpDownCount(expstats.MetricDescriptor{
+		Name:           "grpc.subchannel.open_connections",
+		Description:    "EXPERIMENTAL. Number of open connections.",
+		Unit:           "{connection}",
+		Labels:         []string{"grpc.target"},
+		OptionalLabels: []string{"grpc.lb.backend_service", "grpc.security_level", "grpc.lb.locality"},
+		Default:        false,
+	})
+)
+
 const (
 	defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
 	defaultClientMaxSendMessageSize    = math.MaxInt32
@@ -210,7 +248,8 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
 	cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
 	cc.pickerWrapper = newPickerWrapper()
 
-	cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+	cc.metricsRecorderList = istats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+	cc.statsHandler = istats.NewCombinedHandler(cc.dopts.copts.StatsHandlers...)
 
 	cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
 	cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
@@ -260,9 +299,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 	}()
 
 	// This creates the name resolver, load balancer, etc.
-	if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
-		return nil, err
+	if err := cc.exitIdleMode(); err != nil {
+		return nil, fmt.Errorf("failed to exit idle mode: %w", err)
 	}
+	cc.idlenessMgr.UnsafeSetNotIdle()
 
 	// Return now for non-blocking dials.
 	if !cc.dopts.block {
@@ -330,7 +370,7 @@ func (cc *ClientConn) addTraceEvent(msg string) {
 			Severity: channelz.CtInfo,
 		}
 	}
-	channelz.AddTraceEvent(logger, cc.channelz, 0, ted)
+	channelz.AddTraceEvent(logger, cc.channelz, 1, ted)
 }
 
 type idler ClientConn
@@ -339,14 +379,17 @@ func (i *idler) EnterIdleMode() {
 	(*ClientConn)(i).enterIdleMode()
 }
 
-func (i *idler) ExitIdleMode() error {
-	return (*ClientConn)(i).exitIdleMode()
+func (i *idler) ExitIdleMode() {
+	// Ignore the error returned from this method, because from the perspective
+	// of the caller (idleness manager), the channel would have always moved out
+	// of IDLE by the time this method returns.
+	(*ClientConn)(i).exitIdleMode()
 }
 
 // exitIdleMode moves the channel out of idle mode by recreating the name
 // resolver and load balancer. This should never be called directly; use
 // cc.idlenessMgr.ExitIdleMode instead.
-func (cc *ClientConn) exitIdleMode() (err error) {
+func (cc *ClientConn) exitIdleMode() error {
 	cc.mu.Lock()
 	if cc.conns == nil {
 		cc.mu.Unlock()
@@ -354,11 +397,23 @@ func (cc *ClientConn) exitIdleMode() (err error) {
 	}
 	cc.mu.Unlock()
 
+	// Set state to CONNECTING before building the name resolver
+	// so the channel does not remain in IDLE.
+	cc.csMgr.updateState(connectivity.Connecting)
+
 	// This needs to be called without cc.mu because this builds a new resolver
 	// which might update state or report error inline, which would then need to
 	// acquire cc.mu.
 	if err := cc.resolverWrapper.start(); err != nil {
-		return err
+		// If resolver creation fails, treat it like an error reported by the
+		// resolver before any valid updates. Set the channel's state to
+		// TransientFailure, and set an erroring picker with the resolver build
+		// error, which will be returned as part of any subsequent RPCs.
+		logger.Warningf("Failed to start resolver: %v", err)
+		cc.csMgr.updateState(connectivity.TransientFailure)
+		cc.mu.Lock()
+		cc.updateResolverStateAndUnlock(resolver.State{}, err)
+		return fmt.Errorf("failed to start resolver: %w", err)
 	}
 	cc.addTraceEvent("exiting idle mode")
@@ -456,7 +511,7 @@ func (cc *ClientConn) validateTransportCredentials() error {
 func (cc *ClientConn) channelzRegistration(target string) {
 	parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel)
 	cc.channelz = channelz.RegisterChannel(parentChannel, target)
-	cc.addTraceEvent("created")
+	cc.addTraceEvent(fmt.Sprintf("created for target %q", target))
 }
 
 // chainUnaryClientInterceptors chains all unary client interceptors into one.
@@ -621,7 +676,8 @@ type ClientConn struct {
 	channelz            *channelz.Channel // Channelz object.
 	resolverBuilder     resolver.Builder  // See initParsedTargetAndResolverBuilder().
 	idlenessMgr         *idle.Manager
-	metricsRecorderList *stats.MetricsRecorderList
+	metricsRecorderList *istats.MetricsRecorderList
+	statsHandler        stats.Handler
 
 	// The following provide their own synchronization, and therefore don't
 	// require cc.mu to be held to access them.
@@ -678,10 +734,8 @@ func (cc *ClientConn) GetState() connectivity.State {
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a later
 // release.
 func (cc *ClientConn) Connect() {
-	if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
-		cc.addTraceEvent(err.Error())
-		return
-	}
+	cc.idlenessMgr.ExitIdleMode()
+
 	// If the ClientConn was not in idle mode, we need to call ExitIdle on the
 	// LB policy so that connections can be created.
 	cc.mu.Lock()
@@ -732,8 +786,8 @@ func init() {
 	internal.EnterIdleModeForTesting = func(cc *ClientConn) {
 		cc.idlenessMgr.EnterIdleModeForTesting()
 	}
-	internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
-		return cc.idlenessMgr.ExitIdleMode()
+	internal.ExitIdleModeForTesting = func(cc *ClientConn) {
+		cc.idlenessMgr.ExitIdleMode()
 	}
 }
@@ -858,6 +912,7 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
 		channelz:     channelz.RegisterSubChannel(cc.channelz, ""),
 		resetBackoff: make(chan struct{}),
 	}
+	ac.updateTelemetryLabelsLocked()
 	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
 	// Start with our address set to the first address; this may be updated if
 	// we connect to different addresses.
@@ -974,7 +1029,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
 	}
 
 	ac.addrs = addrs
-
+	ac.updateTelemetryLabelsLocked()
 	if ac.state == connectivity.Shutdown ||
 		ac.state == connectivity.TransientFailure ||
 		ac.state == connectivity.Idle {
@@ -1213,6 +1268,9 @@ type addrConn struct {
 	resetBackoff chan struct{}
 
 	channelz *channelz.SubChannel
+
+	localityLabel       string
+	backendServiceLabel string
 }
 
 // Note: this requires a lock on ac.mu.
@@ -1220,6 +1278,18 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
 	if ac.state == s {
 		return
 	}
+
+	// If we are transitioning out of Ready, it means there is a disconnection.
+	// A SubConn can also transition from CONNECTING directly to IDLE when
+	// a transport is successfully created, but the connection fails
+	// before the SubConn can send the notification for READY. We treat
+	// this as a successful connection and transition to IDLE.
+	// TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
+	// part of the if condition below once the issue is fixed.
+	if ac.state == connectivity.Ready || (ac.state == connectivity.Connecting && s == connectivity.Idle) {
+		disconnectionsMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel, "unknown")
+		openConnectionsMetric.Record(ac.cc.metricsRecorderList, -1, ac.cc.target, ac.backendServiceLabel, ac.securityLevelLocked(), ac.localityLabel)
+	}
 	ac.state = s
 	ac.channelz.ChannelMetrics.State.Store(&s)
 	if lastErr == nil {
@@ -1277,6 +1347,15 @@ func (ac *addrConn) resetTransportAndUnlock() {
 	ac.mu.Unlock()
 
 	if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
+		if !errors.Is(err, context.Canceled) {
+			connectionAttemptsFailedMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel)
+		} else {
+			if logger.V(2) {
+				// This logs cancelled connection attempts; the log may later
+				// be replaced by a dedicated metric.
+				logger.Infof("Context cancellation detected; not recording this as a failed connection attempt.")
+			}
+		}
 		// TODO: #7534 - Move re-resolution requests into the pick_first LB policy
 		// to ensure one resolution request per pass instead of per subconn failure.
 		ac.cc.resolveNow(resolver.ResolveNowOptions{})
@@ -1316,10 +1395,50 @@ func (ac *addrConn) resetTransportAndUnlock() {
 	}
 	// Success; reset backoff.
 	ac.mu.Lock()
+	connectionAttemptsSucceededMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel)
+	openConnectionsMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.securityLevelLocked(), ac.localityLabel)
 	ac.backoffIdx = 0
 	ac.mu.Unlock()
 }
 
+// updateTelemetryLabelsLocked calculates and caches the telemetry labels based
+// on the first address in addrConn.
+func (ac *addrConn) updateTelemetryLabelsLocked() {
+	labelsFunc, ok := internal.AddressToTelemetryLabels.(func(resolver.Address) map[string]string)
+	if !ok || len(ac.addrs) == 0 {
+		// Reset defaults.
+		ac.localityLabel = ""
+		ac.backendServiceLabel = ""
+		return
+	}
+	labels := labelsFunc(ac.addrs[0])
+	ac.localityLabel = labels["grpc.lb.locality"]
+	ac.backendServiceLabel = labels["grpc.lb.backend_service"]
+}
+
+type securityLevelKey struct{}
+
+func (ac *addrConn) securityLevelLocked() string {
+	var secLevel string
+	// During disconnection, ac.transport is nil. Fall back to the security level
+	// stored in the current address during connection.
+	if ac.transport == nil {
+		secLevel, _ = ac.curAddr.Attributes.Value(securityLevelKey{}).(string)
+		return secLevel
+	}
+	authInfo := ac.transport.Peer().AuthInfo
+	if ci, ok := authInfo.(interface {
+		GetCommonAuthInfo() credentials.CommonAuthInfo
+	}); ok {
+		secLevel = ci.GetCommonAuthInfo().SecurityLevel.String()
+		// Store the security level in the current address' attributes so
+		// that it remains available for disconnection metrics after the
+		// transport is closed.
+		ac.curAddr.Attributes = ac.curAddr.Attributes.WithValue(securityLevelKey{}, secLevel)
+	}
+	return secLevel
+}
+
 // tryAllAddrs tries to create a connection to the addresses, and stops at the
 // first successful one. It returns an error if no address was successfully
 // connected, or updates ac appropriately with the new transport.
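Between the clientconn.go and credentials.go hunks, it may help to see the metric registration/recording pattern above in isolation. A sketch with a hypothetical metric name, using the experimental/stats API that this diff extends; the real descriptors are the ones registered in clientconn.go:

    package example

    import expstats "google.golang.org/grpc/experimental/stats"

    // demoOpenStreams is a hypothetical up-down counter, registered at init
    // time exactly like the subchannel metrics above.
    var demoOpenStreams = expstats.RegisterInt64UpDownCount(expstats.MetricDescriptor{
        Name:        "example.open_streams", // hypothetical name
        Description: "Number of currently open streams.",
        Unit:        "{stream}",
        Labels:      []string{"grpc.target"},
        Default:     false,
    })

    // record shows the runtime half: +1 on open, -1 on close, against
    // whatever MetricsRecorder the channel provides.
    func record(recorder expstats.MetricsRecorder, target string, open bool) {
        delta := int64(1)
        if !open {
            delta = -1
        }
        demoOpenStreams.Record(recorder, delta, target)
    }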
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index c8e337cdd..06f6c6c70 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -44,8 +44,7 @@ type PerRPCCredentials interface {
 	// A54). uri is the URI of the entry point for the request. When supported
 	// by the underlying implementation, ctx can be used for timeout and
 	// cancellation. Additionally, RequestInfo data will be available via ctx
-	// to this call. TODO(zhaoq): Define the set of the qualified keys instead
-	// of leaving it as an arbitrary string.
+	// to this call.
 	GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
 	// RequireTransportSecurity indicates whether the credentials requires
 	// transport security.
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 11d0ae142..dadd21e40 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -27,8 +27,10 @@ package encoding
 
 import (
 	"io"
+	"slices"
 	"strings"
 
+	"google.golang.org/grpc/encoding/internal"
 	"google.golang.org/grpc/internal/grpcutil"
 )
 
@@ -36,6 +38,24 @@ import (
 // It is intended for grpc internal use only.
 const Identity = "identity"
 
+func init() {
+	internal.RegisterCompressorForTesting = func(c Compressor) func() {
+		name := c.Name()
+		curCompressor, found := registeredCompressor[name]
+		RegisterCompressor(c)
+		return func() {
+			if found {
+				registeredCompressor[name] = curCompressor
+				return
+			}
+			delete(registeredCompressor, name)
+			grpcutil.RegisteredCompressorNames = slices.DeleteFunc(grpcutil.RegisteredCompressorNames, func(s string) bool {
+				return s == name
+			})
+		}
+	}
+}
+
 // Compressor is used for compressing and decompressing when sending or
 // receiving messages.
 //
diff --git a/vendor/google.golang.org/grpc/encoding/internal/internal.go b/vendor/google.golang.org/grpc/encoding/internal/internal.go
new file mode 100644
index 000000000..ee9acb437
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/internal/internal.go
@@ -0,0 +1,28 @@
+/*
+ *
+ * Copyright 2025 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains code internal to the encoding package.
+package internal
+
+// RegisterCompressorForTesting registers a compressor in the global compressor
+// registry. It returns a cleanup function that should be called at the end
+// of the test to unregister the compressor.
+//
+// This prevents compressors registered in one test from appearing in the
+// encoding headers of subsequent tests.
+var RegisterCompressorForTesting any // func RegisterCompressor(c Compressor) func()
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
index ceec319dd..1ab874c7a 100644
--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -46,9 +46,25 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
 		return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
 	}
 
+	// Important: if we remove this Size call then we cannot use
+	// UseCachedSize in MarshalOptions below.
 	size := proto.Size(vv)
+
+	// MarshalOptions with UseCachedSize allows reusing the result from the
+	// previous Size call. This is safe here because:
+	//
+	// 1. We just computed the size.
+	// 2. We assume the message is not being mutated concurrently.
+	//
+	// Important: If the proto.Size call above is removed, using UseCachedSize
+	// becomes unsafe and may lead to incorrect marshaling.
+	//
+	// For more details, see the doc of UseCachedSize:
+	// https://pkg.go.dev/google.golang.org/protobuf/proto#MarshalOptions
+	marshalOptions := proto.MarshalOptions{UseCachedSize: true}
+
 	if mem.IsBelowBufferPoolingThreshold(size) {
-		buf, err := proto.Marshal(vv)
+		buf, err := marshalOptions.Marshal(vv)
 		if err != nil {
 			return nil, err
 		}
@@ -56,7 +72,7 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
 	} else {
 		pool := mem.DefaultBufferPool()
 		buf := pool.Get(size)
-		if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
+		if _, err := marshalOptions.MarshalAppend((*buf)[:0], vv); err != nil {
 			pool.Put(buf)
 			return nil, err
 		}
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
index ad75313a1..472813f58 100644
--- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
+++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -75,6 +75,8 @@ const (
 	MetricTypeIntHisto
 	MetricTypeFloatHisto
 	MetricTypeIntGauge
+	MetricTypeIntUpDownCount
+	MetricTypeIntAsyncGauge
 )
 
 // Int64CountHandle is a typed handle for an int count metric. This handle
@@ -93,6 +95,23 @@ func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels .
 	recorder.RecordInt64Count(h, incr, labels...)
 }
 
+// Int64UpDownCountHandle is a typed handle for an int up-down counter metric.
+// This handle is passed at the recording point in order to know which metric
+// to record on.
+type Int64UpDownCountHandle MetricDescriptor
+
+// Descriptor returns the int64 up-down counter handle typecast to a pointer to
+// a MetricDescriptor.
+func (h *Int64UpDownCountHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 up-down counter value on the metrics recorder
+// provided. The value v can be positive to increment or negative to decrement.
+func (h *Int64UpDownCountHandle) Record(recorder MetricsRecorder, v int64, labels ...string) {
+	recorder.RecordInt64UpDownCount(h, v, labels...)
+}
+
 // Float64CountHandle is a typed handle for a float count metric. This handle is
 // passed at the recording point in order to know which metric to record on.
 type Float64CountHandle MetricDescriptor
@@ -154,6 +173,30 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels .
 	recorder.RecordInt64Gauge(h, incr, labels...)
 }
 
+// AsyncMetric is a marker interface for asynchronous metric types.
+type AsyncMetric interface {
+	isAsync()
+	Descriptor() *MetricDescriptor
+}
+
+// Int64AsyncGaugeHandle is a typed handle for an int async gauge metric. This
+// handle is passed at the recording point in order to know which metric to
+// record on.
+type Int64AsyncGaugeHandle MetricDescriptor
+
+// isAsync implements the AsyncMetric interface.
+func (h *Int64AsyncGaugeHandle) isAsync() {}
+
+// Descriptor returns the int64 async gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64AsyncGaugeHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 async gauge value on the metrics recorder provided.
+func (h *Int64AsyncGaugeHandle) Record(recorder AsyncMetricsRecorder, value int64, labels ...string) {
+	recorder.RecordInt64AsyncGauge(h, value, labels...)
+}
+
 // registeredMetrics are the registered metric descriptor names.
 var registeredMetrics = make(map[string]bool)
 
@@ -249,6 +292,35 @@ func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
 	return (*Int64GaugeHandle)(descPtr)
 }
 
+// RegisterInt64UpDownCount registers the metric description onto the global
+// registry. It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64UpDownCount(descriptor MetricDescriptor) *Int64UpDownCountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	// Set the specific metric type for the up-down counter.
+	descriptor.Type = MetricTypeIntUpDownCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64UpDownCountHandle)(descPtr)
+}
+
+// RegisterInt64AsyncGauge registers the metric description onto the global
+// registry. It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64AsyncGauge(descriptor MetricDescriptor) *Int64AsyncGaugeHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntAsyncGauge
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64AsyncGaugeHandle)(descPtr)
+}
+
 // snapshotMetricsRegistryForTesting snapshots the global data of the metrics
 // registry. Returns a cleanup function that sets the metrics registry to its
 // original state.
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
index ee1423605..d7d404cbe 100644
--- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -38,6 +38,16 @@ type MetricsRecorder interface {
 	// RecordInt64Gauge records the measurement alongside labels on the int
 	// gauge associated with the provided handle.
 	RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+	// RecordInt64UpDownCount records the measurement alongside labels on the
+	// int up-down counter associated with the provided handle.
+	RecordInt64UpDownCount(handle *Int64UpDownCountHandle, incr int64, labels ...string)
+}
+
+// AsyncMetricsRecorder records on asynchronous metrics derived from the metric
+// registry.
+type AsyncMetricsRecorder interface {
+	// RecordInt64AsyncGauge records the measurement alongside labels on the
+	// int async gauge associated with the provided handle.
+	RecordInt64AsyncGauge(handle *Int64AsyncGaugeHandle, incr int64, labels ...string)
 }
 
 // Metrics is an experimental legacy alias of the now-stable stats.MetricSet.
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index 22d263fb9..8f7d9f6bb 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -17,7 +17,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.36.6
+// 	protoc-gen-go v1.36.10
 // 	protoc        v5.27.1
 // source: grpc/health/v1/health.proto
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
index f2c01f296..e99cd5c83 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
@@ -17,7 +17,7 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
 // versions:
-// - protoc-gen-go-grpc v1.5.1
+// - protoc-gen-go-grpc v1.6.0
 // - protoc             v5.27.1
 // source: grpc/health/v1/health.proto
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
index ba25b8988..f38de74a4 100644
--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
@@ -67,6 +67,10 @@ type Balancer struct {
 	// balancerCurrent before the UpdateSubConnState is called on the
 	// balancerCurrent.
 	currentMu sync.Mutex
+
+	// activeGoroutines tracks all the goroutines that this balancer has started
+	// and that should be waited on when the balancer closes.
+	activeGoroutines sync.WaitGroup
 }
 
 // swap swaps out the current lb with the pending lb and updates the ClientConn.
@@ -76,7 +80,9 @@ func (gsb *Balancer) swap() {
 	cur := gsb.balancerCurrent
 	gsb.balancerCurrent = gsb.balancerPending
 	gsb.balancerPending = nil
+	gsb.activeGoroutines.Add(1)
 	go func() {
+		defer gsb.activeGoroutines.Done()
 		gsb.currentMu.Lock()
 		defer gsb.currentMu.Unlock()
 		cur.Close()
@@ -274,6 +280,7 @@ func (gsb *Balancer) Close() {
 
 	currentBalancerToClose.Close()
 	pendingBalancerToClose.Close()
+	gsb.activeGoroutines.Wait()
 }
 
 // balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
@@ -324,7 +331,12 @@ func (bw *balancerWrapper) UpdateState(state balancer.State) {
 	defer bw.gsb.mu.Unlock()
 	bw.lastState = state
 
+	// If Close() acquires the mutex before UpdateState(), the balancer
+	// will already have been removed from the current or pending state when
+	// reaching this point.
 	if !bw.gsb.balancerCurrentOrPending(bw) {
+		// Returning here ensures that (*Balancer).swap() is not invoked after
+		// (*Balancer).Close() and therefore prevents "use after close".
 		return
 	}
diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
index 11f91668a..467392b8d 100644
--- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
+++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
@@ -83,6 +83,7 @@ func (b *Unbounded) Load() {
 		default:
 		}
 	} else if b.closing && !b.closed {
+		b.closed = true
 		close(b.c)
 	}
 }
diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go
index 2bffe4777..3b7ba5966 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/trace.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go
@@ -194,7 +194,7 @@ func (r RefChannelType) String() string {
 // If channelz is not turned ON, this will simply log the event descriptions.
 func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) {
 	// Log only the trace description associated with the bottom most entity.
-	d := fmt.Sprintf("[%s]%s", e, desc.Desc)
+	d := fmt.Sprintf("[%s] %s", e, desc.Desc)
 	switch desc.Severity {
 	case CtUnknown, CtInfo:
 		l.InfoDepth(depth+1, d)
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 7e060f5ed..6414ee4bb 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -52,12 +52,6 @@ var (
 	// or "false".
 	EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
 
-	// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
-	// instead of the existing pickfirst implementation. This can be disabled by
-	// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
-	// to "false".
-	NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true)
-
 	// XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash
 	// key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by
 	// setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the
@@ -75,6 +69,19 @@ var (
 	// ALTSHandshakerKeepaliveParams is set if we should add the
 	// KeepaliveParams when dialing the ALTS handshaker service.
 	ALTSHandshakerKeepaliveParams = boolFromEnv("GRPC_EXPERIMENTAL_ALTS_HANDSHAKER_KEEPALIVE_PARAMS", false)
+
+	// EnableDefaultPortForProxyTarget controls whether the resolver adds a
+	// default port 443 to a target address that lacks one. This flag only has
+	// an effect when all of the following conditions are met:
+	//   - A connect proxy is being used.
+	//   - Target resolution is disabled.
+	//   - The DNS resolver is being used.
+	EnableDefaultPortForProxyTarget = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_DEFAULT_PORT_FOR_PROXY_TARGET", true)
+
+	// XDSAuthorityRewrite indicates whether xDS authority rewriting is enabled.
+	// This feature is defined in gRFC A81 and is enabled by setting the
+	// environment variable GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE to "true".
+	XDSAuthorityRewrite = boolFromEnv("GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE", false)
 )
 
 func boolFromEnv(envVar string, def bool) bool {
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index e87551552..7685d08b5 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -68,4 +68,15 @@ var (
 	// trust. For more details, see:
 	// https://github.com/grpc/proposal/blob/master/A87-mtls-spiffe-support.md
 	XDSSPIFFEEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_MTLS_SPIFFE", false)
+
+	// XDSHTTPConnectEnabled is true if gRPC should parse custom Metadata
+	// configuring use of an HTTP CONNECT proxy via xDS from cluster resources.
+	// For more details, see:
+	// https://github.com/grpc/proposal/blob/master/A86-xds-http-connect.md
+	XDSHTTPConnectEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false)
+
+	// XDSBootstrapCallCredsEnabled controls if call credentials can be used in
+	// xDS bootstrap configuration via the `call_creds` field. For more details,
+	// see: https://github.com/grpc/proposal/blob/master/A97-xds-jwt-call-creds.md
+	XDSBootstrapCallCredsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_BOOTSTRAP_CALL_CREDS", false)
 )
diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
index 7617be215..c90cc51bd 100644
--- a/vendor/google.golang.org/grpc/internal/experimental.go
+++ b/vendor/google.golang.org/grpc/internal/experimental.go
@@ -25,4 +25,8 @@ var (
 	// BufferPool is implemented by the grpc package and returns a server
 	// option to configure a shared buffer pool for a grpc.Server.
 	BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
+
+	// AcceptCompressors is implemented by the grpc package and returns
+	// a call option that restricts the grpc-accept-encoding header for a call.
+	AcceptCompressors any // func(...string) grpc.CallOption
 )
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index 8e8e86128..9b6d8a1fa 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -80,25 +80,11 @@ func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure
 func (cs *CallbackSerializer) run(ctx context.Context) {
 	defer close(cs.done)
 
-	// TODO: when Go 1.21 is the oldest supported version, this loop and Close
-	// can be replaced with:
-	//
-	//   context.AfterFunc(ctx, cs.callbacks.Close)
-	for ctx.Err() == nil {
-		select {
-		case <-ctx.Done():
-			// Do nothing here. Next iteration of the for loop will not happen,
-			// since ctx.Err() would be non-nil.
-		case cb := <-cs.callbacks.Get():
-			cs.callbacks.Load()
-			cb.(func(context.Context))(ctx)
-		}
-	}
+	// Close the buffer when the context is canceled
+	// to prevent new callbacks from being added.
+	context.AfterFunc(ctx, cs.callbacks.Close)
 
-	// Close the buffer to prevent new callbacks from being added.
-	cs.callbacks.Close()
-
-	// Run all pending callbacks.
+	// Run all callbacks.
 	for cb := range cs.callbacks.Get() {
 		cs.callbacks.Load()
 		cb.(func(context.Context))(ctx)
diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go
index 2c13ee9da..d3cd24f80 100644
--- a/vendor/google.golang.org/grpc/internal/idle/idle.go
+++ b/vendor/google.golang.org/grpc/internal/idle/idle.go
@@ -21,7 +21,6 @@
 package idle
 
 import (
-	"fmt"
 	"math"
 	"sync"
 	"sync/atomic"
@@ -33,15 +32,15 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
 	return time.AfterFunc(d, f)
 }
 
-// Enforcer is the functionality provided by grpc.ClientConn to enter
-// and exit from idle mode.
-type Enforcer interface {
-	ExitIdleMode() error
+// ClientConn is the functionality provided by grpc.ClientConn to enter and
+// exit from idle mode.
+type ClientConn interface {
+	ExitIdleMode()
 	EnterIdleMode()
 }
 
-// Manager implements idleness detection and calls the configured Enforcer to
-// enter/exit idle mode when appropriate. Must be created by NewManager.
+// Manager implements idleness detection and calls the ClientConn to enter/exit
+// idle mode when appropriate. Must be created by NewManager.
 type Manager struct {
 	// State accessed atomically.
 	lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed.
@@ -51,8 +50,8 @@ type Manager struct {
 
 	// Can be accessed without atomics or mutex since these are set at creation
 	// time and read-only after that.
-	enforcer Enforcer // Functionality provided by grpc.ClientConn.
-	timeout  time.Duration
+	cc      ClientConn // Functionality provided by grpc.ClientConn.
+	timeout time.Duration
 
 	// idleMu is used to guarantee mutual exclusion in two scenarios:
 	// - Opposing intentions:
@@ -72,9 +71,9 @@ type Manager struct {
 
 // NewManager creates a new idleness manager implementation for the
 // given idle timeout. It begins in idle mode.
-func NewManager(enforcer Enforcer, timeout time.Duration) *Manager {
+func NewManager(cc ClientConn, timeout time.Duration) *Manager {
 	return &Manager{
-		enforcer:         enforcer,
+		cc:               cc,
 		timeout:          timeout,
 		actuallyIdle:     true,
 		activeCallsCount: -math.MaxInt32,
@@ -127,7 +126,7 @@ func (m *Manager) handleIdleTimeout() {
 
 	// Now that we've checked that there has been no activity, attempt to enter
 	// idle mode, which is very likely to succeed.
-	if m.tryEnterIdleMode() {
+	if m.tryEnterIdleMode(true) {
 		// Successfully entered idle mode. No timer needed until we exit idle.
 		return
 	}
@@ -142,10 +141,13 @@ func (m *Manager) handleIdleTimeout() {
 // that, it performs a last minute check to ensure that no new RPC has come in,
 // making the channel active.
 //
+// checkActivity controls whether a check for RPC activity, since the last time
+// the idle_timeout fired, is made.
+//
 // Return value indicates whether or not the channel moved to idle mode.
 //
 // Holds idleMu which ensures mutual exclusion with exitIdleMode.
-func (m *Manager) tryEnterIdleMode() bool {
+func (m *Manager) tryEnterIdleMode(checkActivity bool) bool {
 	// Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin()
 	// that the channel is either in idle mode or is trying to get there.
 	if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
@@ -166,7 +168,7 @@ func (m *Manager) tryEnterIdleMode(checkActivity bool) bool {
 		atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
 		return false
 	}
-	if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
+	if checkActivity && atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
 		// A very short RPC could have come in (and also finished) after we
 		// checked for calls count and activity in handleIdleTimeout(), but
 		// before the CAS operation. So, we need to check for activity again.
@@ -177,44 +179,37 @@ func (m *Manager) tryEnterIdleMode(checkActivity bool) bool {
 	// No new RPCs have come in since we set the active calls count value to
 	// -math.MaxInt32. And since we have the lock, it is safe to enter idle mode
 	// unconditionally now.
-	m.enforcer.EnterIdleMode()
+	m.cc.EnterIdleMode()
 	m.actuallyIdle = true
 	return true
 }
 
 // EnterIdleModeForTesting instructs the channel to enter idle mode.
func (m *Manager) EnterIdleModeForTesting() { - m.tryEnterIdleMode() + m.tryEnterIdleMode(false) } // OnCallBegin is invoked at the start of every RPC. -func (m *Manager) OnCallBegin() error { +func (m *Manager) OnCallBegin() { if m.isClosed() { - return nil + return } if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { // Channel is not idle now. Set the activity bit and allow the call. atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) - return nil + return } // Channel is either in idle mode or is in the process of moving to idle // mode. Attempt to exit idle mode to allow this RPC. - if err := m.ExitIdleMode(); err != nil { - // Undo the increment to calls count, and return an error causing the - // RPC to fail. - atomic.AddInt32(&m.activeCallsCount, -1) - return err - } - + m.ExitIdleMode() atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) - return nil } -// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's +// ExitIdleMode instructs m to call the ClientConn's ExitIdleMode and update its // internal state. -func (m *Manager) ExitIdleMode() error { +func (m *Manager) ExitIdleMode() { // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. m.idleMu.Lock() defer m.idleMu.Unlock() @@ -231,12 +226,10 @@ func (m *Manager) ExitIdleMode() error { // m.ExitIdleMode. // // In any case, there is nothing to do here. - return nil + return } - if err := m.enforcer.ExitIdleMode(); err != nil { - return fmt.Errorf("failed to exit idle mode: %w", err) - } + m.cc.ExitIdleMode() // Undo the idle entry process. This also respects any new RPC attempts. atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) @@ -244,7 +237,23 @@ func (m *Manager) ExitIdleMode() error { // Start a new timer to fire after the configured idle timeout. m.resetIdleTimerLocked(m.timeout) - return nil +} + +// UnsafeSetNotIdle instructs the Manager to update its internal state to +// reflect the reality that the channel is no longer in IDLE mode. +// +// N.B. This method is intended only for internal use by the gRPC client +// when it exits IDLE mode **manually** from `Dial`. The callsite must ensure: +// - The channel was **actually in IDLE mode** immediately prior to the call. +// - There is **no concurrent activity** that could cause the channel to exit +// IDLE mode *naturally* at the same time. +func (m *Manager) UnsafeSetNotIdle() { + m.idleMu.Lock() + defer m.idleMu.Unlock() + + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.actuallyIdle = false + m.resetIdleTimerLocked(m.timeout) } // OnCallEnd is invoked at the end of every RPC. diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 2699223a2..27bef83d9 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -244,6 +244,10 @@ var ( // When set, the function will be called before the stream enters // the blocking state. NewStreamWaitingForResolver = func() {} + + // AddressToTelemetryLabels is an xDS-provided function to extract telemetry + // labels from a resolver.Address. Callers must assert its type before calling. 
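AddressToTelemetryLabels, declared immediately after this note, follows the internal package's convention of any-typed hook variables whose real signature lives only in a comment; callers type-assert before calling. A generic, self-contained illustration of the pattern (Hook is a stand-in name, not a gRPC symbol):

```go
package main

import "fmt"

// Hook mimics the pattern: an any-typed package variable whose concrete
// type is documented only in a comment.
var Hook any // func(addr string) map[string]string

func main() {
	Hook = func(addr string) map[string]string {
		return map[string]string{"peer.address": addr}
	}
	// Callers must assert the documented type before calling.
	if f, ok := Hook.(func(string) map[string]string); ok {
		fmt.Println(f("10.0.0.1:443"))
	}
}
```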
+ AddressToTelemetryLabels any // func(addr resolver.Address) map[string]string ) // HealthChecker defines the signature of the client-side LB channel health diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go index 20b8fb098..5bfa67b72 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go @@ -22,11 +22,13 @@ package delegatingresolver import ( "fmt" + "net" "net/http" "net/url" "sync" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/proxyattributes" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/internal/transport/networktype" @@ -40,6 +42,8 @@ var ( HTTPSProxyFromEnvironment = http.ProxyFromEnvironment ) +const defaultPort = "443" + // delegatingResolver manages both target URI and proxy address resolution by // delegating these tasks to separate child resolvers. Essentially, it acts as // an intermediary between the gRPC ClientConn and the child resolvers. @@ -107,10 +111,18 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti targetResolver: nopResolver{}, } + addr := target.Endpoint() var err error - r.proxyURL, err = proxyURLForTarget(target.Endpoint()) + if target.URL.Scheme == "dns" && !targetResolutionEnabled && envconfig.EnableDefaultPortForProxyTarget { + addr, err = parseTarget(addr) + if err != nil { + return nil, fmt.Errorf("delegating_resolver: invalid target address %q: %v", target.Endpoint(), err) + } + } + + r.proxyURL, err = proxyURLForTarget(addr) if err != nil { - return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err) + return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %q: %v", target, err) } // proxy is not configured or proxy address excluded using `NO_PROXY` env @@ -132,8 +144,8 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti // bypass the target resolver and store the unresolved target address. if target.URL.Scheme == "dns" && !targetResolutionEnabled { r.targetResolverState = &resolver.State{ - Addresses: []resolver.Address{{Addr: target.Endpoint()}}, - Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}}, + Addresses: []resolver.Address{{Addr: addr}}, + Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: addr}}}}, } r.updateTargetResolverState(*r.targetResolverState) return r, nil @@ -202,6 +214,44 @@ func needsProxyResolver(state *resolver.State) bool { return false } +// parseTarget takes a target string and ensures it is a valid "host:port" target. +// +// It does the following: +// 1. If the target already has a port (e.g., "host:port", "[ipv6]:port"), +// it is returned as is. +// 2. If the host part is empty (e.g., ":80"), it defaults to "localhost", +// returning "localhost:80". +// 3. If the target is missing a port (e.g., "host", "ipv6"), the defaultPort +// is added. +// +// An error is returned for empty targets or targets with a trailing colon +// but no port (e.g., "host:"). 
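The doc comment above enumerates the host:port shapes the new helper must handle; its implementation follows next. For orientation, a standalone probe of how net.SplitHostPort from the standard library behaves for each of those shapes:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// The five shapes the helper has to handle.
	for _, target := range []string{"host:80", "[::1]:443", ":80", "host", "host:"} {
		host, port, err := net.SplitHostPort(target)
		fmt.Printf("%-12q host=%q port=%q err=%v\n", target, host, port, err)
	}
}
```

A bare "host" fails outright (missing port), while "host:" and ":80" succeed with an empty port or host respectively, which is exactly why the helper checks those two fields after a successful split.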
+func parseTarget(target string) (string, error) { + if target == "" { + return "", fmt.Errorf("missing address") + } + + host, port, err := net.SplitHostPort(target) + if err != nil { + // If SplitHostPort fails, it's likely because the port is missing. + // We append the default port and return the result. + return net.JoinHostPort(target, defaultPort), nil + } + + // If SplitHostPort succeeds, we check for edge cases. + if port == "" { + // A success with an empty port means the target had a trailing colon, + // e.g., "host:", which is an error. + return "", fmt.Errorf("missing port after port-separator colon") + } + if host == "" { + // A success with an empty host means the target was like ":80". + // We default the host to "localhost". + host = "localhost" + } + return net.JoinHostPort(host, port), nil +} + func skipProxy(address resolver.Address) bool { // Avoid proxy when network is not tcp. networkType, ok := networktype.Get(address) diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go index 79044657b..d5f7e4d62 100644 --- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -64,6 +64,16 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, } } +// RecordInt64UpDownCount records the measurement alongside labels on the int +// count associated with the provided handle. +func (l *MetricsRecorderList) RecordInt64UpDownCount(handle *estats.Int64UpDownCountHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64UpDownCount(handle, incr, labels...) + } +} + // RecordFloat64Count records the measurement alongside labels on the float // count associated with the provided handle. func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { diff --git a/vendor/google.golang.org/grpc/internal/stats/stats.go b/vendor/google.golang.org/grpc/internal/stats/stats.go new file mode 100644 index 000000000..49019b80d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/stats.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + + "google.golang.org/grpc/stats" +) + +type combinedHandler struct { + handlers []stats.Handler +} + +// NewCombinedHandler combines multiple stats.Handlers into a single handler. +// +// It returns nil if no handlers are provided. If only one handler is +// provided, it is returned directly without wrapping. 
+func NewCombinedHandler(handlers ...stats.Handler) stats.Handler { + switch len(handlers) { + case 0: + return nil + case 1: + return handlers[0] + default: + return &combinedHandler{handlers: handlers} + } +} + +func (ch *combinedHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + for _, h := range ch.handlers { + ctx = h.TagRPC(ctx, info) + } + return ctx +} + +func (ch *combinedHandler) HandleRPC(ctx context.Context, stats stats.RPCStats) { + for _, h := range ch.handlers { + h.HandleRPC(ctx, stats) + } +} + +func (ch *combinedHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { + for _, h := range ch.handlers { + ctx = h.TagConn(ctx, info) + } + return ctx +} + +func (ch *combinedHandler) HandleConn(ctx context.Context, stats stats.ConnStats) { + for _, h := range ch.handlers { + h.HandleConn(ctx, stats) + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go index ccc0e017e..980452519 100644 --- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -29,25 +29,27 @@ import ( // ClientStream implements streaming functionality for a gRPC client. type ClientStream struct { - *Stream // Embed for common stream functionality. + Stream // Embed for common stream functionality. ct *http2Client done chan struct{} // closed at the end of stream to unblock writers. doneFunc func() // invoked at the end of stream. - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + headerChan chan struct{} // closed to indicate the end of header metadata. + header metadata.MD // the received header metadata + + status *status.Status // the status error received from the server + + // Non-pointer fields are at the end to optimize GC allocations. + // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). - headerValid bool - header metadata.MD // the received header metadata - noHeaders bool // set if the client never received headers (set only after the stream is done). - - bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream - unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream - - status *status.Status // the status error received from the server + headerValid bool + noHeaders bool // set if the client never received headers (set only after the stream is done). + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream + unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream } // Read reads an n byte message from the input stream. 
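Because transports now carry a single stats.Handler, multiple handlers supplied through dial options are folded by this combiner before reaching the transport. A sketch of the caller-visible effect using only the public API; the logHandler type and target address are placeholders:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// logHandler is a minimal stats.Handler that just logs event types.
type logHandler struct{ name string }

func (h *logHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
func (h *logHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	log.Printf("[%s] %T", h.name, s)
}
func (h *logHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (h *logHandler) HandleConn(_ context.Context, s stats.ConnStats) {
	log.Printf("[%s] %T", h.name, s)
}

func main() {
	// Both handlers observe every event; recent grpc-go folds them into a
	// single combined handler internally.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(&logHandler{name: "metrics"}),
		grpc.WithStatsHandler(&logHandler{name: "tracing"}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```

Each registered handler still observes every connection and RPC event; the combiner only changes how they are fanned out internally.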
@@ -142,3 +144,11 @@ func (s *ClientStream) TrailersOnly() bool { func (s *ClientStream) Status() *status.Status { return s.status } + +func (s *ClientStream) requestRead(n int) { + s.ct.adjustWindow(s, uint32(n)) +} + +func (s *ClientStream) updateWindow(n int) { + s.ct.updateWindow(s, uint32(n)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index a2831e5d0..2dcd1e63b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -496,6 +496,16 @@ const ( serverSide ) +// maxWriteBufSize is the maximum length (number of elements) the cached +// writeBuf can grow to. The length depends on the number of buffers +// contained within the BufferSlice produced by the codec, which is +// generally small. +// +// If a writeBuf larger than this limit is required, it will be allocated +// and freed after use, rather than being cached. This avoids holding +// on to large amounts of memory. +const maxWriteBufSize = 64 + // Loopy receives frames from the control buffer. // Each frame is handled individually; most of the work done by loopy goes // into handling data frames. Loopy maintains a queue of active streams, and each @@ -530,6 +540,8 @@ type loopyWriter struct { // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) + + writeBuf [][]byte // cached slice to avoid heap allocations for calls to mem.Reader.Peek. } func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { @@ -665,11 +677,10 @@ func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, - reader: mem.BufferSlice{}.Reader(), + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, } l.estdStreams[h.streamID] = str } @@ -701,11 +712,10 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { } // Case 2: Client wants to originate stream. str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, - reader: mem.BufferSlice{}.Reader(), + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, } return l.originateStream(str, h) } @@ -948,11 +958,11 @@ func (l *loopyWriter) processData() (bool, error) { if str == nil { return true, nil } - reader := str.reader + reader := &str.reader dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. 
if !dataItem.processing { dataItem.processing = true - str.reader.Reset(dataItem.data) + reader.Reset(dataItem.data) dataItem.data.Free() } // A data item is represented by a dataFrame, since it later translates into @@ -964,11 +974,11 @@ func (l *loopyWriter) processData() (bool, error) { if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true - if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { + if err := l.framer.writeData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream - _ = reader.Close() + reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -1001,25 +1011,20 @@ func (l *loopyWriter) processData() (bool, error) { remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize size := hSize + dSize - var buf *[]byte - - if hSize != 0 && dSize == 0 { - buf = &dataItem.h - } else { - // Note: this is only necessary because the http2.Framer does not support - // partially writing a frame, so the sequence must be materialized into a buffer. - // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. - pool := l.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() + l.writeBuf = l.writeBuf[:0] + if hSize > 0 { + l.writeBuf = append(l.writeBuf, dataItem.h[:hSize]) + } + if dSize > 0 { + var err error + l.writeBuf, err = reader.Peek(dSize, l.writeBuf) + if err != nil { + // This must never happen since the reader must have at least dSize + // bytes. + // Log an error to fail tests. + l.logger.Errorf("unexpected error while reading Data frame payload: %v", err) + return false, err } - buf = pool.Get(size) - defer pool.Put(buf) - - copy((*buf)[:hSize], dataItem.h) - _, _ = reader.Read((*buf)[hSize:]) } // Now that outgoing flow controls are checked we can replenish str's write quota @@ -1032,7 +1037,14 @@ func (l *loopyWriter) processData() (bool, error) { if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { + err := l.framer.writeData(dataItem.streamID, endStream, l.writeBuf) + reader.Discard(dSize) + if cap(l.writeBuf) > maxWriteBufSize { + l.writeBuf = nil + } else { + clear(l.writeBuf) + } + if err != nil { return false, err } str.bytesOutStanding += size @@ -1040,7 +1052,7 @@ func (l *loopyWriter) processData() (bool, error) { dataItem.h = dataItem.h[hSize:] if remainingBytes == 0 { // All the data from that message was written out. - _ = reader.Close() + reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index dfc0f224e..7cfbc9637 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -28,7 +28,7 @@ import ( // writeQuota is a soft limit on the amount of data a stream can // schedule before some of it is written out. type writeQuota struct { - quota int32 + _ noCopy // get waits on read from when quota goes less than or equal to zero. // replenish writes on it when quota goes positive again. 
ch chan struct{} @@ -38,16 +38,17 @@ type writeQuota struct { // It is implemented as a field so that it can be updated // by tests. replenish func(n int) + quota int32 } -func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { - w := &writeQuota{ - quota: sz, - ch: make(chan struct{}, 1), - done: done, - } +// init allows a writeQuota to be initialized in-place, which is useful for +// resetting a buffer or for avoiding a heap allocation when the buffer is +// embedded in another struct. +func (w *writeQuota) init(sz int32, done <-chan struct{}) { + w.quota = sz + w.ch = make(chan struct{}, 1) + w.done = done w.replenish = w.realReplenish - return w } func (w *writeQuota) get(sz int32) error { @@ -67,9 +68,9 @@ func (w *writeQuota) get(sz int32) error { func (w *writeQuota) realReplenish(n int) { sz := int32(n) - a := atomic.AddInt32(&w.quota, sz) - b := a - sz - if b <= 0 && a > 0 { + newQuota := atomic.AddInt32(&w.quota, sz) + previousQuota := newQuota - sz + if previousQuota <= 0 && newQuota > 0 { select { case w.ch <- struct{}{}: default: diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index d954a64c3..7ab3422b8 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -170,7 +170,7 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats []stats.Handler + stats stats.Handler logger *grpclog.PrefixLogger bufferPool mem.BufferPool @@ -274,15 +274,13 @@ func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status } }) - if err == nil { // transport has not been closed + if err == nil && ht.stats != nil { // transport has not been closed // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. s.hdrMu.Lock() - for _, sh := range ht.stats { - sh.HandleRPC(s.Context(), &stats.OutTrailer{ - Trailer: s.trailer.Copy(), - }) - } + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) s.hdrMu.Unlock() } ht.Close(errors.New("finished writing status")) @@ -374,19 +372,23 @@ func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) e ht.rw.(http.Flusher).Flush() }) - if err == nil { - for _, sh := range ht.stats { - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - sh.HandleRPC(s.Context(), &stats.OutHeader{ - Header: md.Copy(), - Compression: s.sendCompress, - }) - } + if err == nil && ht.stats != nil { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
+ ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) } return err } +func (ht *serverHandlerTransport) adjustWindow(*ServerStream, uint32) { +} + +func (ht *serverHandlerTransport) updateWindow(*ServerStream, uint32) { +} + func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { // With this transport type there will be exactly 1 stream: this HTTP request. var cancel context.CancelFunc @@ -411,11 +413,9 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req s := &ServerStream{ - Stream: &Stream{ + Stream: Stream{ id: 0, // irrelevant ctx: ctx, - requestRead: func(int) {}, - buf: newRecvBuffer(), method: req.URL.Path, recvCompress: req.Header.Get("grpc-encoding"), contentSubtype: ht.contentSubtype, @@ -424,9 +424,11 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream st: ht, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } - s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, - windowHandler: func(int) {}, + s.Stream.buf.init() + s.readRequester = s + s.trReader = transportReader{ + reader: recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: &s.buf}, + windowHandler: s, } // readerDone is closed when the Body.Read-ing goroutine exits. diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 5467fe971..38ca031af 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -44,6 +44,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/internal/proxyattributes" + istats "google.golang.org/grpc/internal/stats" istatus "google.golang.org/grpc/internal/status" isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" @@ -105,7 +106,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandlers []stats.Handler + statsHandler stats.Handler initialWindowSize int32 @@ -335,14 +336,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts writerDone: make(chan struct{}), goAway: make(chan struct{}), keepaliveDone: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize, opts.BufferPool), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*ClientStream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandlers: opts.StatsHandlers, + statsHandler: istats.NewCombinedHandler(opts.StatsHandlers...), initialWindowSize: initialWindowSize, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, @@ -369,7 +370,7 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts }) t.logger = prefixLoggerForClientTransport(t) // Add peer information to the http2client context. 
- t.ctx = peer.NewContext(t.ctx, t.getPeer()) + t.ctx = peer.NewContext(t.ctx, t.Peer()) if md, ok := addr.Metadata.(*metadata.MD); ok { t.md = *md @@ -386,15 +387,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.statsHandlers { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) - connBegin := &stats.ConnBegin{ + t.statsHandler.HandleConn(t.ctx, &stats.ConnBegin{ Client: true, - } - sh.HandleConn(t.ctx, connBegin) + }) } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) @@ -481,10 +481,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &ClientStream{ - Stream: &Stream{ + Stream: Stream{ method: callHdr.Method, sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), contentSubtype: callHdr.ContentSubtype, }, ct: t, @@ -492,31 +491,26 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt headerChan: make(chan struct{}), doneFunc: callHdr.DoneFunc, } - s.wq = newWriteQuota(defaultWriteQuota, s.done) - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) - } + s.Stream.buf.init() + s.Stream.wq.init(defaultWriteQuota, s.done) + s.readRequester = s // The client side stream context should have exactly the same life cycle with the user provided context. // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. // So we use the original context here instead of creating a copy. s.ctx = ctx - s.trReader = &transportReader{ - reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctx.Done(), - recv: s.buf, - closeStream: func(err error) { - s.Close(err) - }, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) + s.trReader = transportReader{ + reader: recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: &s.buf, + clientStream: s, }, + windowHandler: s, } return s } -func (t *http2Client) getPeer() *peer.Peer { +func (t *http2Client) Peer() *peer.Peer { return &peer.Peer{ Addr: t.remoteAddr, AuthInfo: t.authInfo, // Can be nil @@ -556,6 +550,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) // Make the slice of certain predictable size to reduce allocations made by append. 
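The hfLen arithmetic that follows counts each optional header (retry attempts, compression, accepted compressors, deadline) before allocating the hpack slice, so append never reallocates. The same count-then-allocate pattern in miniature, with illustrative header names:

```go
package main

import "fmt"

// buildHeaders sizes the slice exactly by counting optional entries first,
// so the appends below never grow the backing array.
func buildHeaders(hasDeadline bool, compressor string) []string {
	n := 2 // always present: ":method", ":path" (illustrative names)
	if hasDeadline {
		n++
	}
	if compressor != "" {
		n++
	}
	hdrs := make([]string, 0, n)
	hdrs = append(hdrs, ":method", ":path")
	if hasDeadline {
		hdrs = append(hdrs, "grpc-timeout")
	}
	if compressor != "" {
		hdrs = append(hdrs, "grpc-encoding")
	}
	return hdrs
}

func main() {
	h := buildHeaders(true, "gzip")
	fmt.Println(len(h), cap(h)) // 4 4: capacity was exact, no growth
}
```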
hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te hfLen += len(authData) + len(callAuthData) + registeredCompressors := t.registeredCompressors + if callHdr.AcceptedCompressors != nil { + registeredCompressors = *callHdr.AcceptedCompressors + } + if callHdr.PreviousAttempts > 0 { + hfLen++ + } + if callHdr.SendCompress != "" { + hfLen++ + } + if registeredCompressors != "" { + hfLen++ + } + if _, ok := ctx.Deadline(); ok { + hfLen++ + } headerFields := make([]hpack.HeaderField, 0, hfLen) headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) @@ -568,7 +578,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } - registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) // Include the outgoing compressor name when compressor is not registered @@ -736,7 +745,7 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) { - ctx = peer.NewContext(ctx, t.getPeer()) + ctx = peer.NewContext(ctx, t.Peer()) // ServerName field of the resolver returned address takes precedence over // Host field of CallHdr to determine the :authority header. This is because, @@ -811,7 +820,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS return nil }, onOrphaned: cleanup, - wq: s.wq, + wq: &s.wq, } firstTry := true var ch chan struct{} @@ -842,7 +851,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS transportDrainRequired = t.nextID > MaxStreamID s.id = hdr.streamID - s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + s.fc = inFlow{limit: uint32(t.initialWindowSize)} t.activeStreams[s.id] = s t.mu.Unlock() @@ -893,27 +902,23 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if len(t.statsHandlers) != 0 { + if t.statsHandler != nil { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - for _, sh := range t.statsHandlers { - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - // Note: Creating a new stats object to prevent pollution. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, - } - sh.HandleRPC(s.ctx, outHeader) - } + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
+ t.statsHandler.HandleRPC(s.ctx, &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + }) } if transportDrainRequired { if t.logger.V(logLevel) { @@ -990,6 +995,9 @@ func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode // accessed anymore. func (t *http2Client) Close(err error) { t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) + // For background on the deadline value chosen here, see + // https://github.com/grpc/grpc-go/issues/8425#issuecomment-3057938248 . + t.conn.SetReadDeadline(time.Now().Add(time.Second)) t.mu.Lock() // Make sure we only close once. if t.state == closing { @@ -1051,11 +1059,10 @@ func (t *http2Client) Close(err error) { for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - for _, sh := range t.statsHandlers { - connEnd := &stats.ConnEnd{ + if t.statsHandler != nil { + t.statsHandler.HandleConn(t.ctx, &stats.ConnEnd{ Client: true, - } - sh.HandleConn(t.ctx, connEnd) + }) } } @@ -1166,7 +1173,7 @@ func (t *http2Client) updateFlowControl(n uint32) { }) } -func (t *http2Client) handleData(f *http2.DataFrame) { +func (t *http2Client) handleData(f *parsedDataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { @@ -1210,22 +1217,15 @@ func (t *http2Client) handleData(f *http2.DataFrame) { t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) return } + dataLen := f.data.Len() if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + if w := s.fc.onRead(size - uint32(dataLen)); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - if len(f.Data()) > 0 { - pool := t.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() - } - s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + if dataLen > 0 { + f.data.Ref() + s.write(recvMsg{buffer: f.data}) } } // The server has closed the stream without sending trailers. 
Record that
@@ -1465,17 +1465,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 		contentTypeErr = "malformed header: missing HTTP content-type"
 		grpcMessage    string
 		recvCompress   string
-		httpStatusCode *int
 		httpStatusErr  string
-		rawStatusCode  = codes.Unknown
+		// the code from the grpc-status header, if present
+		grpcStatusCode = codes.Unknown
 		// headerError is set if an error is encountered while parsing the headers
 		headerError string
+		httpStatus  string
 	)
 
-	if initialHeader {
-		httpStatusErr = "malformed header: missing HTTP status"
-	}
-
 	for _, hf := range frame.Fields {
 		switch hf.Name {
 		case "content-type":
@@ -1491,35 +1488,15 @@
 		case "grpc-status":
 			code, err := strconv.ParseInt(hf.Value, 10, 32)
 			if err != nil {
-				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
+				se := status.New(codes.Unknown, fmt.Sprintf("transport: malformed grpc-status: %v", err))
 				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
 				return
 			}
-			rawStatusCode = codes.Code(uint32(code))
+			grpcStatusCode = codes.Code(uint32(code))
 		case "grpc-message":
 			grpcMessage = decodeGrpcMessage(hf.Value)
 		case ":status":
-			if hf.Value == "200" {
-				httpStatusErr = ""
-				statusCode := 200
-				httpStatusCode = &statusCode
-				break
-			}
-
-			c, err := strconv.ParseInt(hf.Value, 10, 32)
-			if err != nil {
-				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
-				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
-				return
-			}
-			statusCode := int(c)
-			httpStatusCode = &statusCode
-
-			httpStatusErr = fmt.Sprintf(
-				"unexpected HTTP status code received from server: %d (%s)",
-				statusCode,
-				http.StatusText(statusCode),
-			)
+			httpStatus = hf.Value
 		default:
 			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
 				break
@@ -1534,25 +1511,52 @@
 		}
 	}
 
-	if !isGRPC || httpStatusErr != "" {
-		var code = codes.Internal // when header does not include HTTP status, return INTERNAL
-
-		if httpStatusCode != nil {
+	// If a non-gRPC response is received, then evaluate the HTTP status to
+	// process the response and close the stream.
+	// If the HTTP status doesn't provide any error information (status: 200),
+	// then evaluate the response code as Unknown.
+	if !isGRPC {
+		var grpcErrorCode = codes.Internal
+		if httpStatus == "" {
+			httpStatusErr = "malformed header: missing HTTP status"
+		} else {
+			// Parse the status code (e.g. "200", "404").
+			statusCode, err := strconv.Atoi(httpStatus)
+			if err != nil {
+				se := status.New(grpcErrorCode, fmt.Sprintf("transport: malformed http-status: %v", err))
+				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+				return
+			}
+			if statusCode >= 100 && statusCode < 200 {
+				if endStream {
+					se := status.New(codes.Internal, fmt.Sprintf(
+						"protocol error: informational header with status code %d must not have END_STREAM set", statusCode))
+					t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+				}
+				// In case of informational headers, return.
+ return + } + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) var ok bool - code, ok = HTTPStatusConvTab[*httpStatusCode] + grpcErrorCode, ok = HTTPStatusConvTab[statusCode] if !ok { - code = codes.Unknown + grpcErrorCode = codes.Unknown } } var errs []string if httpStatusErr != "" { errs = append(errs, httpStatusErr) } + if contentTypeErr != "" { errs = append(errs, contentTypeErr) } - // Verify the HTTP response is a 200. - se := status.New(code, strings.Join(errs, "; ")) + + se := status.New(grpcErrorCode, strings.Join(errs, "; ")) t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) return } @@ -1583,22 +1587,20 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } } - for _, sh := range t.statsHandlers { + if t.statsHandler != nil { if !endStream { - inHeader := &stats.InHeader{ + t.statsHandler.HandleRPC(s.ctx, &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), Header: metadata.MD(mdata).Copy(), Compression: s.recvCompress, - } - sh.HandleRPC(s.ctx, inHeader) + }) } else { - inTrailer := &stats.InTrailer{ + t.statsHandler.HandleRPC(s.ctx, &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), Trailer: metadata.MD(mdata).Copy(), - } - sh.HandleRPC(s.ctx, inTrailer) + }) } } @@ -1606,7 +1608,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) + status := istatus.NewWithProto(grpcStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) // If client received END_STREAM from server while stream was still active, // send RST_STREAM. @@ -1653,7 +1655,7 @@ func (t *http2Client) reader(errCh chan<- error) { // loop to keep reading incoming messages on this transport. 
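The Close path above now pairs the existing write deadline with a short read deadline, the idiomatic way to bound a goroutine blocked in conn.Read during shutdown. A self-contained demonstration with net.Pipe (the real transport uses one second; a shorter value keeps the demo quick):

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	server, client := net.Pipe()
	defer server.Close()
	defer client.Close()

	done := make(chan error, 1)
	go func() {
		buf := make([]byte, 1)
		_, err := client.Read(buf) // blocks: the other side never writes
		done <- err
	}()

	// Shutdown path: bound how long the blocked reader may linger.
	client.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
	fmt.Println(<-done) // a timeout error once the deadline passes
}
```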
for { t.controlBuf.throttle() - frame, err := t.framer.fr.ReadFrame() + frame, err := t.framer.readFrame() if t.keepaliveEnabled { atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) } @@ -1668,7 +1670,7 @@ func (t *http2Client) reader(errCh chan<- error) { if s != nil { // use error detail to provide better err message code := http2ErrConvTab[se.Code] - errorDetail := t.framer.fr.ErrorDetail() + errorDetail := t.framer.errorDetail() var msg string if errorDetail != nil { msg = errorDetail.Error() @@ -1686,8 +1688,9 @@ func (t *http2Client) reader(errCh chan<- error) { switch frame := frame.(type) { case *http2.MetaHeadersFrame: t.operateHeaders(frame) - case *http2.DataFrame: + case *parsedDataFrame: t.handleData(frame) + frame.data.Free() case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: @@ -1807,8 +1810,6 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { } } -func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } - func (t *http2Client) incrMsgSent() { if channelz.IsOn() { t.channelz.SocketMetrics.MessagesSent.Add(1) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 83cee314c..6f78a6b0c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,6 +35,8 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/protobuf/proto" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" @@ -42,7 +44,6 @@ import ( istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/mem" - "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -86,7 +87,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats []stats.Handler + stats stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. @@ -168,7 +169,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize, config.BufferPool) // Send initial settings as connection preface to client. 
isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, @@ -260,7 +261,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*ServerStream), - stats: config.StatsHandlers, + stats: config.StatsHandler, kp: kp, idle: time.Now(), kep: kep, @@ -390,16 +391,15 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade } t.maxStreamID = streamID - buf := newRecvBuffer() s := &ServerStream{ - Stream: &Stream{ - id: streamID, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + Stream: Stream{ + id: streamID, + fc: inFlow{limit: uint32(t.initialWindowSize)}, }, st: t, headerWireLength: int(frame.Header().Length), } + s.Stream.buf.init() var ( // if false, content-type was missing or invalid isGRPC = false @@ -640,25 +640,21 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.channelz.SocketMetrics.StreamsStarted.Add(1) t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano()) } - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) - } + s.readRequester = s s.ctxDone = s.ctx.Done() - s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) - s.trReader = &transportReader{ - reader: &recvBufferReader{ + s.Stream.wq.init(defaultWriteQuota, s.ctxDone) + s.trReader = transportReader{ + reader: recvBufferReader{ ctx: s.ctx, ctxDone: s.ctxDone, - recv: s.buf, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) + recv: &s.buf, }, + windowHandler: s, } // Register the stream with loopy. t.controlBuf.put(®isterStream{ streamID: s.id, - wq: s.wq, + wq: &s.wq, }) handle(s) return nil @@ -674,7 +670,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre }() for { t.controlBuf.throttle() - frame, err := t.framer.fr.ReadFrame() + frame, err := t.framer.readFrame() atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { @@ -711,8 +707,9 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre }) continue } - case *http2.DataFrame: + case *parsedDataFrame: t.handleData(frame) + frame.data.Free() case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: @@ -792,7 +789,7 @@ func (t *http2Server) updateFlowControl(n uint32) { } -func (t *http2Server) handleData(f *http2.DataFrame) { +func (t *http2Server) handleData(f *parsedDataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { @@ -837,22 +834,15 @@ func (t *http2Server) handleData(f *http2.DataFrame) { t.closeStream(s, true, http2.ErrCodeFlowControl, false) return } + dataLen := f.data.Len() if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + if w := s.fc.onRead(size - uint32(dataLen)); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - if len(f.Data()) > 0 { - pool := t.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. 
- pool = mem.DefaultBufferPool() - } - s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + if dataLen > 0 { + f.data.Ref() + s.write(recvMsg{buffer: f.data}) } } if f.StreamEnded() { @@ -1059,14 +1049,13 @@ func (t *http2Server) writeHeaderLocked(s *ServerStream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - for _, sh := range t.stats { + if t.stats != nil { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. - outHeader := &stats.OutHeader{ + t.stats.HandleRPC(s.Context(), &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, - } - sh.HandleRPC(s.Context(), outHeader) + }) } return nil } @@ -1134,10 +1123,10 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - for _, sh := range t.stats { + if t.stats != nil { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - sh.HandleRPC(s.Context(), &stats.OutTrailer{ + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -1305,7 +1294,8 @@ func (t *http2Server) Close(err error) { // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { t.mu.Lock() - if _, ok := t.activeStreams[s.id]; ok { + _, isActive := t.activeStreams[s.id] + if isActive { delete(t.activeStreams, s.id) if len(t.activeStreams) == 0 { t.idle = time.Now() @@ -1313,7 +1303,7 @@ func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { } t.mu.Unlock() - if channelz.IsOn() { + if isActive && channelz.IsOn() { if eosReceived { t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index e3663f87f..5bbb641ad 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -25,7 +25,6 @@ import ( "fmt" "io" "math" - "net" "net/http" "net/url" "strconv" @@ -37,6 +36,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" ) const ( @@ -300,11 +300,11 @@ type bufWriter struct { buf []byte offset int batchSize int - conn net.Conn + conn io.Writer err error } -func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { +func newBufWriter(conn io.Writer, batchSize int, pool *sync.Pool) *bufWriter { w := &bufWriter{ batchSize: batchSize, conn: conn, @@ -388,15 +388,29 @@ func toIOError(err error) error { return ioError{error: err} } +type parsedDataFrame struct { + http2.FrameHeader + data mem.Buffer +} + +func (df *parsedDataFrame) StreamEnded() bool { + return df.FrameHeader.Flags.Has(http2.FlagDataEndStream) +} + type framer struct { - writer *bufWriter - fr *http2.Framer + writer *bufWriter + fr *http2.Framer + headerBuf []byte // cached slice for framer headers to reduce heap allocs. + reader io.Reader + dataFrame parsedDataFrame // Cached data frame to avoid heap allocations. 
+ pool mem.BufferPool + errDetail error } var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { +func newFramer(conn io.ReadWriter, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32, memPool mem.BufferPool) *framer { if writeBufferSize < 0 { writeBufferSize = 0 } @@ -412,6 +426,8 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu f := &framer{ writer: w, fr: http2.NewFramer(w, r), + reader: r, + pool: memPool, } f.fr.SetMaxReadFrameSize(http2MaxFrameLen) // Opt-in to Frame reuse API on framer to reduce garbage. @@ -422,6 +438,146 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu return f } +// writeData writes a DATA frame. +// +// It is the caller's responsibility not to violate the maximum frame size. +func (f *framer) writeData(streamID uint32, endStream bool, data [][]byte) error { + var flags http2.Flags + if endStream { + flags = http2.FlagDataEndStream + } + length := uint32(0) + for _, d := range data { + length += uint32(len(d)) + } + // TODO: Replace the header write with the framer API being added in + // https://github.com/golang/go/issues/66655. + f.headerBuf = append(f.headerBuf[:0], + byte(length>>16), + byte(length>>8), + byte(length), + byte(http2.FrameData), + byte(flags), + byte(streamID>>24), + byte(streamID>>16), + byte(streamID>>8), + byte(streamID)) + if _, err := f.writer.Write(f.headerBuf); err != nil { + return err + } + for _, d := range data { + if _, err := f.writer.Write(d); err != nil { + return err + } + } + return nil +} + +// readFrame reads a single frame. The returned Frame is only valid +// until the next call to readFrame. +func (f *framer) readFrame() (any, error) { + f.errDetail = nil + fh, err := f.fr.ReadFrameHeader() + if err != nil { + f.errDetail = f.fr.ErrorDetail() + return nil, err + } + // Read the data frame directly from the underlying io.Reader to avoid + // copies. + if fh.Type == http2.FrameData { + err = f.readDataFrame(fh) + return &f.dataFrame, err + } + fr, err := f.fr.ReadFrameForHeader(fh) + if err != nil { + f.errDetail = f.fr.ErrorDetail() + return nil, err + } + return fr, err +} + +// errorDetail returns a more detailed error of the last error +// returned by framer.readFrame. For instance, if readFrame +// returns a StreamError with code PROTOCOL_ERROR, errorDetail +// will say exactly what was invalid. errorDetail is not guaranteed +// to return a non-nil value. +// errorDetail is reset after the next call to readFrame. +func (f *framer) errorDetail() error { + return f.errDetail +} + +func (f *framer) readDataFrame(fh http2.FrameHeader) (err error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + f.errDetail = errors.New("DATA frame with stream ID 0") + return http2.ConnectionError(http2.ErrCodeProtocol) + } + // Converting a *[]byte to a mem.SliceBuffer incurs a heap allocation. This + // conversion is performed by mem.NewBuffer. To avoid the extra allocation + // a []byte is allocated directly if required and cast to a mem.SliceBuffer. + var buf []byte + // poolHandle is the pointer returned by the buffer pool (if it's used.). 
+ var poolHandle *[]byte + useBufferPool := !mem.IsBelowBufferPoolingThreshold(int(fh.Length)) + if useBufferPool { + poolHandle = f.pool.Get(int(fh.Length)) + buf = *poolHandle + defer func() { + if err != nil { + f.pool.Put(poolHandle) + } + }() + } else { + buf = make([]byte, int(fh.Length)) + } + if fh.Flags.Has(http2.FlagDataPadded) { + if fh.Length == 0 { + return io.ErrUnexpectedEOF + } + // This initial 1-byte read can be inefficient for unbuffered readers, + // but it allows the rest of the payload to be read directly to the + // start of the destination slice. This makes it easy to return the + // original slice back to the buffer pool. + if _, err := io.ReadFull(f.reader, buf[:1]); err != nil { + return err + } + padSize := buf[0] + buf = buf[:len(buf)-1] + if int(padSize) > len(buf) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + f.errDetail = errors.New("pad size larger than data payload") + return http2.ConnectionError(http2.ErrCodeProtocol) + } + if _, err := io.ReadFull(f.reader, buf); err != nil { + return err + } + buf = buf[:len(buf)-int(padSize)] + } else if _, err := io.ReadFull(f.reader, buf); err != nil { + return err + } + + f.dataFrame.FrameHeader = fh + if useBufferPool { + // Update the handle to point to the (potentially re-sliced) buf. + *poolHandle = buf + f.dataFrame.data = mem.NewBuffer(poolHandle, f.pool) + } else { + f.dataFrame.data = mem.SliceBuffer(buf) + } + return nil +} + +func (df *parsedDataFrame) Header() http2.FrameHeader { + return df.FrameHeader +} + func getWriteBufferPool(size int) *sync.Pool { writeBufferMutex.Lock() defer writeBufferMutex.Unlock() diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go index cf8da0b52..ed6a13b75 100644 --- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -32,7 +32,7 @@ import ( // ServerStream implements streaming functionality for a gRPC server. type ServerStream struct { - *Stream // Embed for common stream functionality. + Stream // Embed for common stream functionality. st internalServerTransport ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) @@ -43,12 +43,13 @@ type ServerStream struct { // Holds compressor names passed in grpc-accept-encoding metadata from the // client. clientAdvertisedCompressors string - headerWireLength int // hdrMu protects outgoing header and trailer metadata. hdrMu sync.Mutex header metadata.MD // the outgoing header metadata. Updated by WriteHeader. headerSent atomic.Bool // atomically set when the headers are sent out. + + headerWireLength int } // Read reads an n byte message from the input stream. 
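readDataFrame above reads the one-byte pad length first so the remaining payload lands at the start of the pooled buffer. A standalone sketch of that padded-DATA layout (RFC 7540 §6.1): a pad-length byte, the payload, then padding to strip:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// readPadded reads a padded DATA payload of frameLen bytes and returns the
// payload with the pad-length byte and trailing padding removed.
func readPadded(r io.Reader, frameLen int) ([]byte, error) {
	buf := make([]byte, frameLen)
	if _, err := io.ReadFull(r, buf[:1]); err != nil {
		return nil, err
	}
	padSize := int(buf[0])
	buf = buf[:len(buf)-1] // the pad-length byte is not part of the payload
	if padSize > len(buf) {
		return nil, fmt.Errorf("pad size %d larger than remaining payload %d", padSize, len(buf))
	}
	// The rest of the frame (payload plus padding) overwrites the buffer
	// from index 0, mirroring the vendored code's approach.
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, err
	}
	return buf[:len(buf)-padSize], nil
}

func main() {
	// 1 pad-length byte (2), 5 payload bytes, 2 padding bytes: frameLen = 8.
	frame := append([]byte{2}, []byte("hello\x00\x00")...)
	payload, err := readPadded(bytes.NewReader(frame), len(frame))
	fmt.Printf("%q %v\n", payload, err) // "hello" <nil>
}
```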
@@ -178,3 +179,11 @@ func (s *ServerStream) SetTrailer(md metadata.MD) error { s.hdrMu.Unlock() return nil } + +func (s *ServerStream) requestRead(n int) { + s.st.adjustWindow(s, uint32(n)) +} + +func (s *ServerStream) updateWindow(n int) { + s.st.updateWindow(s, uint32(n)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 7dd53e80a..6daf1e002 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -68,11 +68,11 @@ type recvBuffer struct { err error } -func newRecvBuffer() *recvBuffer { - b := &recvBuffer{ - c: make(chan recvMsg, 1), - } - return b +// init allows a recvBuffer to be initialized in-place, which is useful +// for resetting a buffer or for avoiding a heap allocation when the buffer +// is embedded in another struct. +func (b *recvBuffer) init() { + b.c = make(chan recvMsg, 1) } func (b *recvBuffer) put(r recvMsg) { @@ -123,12 +123,13 @@ func (b *recvBuffer) get() <-chan recvMsg { // recvBufferReader implements io.Reader interface to read the data from // recvBuffer. type recvBufferReader struct { - closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. - ctx context.Context - ctxDone <-chan struct{} // cache of ctx.Done() (for performance). - recv *recvBuffer - last mem.Buffer // Stores the remaining data in the previous calls. - err error + _ noCopy + clientStream *ClientStream // The client transport stream is closed with a status representing ctx.Err() and nil trailer metadata. + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last mem.Buffer // Stores the remaining data in the previous calls. + err error } func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { @@ -139,7 +140,7 @@ func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { n, r.last = mem.ReadUnsafe(header, r.last) return n, nil } - if r.closeStream != nil { + if r.clientStream != nil { n, r.err = r.readMessageHeaderClient(header) } else { n, r.err = r.readMessageHeader(header) @@ -164,7 +165,7 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { } return buf, nil } - if r.closeStream != nil { + if r.clientStream != nil { buf, r.err = r.readClient(n) } else { buf, r.err = r.read(n) @@ -209,7 +210,7 @@ func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err er // TODO: delaying ctx error seems like a unnecessary side effect. What // we really want is to mark the stream as done, and return ctx error // faster. - r.closeStream(ContextErr(r.ctx.Err())) + r.clientStream.Close(ContextErr(r.ctx.Err())) m := <-r.recv.get() return r.readMessageHeaderAdditional(m, header) case m := <-r.recv.get(): @@ -236,7 +237,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // TODO: delaying ctx error seems like a unnecessary side effect. What // we really want is to mark the stream as done, and return ctx error // faster. - r.closeStream(ContextErr(r.ctx.Err())) + r.clientStream.Close(ContextErr(r.ctx.Err())) m := <-r.recv.get() return r.readAdditional(m, n) case m := <-r.recv.get(): @@ -285,27 +286,32 @@ const ( // Stream represents an RPC in the transport layer. 
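recvBuffer.init above, like writeQuota.init earlier in this diff, trades a constructor returning a pointer for in-place initialization of a value field, collapsing several per-stream heap allocations into one. The shape of that refactor in miniature:

```go
package main

import "fmt"

type recvBuf struct{ c chan int }

// init sets up the buffer in place, replacing a newRecvBuffer-style
// constructor that would force a separate heap allocation.
func (b *recvBuf) init() { b.c = make(chan int, 1) }

type stream struct {
	buf recvBuf // value field: allocated together with the stream itself
}

func main() {
	s := &stream{} // one allocation covers the stream and its buffer
	s.buf.init()
	s.buf.c <- 42
	fmt.Println(<-s.buf.c)
}
```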
 type Stream struct {
-	id           uint32
 	ctx          context.Context // the associated context of the stream
 	method       string          // the associated RPC method of the stream
 	recvCompress string
 	sendCompress string
-	buf          *recvBuffer
-	trReader     *transportReader
-	fc           *inFlow
-	wq           *writeQuota
-
-	// Callback to state application's intentions to read data. This
-	// is used to adjust flow control, if needed.
-	requestRead func(int)
-
-	state streamState
+	readRequester readRequester
 
 	// contentSubtype is the content-subtype for requests.
 	// this must be lowercase or the behavior is undefined.
 	contentSubtype string
 
 	trailer metadata.MD // the key-value map of trailer metadata.
+
+	// Non-pointer fields are at the end to optimize GC performance.
+	state    streamState
+	id       uint32
+	buf      recvBuffer
+	trReader transportReader
+	fc       inFlow
+	wq       writeQuota
+}
+
+// readRequester is used to state application's intentions to read data. This
+// is used to adjust flow control, if needed.
+type readRequester interface {
+	requestRead(int)
 }
 
 func (s *Stream) swapState(st streamState) streamState {
@@ -355,7 +361,7 @@ func (s *Stream) ReadMessageHeader(header []byte) (err error) {
 	if er := s.trReader.er; er != nil {
 		return er
 	}
-	s.requestRead(len(header))
+	s.readRequester.requestRead(len(header))
 	for len(header) != 0 {
 		n, err := s.trReader.ReadMessageHeader(header)
 		header = header[n:]
@@ -378,7 +384,7 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
 	if er := s.trReader.er; er != nil {
 		return nil, er
 	}
-	s.requestRead(n)
+	s.readRequester.requestRead(n)
 	for n != 0 {
 		buf, err := s.trReader.Read(n)
 		var bufLen int
@@ -401,16 +407,34 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
 	return data, nil
 }
 
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct{}
+
+func (*noCopy) Lock()   {}
+func (*noCopy) Unlock() {}
+
 // transportReader reads all the data available for this Stream from the transport and
 // passes them into the decoder, which converts them into a gRPC message stream.
 // The error is io.EOF when the stream is done or another non-nil error if
 // the stream broke.
 type transportReader struct {
-	reader *recvBufferReader
+	_ noCopy
 	// The handler to control the window update procedure for both this
 	// particular stream and the associated transport.
-	windowHandler func(int)
+	windowHandler windowHandler
 	er            error
+	reader        recvBufferReader
+}
+
+// The handler to control the window update procedure for both this
+// particular stream and the associated transport.
+type windowHandler interface {
+	updateWindow(int)
 }
 
 func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
@@ -419,7 +443,7 @@ func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
 		t.er = err
 		return 0, err
 	}
-	t.windowHandler(n)
+	t.windowHandler.updateWindow(n)
 	return n, nil
 }
 
@@ -429,7 +453,7 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) {
 		t.er = err
 		return buf, err
 	}
-	t.windowHandler(buf.Len())
+	t.windowHandler.updateWindow(buf.Len())
 	return buf, nil
 }
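Editor's note: the `_ noCopy` fields introduced above (and again in `parser` and `recvBufferReader` below) guard the new by-value embedding: once these structs are embedded rather than pointed to, an accidental copy would be a bug. The trick relies on `go vet`'s copylocks check. A minimal illustrative sketch:

```go
package main

// noCopy triggers `go vet`'s copylocks analysis: because it has Lock and
// Unlock methods, vet reports any value copy of a struct embedding it.
// It adds no runtime behavior and occupies zero bytes.
type noCopy struct{}

func (*noCopy) Lock()   {}
func (*noCopy) Unlock() {}

type reader struct {
	_   noCopy
	pos int
}

func main() {
	r := reader{pos: 1}
	r2 := r // `go vet` reports: assignment copies lock value to r2
	_ = r2
}
```

The code compiles and runs; the protection is purely static, which is why the diff can still construct these structs in place with `init()`-style methods.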
@@ -454,7 +478,7 @@ type ServerConfig struct {
 	ConnectionTimeout time.Duration
 	Credentials       credentials.TransportCredentials
 	InTapHandle       tap.ServerInHandle
-	StatsHandlers     []stats.Handler
+	StatsHandler      stats.Handler
 	KeepaliveParams   keepalive.ServerParameters
 	KeepalivePolicy   keepalive.EnforcementPolicy
 	InitialWindowSize int32
@@ -529,6 +553,12 @@ type CallHdr struct {
 	// outbound message.
 	SendCompress string
 
+	// AcceptedCompressors overrides the grpc-accept-encoding header for this
+	// call. When nil, the transport advertises the default set of registered
+	// compressors. A non-nil pointer overrides that value (including the empty
+	// string to advertise none).
+	AcceptedCompressors *string
+
 	// Creds specifies credentials.PerRPCCredentials for a call.
 	Creds credentials.PerRPCCredentials
 
@@ -584,8 +614,9 @@ type ClientTransport interface {
 	// with a human readable string with debug info.
 	GetGoAwayReason() (GoAwayReason, string)
 
-	// RemoteAddr returns the remote network address.
-	RemoteAddr() net.Addr
+	// Peer returns information about the peer associated with the Transport.
+	// The returned information includes authentication and network address details.
+	Peer() *peer.Peer
 }
 
 // ServerTransport is the common interface for all gRPC server-side transport
@@ -615,6 +646,8 @@ type internalServerTransport interface {
 	write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error
 	writeStatus(s *ServerStream, st *status.Status) error
 	incrMsgRecv()
+	adjustWindow(s *ServerStream, n uint32)
+	updateWindow(s *ServerStream, n uint32)
 }
 
 // connectionErrorf creates an ConnectionError with the specified error description.
diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go
index c37c58c02..e37afdd19 100644
--- a/vendor/google.golang.org/grpc/mem/buffer_pool.go
+++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go
@@ -32,12 +32,17 @@ type BufferPool interface {
 	Get(length int) *[]byte
 
 	// Put returns a buffer to the pool.
+	//
+	// The provided pointer must hold a prefix of the buffer obtained via
+	// BufferPool.Get to ensure the buffer's entire capacity can be re-used.
 	Put(*[]byte)
 }
 
+const goPageSize = 4 << 10 // 4KiB. N.B. this must be a power of 2.
+
 var defaultBufferPoolSizes = []int{
 	256,
-	4 << 10,  // 4KB (go page size)
+	goPageSize,
 	16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
 	32 << 10, // 32KB (default buffer size for io.Copy)
 	1 << 20,  // 1MB
@@ -118,7 +123,11 @@ type sizedBufferPool struct {
 }
 
 func (p *sizedBufferPool) Get(size int) *[]byte {
-	buf := p.pool.Get().(*[]byte)
+	buf, ok := p.pool.Get().(*[]byte)
+	if !ok {
+		buf := make([]byte, size, p.defaultSize)
+		return &buf
+	}
 	b := *buf
 	clear(b[:cap(b)])
 	*buf = b[:size]
@@ -137,12 +146,6 @@ func (p *sizedBufferPool) Put(buf *[]byte) {
 
 func newSizedBufferPool(size int) *sizedBufferPool {
 	return &sizedBufferPool{
-		pool: sync.Pool{
-			New: func() any {
-				buf := make([]byte, size)
-				return &buf
-			},
-		},
 		defaultSize: size,
 	}
 }
@@ -160,6 +163,7 @@ type simpleBufferPool struct {
 
 func (p *simpleBufferPool) Get(size int) *[]byte {
 	bs, ok := p.pool.Get().(*[]byte)
 	if ok && cap(*bs) >= size {
+		clear((*bs)[:cap(*bs)])
 		*bs = (*bs)[:size]
 		return bs
 	}
@@ -170,7 +174,14 @@ func (p *simpleBufferPool) Get(size int) *[]byte {
 		p.pool.Put(bs)
 	}
 
-	b := make([]byte, size)
+	// If we're going to allocate, round up to the nearest page. This way if
+	// requests frequently arrive with small variation we don't allocate
+	// repeatedly if we get unlucky and they increase over time. By default we
+	// only allocate here if size > 1MiB. Because goPageSize is a power of 2, we
+	// can round up efficiently.
+	allocSize := (size + goPageSize - 1) & ^(goPageSize - 1)
+
+	b := make([]byte, size, allocSize)
	return &b
 }
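Editor's note: the `(size + goPageSize - 1) &^ (goPageSize - 1)` expression above rounds an allocation up to the next multiple of a power of two without division, which is why the comment insists `goPageSize` must be a power of 2. A small standalone check of the arithmetic (values illustrative):

```go
package main

import "fmt"

const pageSize = 4 << 10 // must be a power of two for the mask trick

// roundUp returns the smallest multiple of pageSize that is >= n.
// Adding pageSize-1 and clearing the low bits rounds up in one step.
func roundUp(n int) int {
	return (n + pageSize - 1) &^ (pageSize - 1)
}

func main() {
	for _, n := range []int{1, 4096, 4097, 1<<20 + 1} {
		fmt.Printf("%8d -> %8d\n", n, roundUp(n))
	}
	// Output:
	//        1 ->     4096
	//     4096 ->     4096
	//     4097 ->     8192
	//  1048577 ->  1052672
}
```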
diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
index af510d20c..084fb19c6 100644
--- a/vendor/google.golang.org/grpc/mem/buffer_slice.go
+++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go
@@ -19,6 +19,7 @@
 package mem
 
 import (
+	"fmt"
 	"io"
 )
 
@@ -117,43 +118,36 @@ func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
 
 // Reader returns a new Reader for the input slice after taking references to
 // each underlying buffer.
-func (s BufferSlice) Reader() Reader {
+func (s BufferSlice) Reader() *Reader {
 	s.Ref()
-	return &sliceReader{
+	return &Reader{
 		data: s,
 		len:  s.Len(),
 	}
 }
 
 // Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
-// with other parts systems. It also provides an additional convenience method
-// Remaining(), which returns the number of unread bytes remaining in the slice.
+// with other systems.
+//
 // Buffers will be freed as they are read.
-type Reader interface {
-	io.Reader
-	io.ByteReader
-	// Close frees the underlying BufferSlice and never returns an error. Subsequent
-	// calls to Read will return (0, io.EOF).
-	Close() error
-	// Remaining returns the number of unread bytes remaining in the slice.
-	Remaining() int
-	// Reset frees the currently held buffer slice and starts reading from the
-	// provided slice. This allows reusing the reader object.
-	Reset(s BufferSlice)
-}
-
-type sliceReader struct {
+//
+// A Reader can be constructed from a BufferSlice; alternatively the zero value
+// of a Reader may be used after calling Reset on it.
+type Reader struct {
 	data BufferSlice
 	len  int
 	// The index into data[0].ReadOnlyData().
 	bufferIdx int
 }
 
-func (r *sliceReader) Remaining() int {
+// Remaining returns the number of unread bytes remaining in the slice.
+func (r *Reader) Remaining() int {
 	return r.len
 }
 
-func (r *sliceReader) Reset(s BufferSlice) {
+// Reset frees the currently held buffer slice and starts reading from the
+// provided slice. This allows reusing the reader object.
+func (r *Reader) Reset(s BufferSlice) {
 	r.data.Free()
 	s.Ref()
 	r.data = s
@@ -161,14 +155,16 @@
 	r.bufferIdx = 0
 }
 
-func (r *sliceReader) Close() error {
+// Close frees the underlying BufferSlice and never returns an error. Subsequent
+// calls to Read will return (0, io.EOF).
+func (r *Reader) Close() error {
 	r.data.Free()
 	r.data = nil
 	r.len = 0
 	return nil
 }
 
-func (r *sliceReader) freeFirstBufferIfEmpty() bool {
+func (r *Reader) freeFirstBufferIfEmpty() bool {
 	if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
 		return false
 	}
@@ -179,7 +175,7 @@
 	return true
 }
 
-func (r *sliceReader) Read(buf []byte) (n int, _ error) {
+func (r *Reader) Read(buf []byte) (n int, _ error) {
 	if r.len == 0 {
 		return 0, io.EOF
 	}
@@ -202,7 +198,8 @@ func (r *Reader) Read(buf []byte) (n int, _ error) {
 	return n, nil
 }
 
-func (r *sliceReader) ReadByte() (byte, error) {
+// ReadByte reads a single byte.
+func (r *Reader) ReadByte() (byte, error) {
 	if r.len == 0 {
 		return 0, io.EOF
 	}
@@ -290,3 +287,59 @@ nextBuffer:
 		}
 	}
 }
+
+// Discard skips the next n bytes, returning the number of bytes discarded.
+//
+// It frees buffers as they are fully consumed.
+//
+// If Discard skips fewer than n bytes, it also returns an error.
+func (r *Reader) Discard(n int) (discarded int, err error) {
+	total := n
+	for n > 0 && r.len > 0 {
+		curData := r.data[0].ReadOnlyData()
+		curSize := min(n, len(curData)-r.bufferIdx)
+		n -= curSize
+		r.len -= curSize
+		r.bufferIdx += curSize
+		if r.bufferIdx >= len(curData) {
+			r.data[0].Free()
+			r.data = r.data[1:]
+			r.bufferIdx = 0
+		}
+	}
+	discarded = total - n
+	if n > 0 {
+		return discarded, fmt.Errorf("insufficient bytes in reader")
+	}
+	return discarded, nil
+}
+
+// Peek returns the next n bytes without advancing the reader.
+//
+// Peek appends results to the provided res slice and returns the updated slice.
+// This pattern allows re-using the storage of res if it has sufficient
+// capacity.
+//
+// The returned subslices are views into the underlying buffers and are only
+// valid until the reader is advanced past the corresponding buffer.
+//
+// If Peek returns fewer than n bytes, it also returns an error.
+func (r *Reader) Peek(n int, res [][]byte) ([][]byte, error) {
+	for i := 0; n > 0 && i < len(r.data); i++ {
+		curData := r.data[i].ReadOnlyData()
+		start := 0
+		if i == 0 {
+			start = r.bufferIdx
+		}
+		curSize := min(n, len(curData)-start)
+		if curSize == 0 {
+			continue
+		}
+		res = append(res, curData[start:start+curSize])
+		n -= curSize
+	}
+	if n > 0 {
+		return nil, fmt.Errorf("insufficient bytes in reader")
+	}
+	return res, nil
+}
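Editor's note: `Peek` returns one subslice per underlying buffer it touches, which is why it appends to a caller-supplied `res` slice instead of copying. A short usage sketch, assuming the `mem` package API exactly as added in this patch (grpc-go at the vendored version or newer):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	// Two buffers backing one logical byte stream.
	s := mem.BufferSlice{
		mem.SliceBuffer([]byte("hello ")),
		mem.SliceBuffer([]byte("world")),
	}
	r := s.Reader()
	defer r.Close()

	// Peek does not advance: crossing a buffer boundary yields more
	// than one view, valid only until the reader moves past them.
	views, _ := r.Peek(8, nil)
	fmt.Printf("%q\n", views) // ["hello " "wo"]

	// Discard advances and frees fully-consumed buffers.
	r.Discard(6)
	rest := make([]byte, r.Remaining())
	r.Read(rest)
	fmt.Println(string(rest)) // world
}
```

Passing a previously-returned `res` back into `Peek` reuses its storage, avoiding a per-call allocation in hot paths.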
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
index ee0ff969a..1e783febf 100644
--- a/vendor/google.golang.org/grpc/preloader.go
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -47,9 +47,6 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
 	}
 
 	// check if the context has the relevant information to prepareMsg
-	if rpcInfo.preloaderInfo == nil {
-		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
-	}
 	if rpcInfo.preloaderInfo.codec == nil {
 		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
 	}
diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
index 80e16a327..6e6137643 100644
--- a/vendor/google.golang.org/grpc/resolver_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_wrapper.go
@@ -69,6 +69,7 @@ func (ccr *ccResolverWrapper) start() error {
 	errCh := make(chan error)
 	ccr.serializer.TrySchedule(func(ctx context.Context) {
 		if ctx.Err() != nil {
+			errCh <- ctx.Err()
 			return
 		}
 		opts := resolver.BuildOptions{
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 47ea09f5c..8160f9430 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -33,6 +33,8 @@ import (
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/encoding/proto"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/grpcutil"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/metadata"
@@ -41,6 +43,10 @@ import (
 	"google.golang.org/grpc/status"
 )
 
+func init() {
+	internal.AcceptCompressors = acceptCompressors
+}
+
 // Compressor defines the interface gRPC uses to compress a message.
 //
 // Deprecated: use package encoding.
@@ -151,16 +157,32 @@ func (d *gzipDecompressor) Type() string {
 
 // callInfo contains all related configuration and information about an RPC.
 type callInfo struct {
-	compressorName        string
-	failFast              bool
-	maxReceiveMessageSize *int
-	maxSendMessageSize    *int
-	creds                 credentials.PerRPCCredentials
-	contentSubtype        string
-	codec                 baseCodec
-	maxRetryRPCBufferSize int
-	onFinish              []func(err error)
-	authority             string
+	compressorName              string
+	failFast                    bool
+	maxReceiveMessageSize       *int
+	maxSendMessageSize          *int
+	creds                       credentials.PerRPCCredentials
+	contentSubtype              string
+	codec                       baseCodec
+	maxRetryRPCBufferSize       int
+	onFinish                    []func(err error)
+	authority                   string
+	acceptedResponseCompressors []string
+}
+
+func acceptedCompressorAllows(allowed []string, name string) bool {
+	if allowed == nil {
+		return true
+	}
+	if name == "" || name == encoding.Identity {
+		return true
+	}
+	for _, a := range allowed {
+		if a == name {
+			return true
+		}
+	}
+	return false
 }
 
 func defaultCallInfo() *callInfo {
@@ -170,6 +192,29 @@
 	}
 }
 
+func newAcceptedCompressionConfig(names []string) ([]string, error) {
+	if len(names) == 0 {
+		return nil, nil
+	}
+	var allowed []string
+	seen := make(map[string]struct{}, len(names))
+	for _, name := range names {
+		name = strings.TrimSpace(name)
+		if name == "" || name == encoding.Identity {
+			continue
+		}
+		if !grpcutil.IsCompressorNameRegistered(name) {
+			return nil, status.Errorf(codes.InvalidArgument, "grpc: compressor %q is not registered", name)
+		}
+		if _, dup := seen[name]; dup {
+			continue
+		}
+		seen[name] = struct{}{}
+		allowed = append(allowed, name)
+	}
+	return allowed, nil
+}
+
 // CallOption configures a Call before it starts or extracts information from
 // a Call after it completes.
 type CallOption interface {
@@ -471,6 +516,31 @@ func (o CompressorCallOption) before(c *callInfo) error {
 }
 func (o CompressorCallOption) after(*callInfo, *csAttempt) {}
 
+// acceptCompressors returns a CallOption that limits the compression algorithms
+// advertised in the grpc-accept-encoding header for response messages.
+// Compression algorithms not in the provided list will not be advertised, and
+// responses compressed with non-listed algorithms will be rejected.
+func acceptCompressors(names ...string) CallOption {
+	cp := append([]string(nil), names...)
+	return acceptCompressorsCallOption{names: cp}
+}
+
+// acceptCompressorsCallOption is a CallOption that limits response compression.
+type acceptCompressorsCallOption struct {
+	names []string
+}
+
+func (o acceptCompressorsCallOption) before(c *callInfo) error {
+	allowed, err := newAcceptedCompressionConfig(o.names)
+	if err != nil {
+		return err
+	}
+	c.acceptedResponseCompressors = allowed
+	return nil
+}
+
+func (acceptCompressorsCallOption) after(*callInfo, *csAttempt) {}
+
 // CallContentSubtype returns a CallOption that will set the content-subtype
 // for a call. For example, if content-subtype is "json", the Content-Type over
 // the wire will be "application/grpc+json". The content-subtype is converted
@@ -657,8 +727,20 @@ type streamReader interface {
 	Read(n int) (mem.BufferSlice, error)
 }
 
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct{}
+
+func (*noCopy) Lock()   {}
+func (*noCopy) Unlock() {}
+
 // parser reads complete gRPC messages from the underlying reader.
 type parser struct {
+	_ noCopy
 	// r is the underlying reader.
 	// See the comment on recvMsg for the permissible
 	// error types.
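Editor's note: two rules govern the new compressor allowlist above. `newAcceptedCompressionConfig` normalizes input (trim, drop identity, dedupe, reject unregistered names), and `acceptedCompressorAllows` treats a nil list as "no restriction" while always permitting uncompressed responses. A condensed standalone restatement of the matching rule (the registry check is omitted here):

```go
package main

import "fmt"

const identity = "identity"

// allows mirrors acceptedCompressorAllows: a nil allowlist means no
// restriction was configured, and identity (no compression) is always
// acceptable regardless of the list contents.
func allows(allowed []string, name string) bool {
	if allowed == nil || name == "" || name == identity {
		return true
	}
	for _, a := range allowed {
		if a == name {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(allows(nil, "zstd"))              // true: no allowlist set
	fmt.Println(allows([]string{"gzip"}, "gzip")) // true: listed
	fmt.Println(allows([]string{"gzip"}, "zstd")) // false: response rejected
	fmt.Println(allows([]string{"gzip"}, ""))     // true: uncompressed
}
```

Note the asymmetry: an empty-but-non-nil list still allows identity, so a server can never be forced into an unusable state by this option.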
@@ -845,8 +927,7 @@ func (p *payloadInfo) free() {
 // the buffer is no longer needed.
 // TODO: Refactor this function to reduce the number of arguments.
 // See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists
-func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
-) (out mem.BufferSlice, err error) {
+func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) (out mem.BufferSlice, err error) {
 	pf, compressed, err := p.recvMsg(maxReceiveMessageSize)
 	if err != nil {
 		return nil, err
@@ -949,7 +1030,7 @@ func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxR
 
 // Information about RPC
 type rpcInfo struct {
 	failfast      bool
-	preloaderInfo *compressorInfo
+	preloaderInfo compressorInfo
 }
 
 // Information about Preloader
@@ -968,7 +1049,7 @@ type rpcInfoContextKey struct{}
 func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context {
 	return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{
 		failfast: failfast,
-		preloaderInfo: &compressorInfo{
+		preloaderInfo: compressorInfo{
 			codec: codec,
 			cp:    cp,
 			comp:  comp,
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 1da2a542a..ddd377341 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -124,7 +124,8 @@ type serviceInfo struct {
 
 // Server is a gRPC server to serve RPC requests.
 type Server struct {
-	opts serverOptions
+	opts         serverOptions
+	statsHandler stats.Handler
 
 	mu  sync.Mutex // guards following
 	lis map[net.Listener]bool
@@ -692,13 +693,14 @@ func NewServer(opt ...ServerOption) *Server {
 		o.apply(&opts)
 	}
 	s := &Server{
-		lis:      make(map[net.Listener]bool),
-		opts:     opts,
-		conns:    make(map[string]map[transport.ServerTransport]bool),
-		services: make(map[string]*serviceInfo),
-		quit:     grpcsync.NewEvent(),
-		done:     grpcsync.NewEvent(),
-		channelz: channelz.RegisterServer(""),
+		lis:          make(map[net.Listener]bool),
+		opts:         opts,
+		statsHandler: istats.NewCombinedHandler(opts.statsHandlers...),
+		conns:        make(map[string]map[transport.ServerTransport]bool),
+		services:     make(map[string]*serviceInfo),
+		quit:         grpcsync.NewEvent(),
+		done:         grpcsync.NewEvent(),
+		channelz:     channelz.RegisterServer(""),
 	}
 	chainUnaryServerInterceptors(s)
 	chainStreamServerInterceptors(s)
@@ -999,7 +1001,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
 		ConnectionTimeout: s.opts.connectionTimeout,
 		Credentials:       s.opts.creds,
 		InTapHandle:       s.opts.inTapHandle,
-		StatsHandlers:     s.opts.statsHandlers,
+		StatsHandler:      s.statsHandler,
 		KeepaliveParams:   s.opts.keepaliveParams,
 		KeepalivePolicy:   s.opts.keepalivePolicy,
 		InitialWindowSize: s.opts.initialWindowSize,
@@ -1036,18 +1038,18 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
 func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) {
 	ctx = transport.SetConnection(ctx, rawConn)
 	ctx = peer.NewContext(ctx, st.Peer())
-	for _, sh := range s.opts.statsHandlers {
-		ctx = sh.TagConn(ctx, &stats.ConnTagInfo{
+	if s.statsHandler != nil {
+		ctx = s.statsHandler.TagConn(ctx, &stats.ConnTagInfo{
 			RemoteAddr: st.Peer().Addr,
 			LocalAddr:  st.Peer().LocalAddr,
 		})
-		sh.HandleConn(ctx, &stats.ConnBegin{})
+		s.statsHandler.HandleConn(ctx, &stats.ConnBegin{})
 	}
 
 	defer func() {
 		st.Close(errors.New("finished serving streams for the server transport"))
-		for _, sh := range s.opts.statsHandlers {
-			sh.HandleConn(ctx, &stats.ConnEnd{})
+		if s.statsHandler != nil {
+			s.statsHandler.HandleConn(ctx, &stats.ConnEnd{})
 		}
 	}()
 
@@ -1104,7 +1106,7 @@ var _ http.Handler = (*Server)(nil)
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a
 // later release.
 func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
+	st, err := transport.NewServerHandlerTransport(w, r, s.statsHandler, s.opts.bufferPool)
 	if err != nil {
 		// Errors returned from transport.NewServerHandlerTransport have
 		// already been written to w.
@@ -1198,12 +1200,8 @@ func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStrea
 		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
 	}
 	err = stream.Write(hdr, payload, opts)
-	if err == nil {
-		if len(s.opts.statsHandlers) != 0 {
-			for _, sh := range s.opts.statsHandlers {
-				sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
-			}
-		}
+	if err == nil && s.statsHandler != nil {
+		s.statsHandler.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
 	}
 	return err
 }
@@ -1245,16 +1243,15 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
 }
 
 func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
-	shs := s.opts.statsHandlers
-	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+	sh := s.statsHandler
+	if sh != nil || trInfo != nil || channelz.IsOn() {
 		if channelz.IsOn() {
 			s.incrCallsStarted()
 		}
 		var statsBegin *stats.Begin
-		for _, sh := range shs {
-			beginTime := time.Now()
+		if sh != nil {
 			statsBegin = &stats.Begin{
-				BeginTime:      beginTime,
+				BeginTime:      time.Now(),
 				IsClientStream: false,
 				IsServerStream: false,
 			}
@@ -1282,7 +1279,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
 			trInfo.tr.Finish()
 		}
 
-		for _, sh := range shs {
+		if sh != nil {
 			end := &stats.End{
 				BeginTime: statsBegin.BeginTime,
 				EndTime:   time.Now(),
@@ -1379,7 +1376,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
 	}
 
 	var payInfo *payloadInfo
-	if len(shs) != 0 || len(binlogs) != 0 {
+	if sh != nil || len(binlogs) != 0 {
 		payInfo = &payloadInfo{}
 		defer payInfo.free()
 	}
@@ -1405,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
 			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
 		}
 
-		for _, sh := range shs {
+		if sh != nil {
 			sh.HandleRPC(ctx, &stats.InPayload{
 				RecvTime: time.Now(),
 				Payload:  v,
@@ -1579,33 +1576,30 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
 	if channelz.IsOn() {
 		s.incrCallsStarted()
 	}
-	shs := s.opts.statsHandlers
+	sh := s.statsHandler
 	var statsBegin *stats.Begin
-	if len(shs) != 0 {
-		beginTime := time.Now()
+	if sh != nil {
 		statsBegin = &stats.Begin{
-			BeginTime:      beginTime,
+			BeginTime:      time.Now(),
 			IsClientStream: sd.ClientStreams,
 			IsServerStream: sd.ServerStreams,
 		}
-		for _, sh := range shs {
-			sh.HandleRPC(ctx, statsBegin)
-		}
+		sh.HandleRPC(ctx, statsBegin)
 	}
 	ctx = NewContextWithServerTransportStream(ctx, stream)
 	ss := &serverStream{
 		ctx:                   ctx,
 		s:                     stream,
-		p:                     &parser{r: stream, bufferPool: s.opts.bufferPool},
+		p:                     parser{r: stream, bufferPool: s.opts.bufferPool},
 		codec:                 s.getCodec(stream.ContentSubtype()),
 		desc:                  sd,
 		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
 		maxSendMessageSize:    s.opts.maxSendMessageSize,
 		trInfo:                trInfo,
-		statsHandler:          shs,
+		statsHandler:          sh,
 	}
 
-	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+	if sh != nil || trInfo != nil || channelz.IsOn() {
 		// See comment in processUnaryRPC on defers.
 		defer func() {
 			if trInfo != nil {
@@ -1619,7 +1613,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
 				ss.mu.Unlock()
 			}
 
-			if len(shs) != 0 {
+			if sh != nil {
 				end := &stats.End{
 					BeginTime: statsBegin.BeginTime,
 					EndTime:   time.Now(),
@@ -1627,9 +1621,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
 				if err != nil && err != io.EOF {
 					end.Error = toRPCErr(err)
 				}
-				for _, sh := range shs {
-					sh.HandleRPC(ctx, end)
-				}
+				sh.HandleRPC(ctx, end)
 			}
 
 			if channelz.IsOn() {
@@ -1818,19 +1810,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser
 	method := sm[pos+1:]
 
 	// FromIncomingContext is expensive: skip if there are no statsHandlers
-	if len(s.opts.statsHandlers) > 0 {
+	if s.statsHandler != nil {
 		md, _ := metadata.FromIncomingContext(ctx)
-		for _, sh := range s.opts.statsHandlers {
-			ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
-			sh.HandleRPC(ctx, &stats.InHeader{
-				FullMethod:  stream.Method(),
-				RemoteAddr:  t.Peer().Addr,
-				LocalAddr:   t.Peer().LocalAddr,
-				Compression: stream.RecvCompress(),
-				WireLength:  stream.HeaderWireLength(),
-				Header:      md,
-			})
-		}
+		ctx = s.statsHandler.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
+		s.statsHandler.HandleRPC(ctx, &stats.InHeader{
+			FullMethod:  stream.Method(),
+			RemoteAddr:  t.Peer().Addr,
+			LocalAddr:   t.Peer().LocalAddr,
+			Compression: stream.RecvCompress(),
+			WireLength:  stream.HeaderWireLength(),
+			Header:      md,
+		})
 	}
 	// To have calls in stream callouts work. Will delete once all stats handler
 	// calls come from the gRPC layer.
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index d9bbd4c57..ec9577b27 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -25,6 +25,7 @@ import (
 	"math"
 	rand "math/rand/v2"
 	"strconv"
+	"strings"
 	"sync"
 	"time"
 
@@ -177,13 +178,43 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 	return cc.NewStream(ctx, desc, method, opts...)
 }
 
+var emptyMethodConfig = serviceconfig.MethodConfig{}
+
+// endOfClientStream performs cleanup actions required for both successful and
+// failed streams. This includes incrementing channelz stats and invoking all
+// registered OnFinish call options.
+func endOfClientStream(cc *ClientConn, err error, opts ...CallOption) {
+	if channelz.IsOn() {
+		if err != nil {
+			cc.incrCallsFailed()
+		} else {
+			cc.incrCallsSucceeded()
+		}
+	}
+
+	for _, o := range opts {
+		if o, ok := o.(OnFinishCallOption); ok {
+			o.OnFinish(err)
+		}
+	}
+}
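Editor's note: `endOfClientStream` above centralizes cleanup that previously ran in two places. Streams that fail during creation run it from a deferred error check, while streams that start successfully run it from `finish` instead, so the hooks fire exactly once either way. A toy sketch of that single-owner cleanup pattern (all names hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

// endOfCall runs every per-call cleanup hook exactly once, whether the
// call failed during setup or completed normally later.
func endOfCall(err error, hooks ...func(error)) {
	for _, h := range hooks {
		h(err)
	}
}

func newCall(fail bool, hooks ...func(error)) (err error) {
	defer func() {
		if err != nil {
			endOfCall(err, hooks...) // setup failed: clean up here
		}
	}()
	if fail {
		return errors.New("no transport")
	}
	// On success, responsibility for running the hooks moves to the
	// stream's own finish path (not shown).
	return nil
}

func main() {
	onFinish := func(err error) { fmt.Println("finished with:", err) }
	_ = newCall(true, onFinish) // finished with: no transport
}
```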
+
 func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+	if channelz.IsOn() {
+		cc.incrCallsStarted()
+	}
+	defer func() {
+		if err != nil {
+			// Ensure cleanup when stream creation fails.
+			endOfClientStream(cc, err, opts...)
+		}
+	}()
+
 	// Start tracking the RPC for idleness purposes. This is where a stream is
 	// created for both streaming and unary RPCs, and hence is a good place to
 	// track active RPC count.
-	if err := cc.idlenessMgr.OnCallBegin(); err != nil {
-		return nil, err
-	}
+	cc.idlenessMgr.OnCallBegin()
+
 	// Add a calloption, to decrement the active call count, that gets executed
 	// when the RPC completes.
 	opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
@@ -202,14 +233,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 			}
 		}
 	}
-	if channelz.IsOn() {
-		cc.incrCallsStarted()
-		defer func() {
-			if err != nil {
-				cc.incrCallsFailed()
-			}
-		}()
-	}
 	// Provide an opportunity for the first RPC to see the first service config
 	// provided by the resolver.
 	nameResolutionDelayed, err := cc.waitForResolvedAddrs(ctx)
@@ -217,7 +240,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 		return nil, err
 	}
 
-	var mc serviceconfig.MethodConfig
+	mc := &emptyMethodConfig
 	var onCommit func()
 	newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
 		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...)
@@ -240,7 +263,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 		if rpcConfig.Context != nil {
 			ctx = rpcConfig.Context
 		}
-		mc = rpcConfig.MethodConfig
+		mc = &rpcConfig.MethodConfig
 		onCommit = rpcConfig.OnCommitted
 		if rpcConfig.Interceptor != nil {
 			rpcInfo.Context = nil
@@ -258,7 +281,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 	return newStream(ctx, func() {})
 }
 
-func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) {
+func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc *serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) {
 	callInfo := defaultCallInfo()
 	if mc.WaitForReady != nil {
 		callInfo.failFast = !*mc.WaitForReady
@@ -299,6 +322,10 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
 		DoneFunc:       doneFunc,
 		Authority:      callInfo.authority,
 	}
+	if allowed := callInfo.acceptedResponseCompressors; len(allowed) > 0 {
+		headerValue := strings.Join(allowed, ",")
+		callHdr.AcceptedCompressors = &headerValue
+	}
 	// Set our outgoing compression according to the UseCompressor CallOption, if
 	// set. In that case, also find the compressor from the encoding package.
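Editor's note: `CallHdr.AcceptedCompressors` is a `*string` rather than a plain string so the transport can distinguish "not set" (advertise the default registered compressors) from "set to the empty string" (advertise none). A minimal sketch of that tri-state pattern:

```go
package main

import "fmt"

// headerValue resolves an optional override: nil keeps the default,
// while a non-nil pointer (even to "") replaces it outright.
func headerValue(override *string, def string) string {
	if override == nil {
		return def
	}
	return *override
}

func main() {
	def := "gzip,zstd"
	fmt.Println(headerValue(nil, def)) // gzip,zstd (unset: defaults)

	empty := ""
	fmt.Println(headerValue(&empty, def) == "") // true (advertise none)

	only := "gzip"
	fmt.Println(headerValue(&only, def)) // gzip
}
```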
@@ -325,7 +352,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
 	cs := &clientStream{
 		callHdr:      callHdr,
 		ctx:          ctx,
-		methodConfig: &mc,
+		methodConfig: mc,
 		opts:         opts,
 		callInfo:     callInfo,
 		cc:           cc,
@@ -418,19 +445,21 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
 	ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1)
 	method := cs.callHdr.Method
 	var beginTime time.Time
-	shs := cs.cc.dopts.copts.StatsHandlers
-	for _, sh := range shs {
-		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast, NameResolutionDelay: cs.nameResolutionDelay})
+	sh := cs.cc.statsHandler
+	if sh != nil {
 		beginTime = time.Now()
-		begin := &stats.Begin{
+		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{
+			FullMethodName: method, FailFast: cs.callInfo.failFast,
+			NameResolutionDelay: cs.nameResolutionDelay,
+		})
+		sh.HandleRPC(ctx, &stats.Begin{
 			Client:                    true,
 			BeginTime:                 beginTime,
 			FailFast:                  cs.callInfo.failFast,
 			IsClientStream:            cs.desc.ClientStreams,
 			IsServerStream:            cs.desc.ServerStreams,
 			IsTransparentRetryAttempt: isTransparent,
-		}
-		sh.HandleRPC(ctx, begin)
+		})
 	}
 
 	var trInfo *traceInfo
@@ -461,7 +490,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
 		beginTime:      beginTime,
 		cs:             cs,
 		decompressorV0: cs.cc.dopts.dc,
-		statsHandlers:  shs,
+		statsHandler:   sh,
 		trInfo:         trInfo,
 	}, nil
 }
@@ -480,12 +509,10 @@ func (a *csAttempt) getTransport() error {
 		return err
 	}
 	if a.trInfo != nil {
-		a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr())
+		a.trInfo.firstLine.SetRemoteAddr(a.transport.Peer().Addr)
 	}
-	if pick.blocked {
-		for _, sh := range a.statsHandlers {
-			sh.HandleRPC(a.ctx, &stats.DelayedPickComplete{})
-		}
+	if pick.blocked && a.statsHandler != nil {
+		a.statsHandler.HandleRPC(a.ctx, &stats.DelayedPickComplete{})
 	}
 	return nil
 }
@@ -529,7 +556,7 @@ func (a *csAttempt) newStream() error {
 	}
 	a.transportStream = s
 	a.ctx = s.Context()
-	a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
+	a.parser = parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
 	return nil
 }
 
@@ -549,6 +576,8 @@ type clientStream struct {
 
 	sentLast bool // sent an end stream
 
+	receivedFirstMsg bool // set after the first message is received
+
 	methodConfig *MethodConfig
 
 	ctx context.Context // the application's context, wrapped by stats/tracing
@@ -599,7 +628,7 @@ type csAttempt struct {
 	cs              *clientStream
 	transport       transport.ClientTransport
 	transportStream *transport.ClientStream
-	parser          *parser
+	parser          parser
 	pickResult      balancer.PickResult
 
 	finished bool
@@ -613,8 +642,8 @@ type csAttempt struct {
 	// and cleared when the finish method is called.
 	trInfo *traceInfo
 
-	statsHandlers []stats.Handler
-	beginTime     time.Time
+	statsHandler stats.Handler
+	beginTime    time.Time
 
 	// set for newStream errors that may be transparently retried
 	allowTransparentRetry bool
@@ -1038,9 +1067,6 @@ func (cs *clientStream) finish(err error) {
 		return
 	}
 	cs.finished = true
-	for _, onFinish := range cs.callInfo.onFinish {
-		onFinish(err)
-	}
 	cs.commitAttemptLocked()
 	if cs.attempt != nil {
 		cs.attempt.finish(err)
@@ -1080,13 +1106,7 @@ func (cs *clientStream) finish(err error) {
 	if err == nil {
 		cs.retryThrottler.successfulRPC()
 	}
-	if channelz.IsOn() {
-		if err != nil {
-			cs.cc.incrCallsFailed()
-		} else {
-			cs.cc.incrCallsSucceeded()
-		}
-	}
+	endOfClientStream(cs.cc, err, cs.opts...)
 	cs.cancel()
 }
@@ -1108,17 +1128,15 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength
 		}
 		return io.EOF
 	}
-	if len(a.statsHandlers) != 0 {
-		for _, sh := range a.statsHandlers {
-			sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
-		}
+	if a.statsHandler != nil {
+		a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
 	}
 	return nil
 }
 
 func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 	cs := a.cs
-	if len(a.statsHandlers) != 0 && payInfo == nil {
+	if a.statsHandler != nil && payInfo == nil {
 		payInfo = &payloadInfo{}
 		defer payInfo.free()
 	}
@@ -1132,6 +1150,10 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 			a.decompressorV0 = nil
 			a.decompressorV1 = encoding.GetCompressor(ct)
 		}
+		// Validate that the compression method is acceptable for this call.
+		if !acceptedCompressorAllows(cs.callInfo.acceptedResponseCompressors, ct) {
+			return status.Errorf(codes.Internal, "grpc: peer compressed the response with %q which is not allowed by AcceptCompressors", ct)
+		}
 	} else {
 		// No compression is used; disable our decompressor.
 		a.decompressorV0 = nil
@@ -1139,16 +1161,21 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 		// Only initialize this state once per stream.
 		a.decompressorSet = true
 	}
-	if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
+	if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
 		if err == io.EOF {
 			if statusErr := a.transportStream.Status().Err(); statusErr != nil {
 				return statusErr
 			}
+			// Received no msg and status OK for non-server streaming rpcs.
+			if !cs.desc.ServerStreams && !cs.receivedFirstMsg {
+				return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
+			}
 			return io.EOF // indicates successful end of stream.
 		}
 		return toRPCErr(err)
 	}
+	cs.receivedFirstMsg = true
 
 	if a.trInfo != nil {
 		a.mu.Lock()
 		if a.trInfo.tr != nil {
@@ -1156,8 +1183,8 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 		}
 		a.mu.Unlock()
 	}
-	for _, sh := range a.statsHandlers {
-		sh.HandleRPC(a.ctx, &stats.InPayload{
+	if a.statsHandler != nil {
+		a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{
 			Client:   true,
 			RecvTime: time.Now(),
 			Payload:  m,
@@ -1172,12 +1199,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 	}
 
 	// Special handling for non-server-stream rpcs.
 	// This recv expects EOF or errors, so we don't collect inPayload.
-	if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
+	if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
 		return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success
 	} else if err != nil {
 		return toRPCErr(err)
 	}
-	return status.Errorf(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
+	return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
 }
 
 func (a *csAttempt) finish(err error) {
@@ -1210,15 +1237,14 @@ func (a *csAttempt) finish(err error) {
 			ServerLoad: balancerload.Parse(tr),
 		})
 	}
-	for _, sh := range a.statsHandlers {
-		end := &stats.End{
+	if a.statsHandler != nil {
+		a.statsHandler.HandleRPC(a.ctx, &stats.End{
 			Client:    true,
 			BeginTime: a.beginTime,
 			EndTime:   time.Now(),
 			Trailer:   tr,
 			Error:     err,
-		}
-		sh.HandleRPC(a.ctx, end)
+		})
 	}
 	if a.trInfo != nil && a.trInfo.tr != nil {
 		if err == nil {
@@ -1324,7 +1350,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
 		return nil, err
 	}
 	as.transportStream = s
-	as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
+	as.parser = parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
 	ac.incrCallsStarted()
 	if desc != unaryStreamDesc {
 		// Listen on stream context to cleanup when the stream context is
@@ -1359,6 +1385,7 @@ type addrConnStream struct {
 	transport        transport.ClientTransport
 	ctx              context.Context
 	sentLast         bool
+	receivedFirstMsg bool
 	desc             *StreamDesc
 	codec            baseCodec
 	sendCompressorV0 Compressor
@@ -1366,7 +1393,7 @@ type addrConnStream struct {
 	decompressorSet  bool
 	decompressorV0   Decompressor
 	decompressorV1   encoding.Compressor
-	parser           *parser
+	parser           parser
 
 	// mu guards finished and is held for the entire finish method.
 	mu sync.Mutex
@@ -1472,6 +1499,10 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
 			as.decompressorV0 = nil
 			as.decompressorV1 = encoding.GetCompressor(ct)
 		}
+		// Validate that the compression method is acceptable for this call.
+		if !acceptedCompressorAllows(as.callInfo.acceptedResponseCompressors, ct) {
+			return status.Errorf(codes.Internal, "grpc: peer compressed the response with %q which is not allowed by AcceptCompressors", ct)
+		}
 	} else {
 		// No compression is used; disable our decompressor.
 		as.decompressorV0 = nil
@@ -1479,15 +1510,20 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
 		// Only initialize this state once per stream.
 		as.decompressorSet = true
 	}
-	if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
+	if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
 		if err == io.EOF {
 			if statusErr := as.transportStream.Status().Err(); statusErr != nil {
 				return statusErr
 			}
+			// Received no msg and status OK for non-server streaming rpcs.
+			if !as.desc.ServerStreams && !as.receivedFirstMsg {
+				return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
+			}
 			return io.EOF // indicates successful end of stream.
 		}
 		return toRPCErr(err)
 	}
+	as.receivedFirstMsg = true
 
 	if as.desc.ServerStreams {
 		// Subsequent messages should be received by subsequent RecvMsg calls.
@@ -1496,12 +1532,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
 
 	// Special handling for non-server-stream rpcs.
 	// This recv expects EOF or errors, so we don't collect inPayload.
-	if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
+	if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
 		return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success
 	} else if err != nil {
 		return toRPCErr(err)
 	}
-	return status.Errorf(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
+	return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
 }
 
 func (as *addrConnStream) finish(err error) {
@@ -1584,7 +1620,7 @@ type ServerStream interface {
 type serverStream struct {
 	ctx   context.Context
 	s     *transport.ServerStream
-	p     *parser
+	p     parser
 	codec baseCodec
 
 	desc *StreamDesc
@@ -1601,7 +1637,7 @@ type serverStream struct {
 	maxSendMessageSize    int
 	trInfo                *traceInfo
 
-	statsHandler []stats.Handler
+	statsHandler stats.Handler
 
 	binlogs []binarylog.MethodLogger
 	// serverHeaderBinlogged indicates whether server header has been logged. It
@@ -1737,10 +1773,8 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 			binlog.Log(ss.ctx, sm)
 		}
 	}
-	if len(ss.statsHandler) != 0 {
-		for _, sh := range ss.statsHandler {
-			sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
-		}
+	if ss.statsHandler != nil {
+		ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
 	}
 	return nil
 }
@@ -1771,11 +1805,11 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 		}
 	}()
 	var payInfo *payloadInfo
-	if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
+	if ss.statsHandler != nil || len(ss.binlogs) != 0 {
 		payInfo = &payloadInfo{}
 		defer payInfo.free()
 	}
-	if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
+	if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
 		if err == io.EOF {
 			if len(ss.binlogs) != 0 {
 				chc := &binarylog.ClientHalfClose{}
@@ -1795,16 +1829,14 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 		return toRPCErr(err)
 	}
 	ss.recvFirstMsg = true
-	if len(ss.statsHandler) != 0 {
-		for _, sh := range ss.statsHandler {
-			sh.HandleRPC(ss.s.Context(), &stats.InPayload{
-				RecvTime:         time.Now(),
-				Payload:          m,
-				Length:           payInfo.uncompressedBytes.Len(),
-				WireLength:       payInfo.compressedLength + headerLen,
-				CompressedLength: payInfo.compressedLength,
-			})
-		}
+	if ss.statsHandler != nil {
+		ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
+			RecvTime:         time.Now(),
+			Payload:          m,
+			Length:           payInfo.uncompressedBytes.Len(),
+			WireLength:       payInfo.compressedLength + headerLen,
+			CompressedLength: payInfo.compressedLength,
+		})
 	}
 	if len(ss.binlogs) != 0 {
 		cm := &binarylog.ClientMessage{
@@ -1821,7 +1853,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 	}
 
 	// Special handling for non-client-stream rpcs.
 	// This recv expects EOF or errors, so we don't collect inPayload.
-	if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF {
+	if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF {
 		return nil
 	} else if err != nil {
 		return err
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 468f11065..ff7840fd8 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.75.1"
+const Version = "1.78.0"
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
index 669133d04..c96e44834 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -32,7 +32,7 @@ var byteType = reflect.TypeOf(byte(0))
 func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
 	f := new(filedesc.Field)
 	f.L0.ParentFile = filedesc.SurrogateProto2
-	f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures
+	packed := false
 	for len(tag) > 0 {
 		i := strings.IndexByte(tag, ',')
 		if i < 0 {
@@ -108,7 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
 			f.L1.StringName.InitJSON(jsonName)
 		}
 	case s == "packed":
-		f.L1.EditionFeatures.IsPacked = true
+		packed = true
 	case strings.HasPrefix(s, "def="):
 		// The default tag is special in that everything afterwards is the
 		// default regardless of the presence of commas.
@@ -121,6 +121,13 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
 		tag = strings.TrimPrefix(tag[i:], ",")
 	}
 
+	// Update EditionFeatures after the loop and after we know whether this is
+	// a proto2 or proto3 field.
+	f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures
+	if packed {
+		f.L1.EditionFeatures.IsPacked = true
+	}
+
 	// The generator uses the group message name instead of the field name.
 	// We obtain the real field name by lowercasing the group name.
 	if f.L1.Kind == protoreflect.GroupKind {
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
index 099b2bf45..9aa7a9bb7 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
@@ -424,27 +424,34 @@ func (d *Decoder) parseFieldName() (tok Token, err error) {
 	return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in))
 }
 
-// parseTypeName parses Any type URL or extension field name. The name is
-// enclosed in [ and ] characters. The C++ parser does not handle many legal URL
-// strings. This implementation is more liberal and allows for the pattern
-// ^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`). Whitespaces and comments are allowed
-// in between [ ], '.', '/' and the sub names.
+// parseTypeName parses an Any type URL or an extension field name. The name is
+// enclosed in [ and ] characters. We allow almost arbitrary type URL prefixes,
+// closely following the text-format spec [1,2]. We implement "ExtensionName |
+// AnyName" as follows (with some exceptions for backwards compatibility):
+//
+//	char     = [-_a-zA-Z0-9]
+//	url_char = char | [.~!$&'()*+,;=] | "%", hex, hex
+//
+//	Ident         = char, { char }
+//	TypeName      = Ident, { ".", Ident } ;
+//	UrlPrefix     = url_char, { url_char | "/" } ;
+//	ExtensionName = "[", TypeName, "]" ;
+//	AnyName       = "[", UrlPrefix, "/", TypeName, "]" ;
+//
+// Additionally, we allow arbitrary whitespace and comments between [ and ].
+//
+// [1] https://protobuf.dev/reference/protobuf/textformat-spec/#characters
+// [2] https://protobuf.dev/reference/protobuf/textformat-spec/#field-names
 func (d *Decoder) parseTypeName() (Token, error) {
-	startPos := len(d.orig) - len(d.in)
 	// Use alias s to advance first in order to use d.in for error handling.
-	// Caller already checks for [ as first character.
+	// Caller already checks for [ as first character (d.in[0] == '[').
 	s := consume(d.in[1:], 0)
 	if len(s) == 0 {
 		return Token{}, ErrUnexpectedEOF
 	}
 
+	// Collect everything between [ and ] in name.
 	var name []byte
-	for len(s) > 0 && isTypeNameChar(s[0]) {
-		name = append(name, s[0])
-		s = s[1:]
-	}
-	s = consume(s, 0)
-
 	var closed bool
 	for len(s) > 0 && !closed {
 		switch {
@@ -452,23 +459,20 @@ func (d *Decoder) parseTypeName() (Token, error) {
 			s = s[1:]
 			closed = true
 
-		case s[0] == '/', s[0] == '.':
-			if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') {
-				return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
-					d.orig[startPos:len(d.orig)-len(s)+1])
-			}
+		case s[0] == '/' || isTypeNameChar(s[0]) || isUrlExtraChar(s[0]):
 			name = append(name, s[0])
-			s = s[1:]
-			s = consume(s, 0)
-			for len(s) > 0 && isTypeNameChar(s[0]) {
-				name = append(name, s[0])
-				s = s[1:]
+			s = consume(s[1:], 0)
+
+		// URL percent-encoded chars
+		case s[0] == '%':
+			if len(s) < 3 || !isHexChar(s[1]) || !isHexChar(s[2]) {
+				return Token{}, d.parseTypeNameError(s, 3)
 			}
-			s = consume(s, 0)
+			name = append(name, s[0], s[1], s[2])
+			s = consume(s[3:], 0)
 
 		default:
-			return Token{}, d.newSyntaxError(
-				"invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1])
+			return Token{}, d.parseTypeNameError(s, 1)
 		}
 	}
 
@@ -476,15 +480,38 @@ func (d *Decoder) parseTypeName() (Token, error) {
 		return Token{}, ErrUnexpectedEOF
 	}
 
-	// First character cannot be '.'. Last character cannot be '.' or '/'.
-	size := len(name)
-	if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' {
-		return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
-			d.orig[startPos:len(d.orig)-len(s)])
+	// Split collected name on last '/' into urlPrefix and typeName (if '/' is
+	// present).
+	typeName := name
+	if i := bytes.LastIndexByte(name, '/'); i != -1 {
+		urlPrefix := name[:i]
+		typeName = name[i+1:]
+
+		// urlPrefix may be empty (for backwards compatibility).
+		// If non-empty, it must not start with '/'.
+		if len(urlPrefix) > 0 && urlPrefix[0] == '/' {
+			return Token{}, d.parseTypeNameError(s, 0)
+		}
 	}
 
+	// typeName must not be empty (note: "" splits to [""]) and all identifier
+	// parts must not be empty.
+	for _, ident := range bytes.Split(typeName, []byte{'.'}) {
+		if len(ident) == 0 {
+			return Token{}, d.parseTypeNameError(s, 0)
+		}
+	}
+
+	// typeName must not contain any percent-encoded or special URL chars.
+	for _, b := range typeName {
+		if b == '%' || (b != '.' && isUrlExtraChar(b)) {
+			return Token{}, d.parseTypeNameError(s, 0)
+		}
+	}
+
+	startPos := len(d.orig) - len(d.in)
+	endPos := len(d.orig) - len(s)
 	d.in = s
-	endPos := len(d.orig) - len(d.in)
 	d.consume(0)
 
 	return Token{
@@ -496,16 +523,32 @@ func (d *Decoder) parseTypeName() (Token, error) {
 	}, nil
 }
 
-func isTypeNameChar(b byte) bool {
-	return (b == '-' || b == '_' ||
-		('0' <= b && b <= '9') ||
-		('a' <= b && b <= 'z') ||
-		('A' <= b && b <= 'Z'))
+func (d *Decoder) parseTypeNameError(s []byte, numUnconsumedChars int) error {
+	return d.newSyntaxError(
+		"invalid type URL/extension field name: %s",
+		d.in[:len(d.in)-len(s)+min(numUnconsumedChars, len(s))],
+	)
 }
 
-func isWhiteSpace(b byte) bool {
+func isHexChar(b byte) bool {
+	return ('0' <= b && b <= '9') ||
+		('a' <= b && b <= 'f') ||
+		('A' <= b && b <= 'F')
+}
+
+func isTypeNameChar(b byte) bool {
+	return b == '-' || b == '_' ||
+		('0' <= b && b <= '9') ||
+		('a' <= b && b <= 'z') ||
+		('A' <= b && b <= 'Z')
+}
+
+// isUrlExtraChar complements isTypeNameChar with extra characters that we allow
+// in URLs but not in type names. Note that '/' is not included so that it can
+// be treated specially.
+func isUrlExtraChar(b byte) bool {
 	switch b {
-	case ' ', '\n', '\r', '\t':
+	case '.', '~', '!', '$', '&', '(', ')', '*', '+', ',', ';', '=':
 		return true
 	default:
 		return false
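Editor's note: the parser above is deliberately two-phase: it first collects everything between `[` and `]` permissively, then splits on the last `/` and validates only the type-name part strictly. A rough standalone restatement of that validation step (not the vendored code; names are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

func isTypeNameChar(b byte) bool {
	return b == '-' || b == '_' ||
		('0' <= b && b <= '9') ||
		('a' <= b && b <= 'z') ||
		('A' <= b && b <= 'Z')
}

// validTypeName checks the part after the last '/': dot-separated,
// non-empty identifiers built only from type-name characters. The URL
// prefix before the last '/' is left almost unconstrained.
func validTypeName(name string) bool {
	typeName := name
	if i := strings.LastIndexByte(name, '/'); i != -1 {
		if strings.HasPrefix(name[:i], "/") { // prefix must not start with '/'
			return false
		}
		typeName = name[i+1:]
	}
	for _, ident := range strings.Split(typeName, ".") {
		if ident == "" {
			return false
		}
		for i := 0; i < len(ident); i++ {
			if !isTypeNameChar(ident[i]) {
				return false
			}
		}
	}
	return true
}

func main() {
	fmt.Println(validTypeName("type.googleapis.com/google.protobuf.Any")) // true
	fmt.Println(validTypeName("example.com/foo..Bar"))                    // false: empty ident
	fmt.Println(validTypeName("example.com/foo%2EBar"))                   // false: escape in type name
}
```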
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 688aabe43..c775e5832 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -32,6 +32,7 @@ const (
 	EditionProto3      Edition = 999
 	Edition2023        Edition = 1000
 	Edition2024        Edition = 1001
+	EditionUnstable    Edition = 9999
 	EditionUnsupported Edition = 100000
 )
 
@@ -72,9 +73,10 @@ type (
 		EditionFeatures EditionFeatures
 	}
 	FileL2 struct {
-		Options   func() protoreflect.ProtoMessage
-		Imports   FileImports
-		Locations SourceLocations
+		Options       func() protoreflect.ProtoMessage
+		Imports       FileImports
+		OptionImports func() protoreflect.FileImports
+		Locations     SourceLocations
 	}
 
 	// EditionFeatures is a frequently-instantiated struct, so please take care
@@ -126,12 +128,9 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
 func (fd *File) Parent() protoreflect.Descriptor         { return nil }
 func (fd *File) Index() int                              { return 0 }
 func (fd *File) Syntax() protoreflect.Syntax             { return fd.L1.Syntax }
-
-// Not exported and just used to reconstruct the original FileDescriptor proto
-func (fd *File) Edition() int32                  { return int32(fd.L1.Edition) }
-func (fd *File) Name() protoreflect.Name         { return fd.L1.Package.Name() }
-func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
-func (fd *File) IsPlaceholder() bool             { return false }
+func (fd *File) Name() protoreflect.Name                 { return fd.L1.Package.Name() }
+func (fd *File) FullName() protoreflect.FullName         { return fd.L1.Package }
+func (fd *File) IsPlaceholder() bool                     { return false }
 func (fd *File) Options() protoreflect.ProtoMessage {
 	if f := fd.lazyInit().Options; f != nil {
 		return f()
@@ -150,6 +149,16 @@ func (fd *File) Format(s fmt.State, r rune)            { descfmt.FormatD
 func (fd *File) ProtoType(protoreflect.FileDescriptor) {}
 func (fd *File) ProtoInternal(pragma.DoNotImplement)   {}
 
+// The next two are not part of the FileDescriptor interface. They are just used to reconstruct
+// the original FileDescriptor proto.
+func (fd *File) Edition() int32 { return int32(fd.L1.Edition) }
+func (fd *File) OptionImports() protoreflect.FileImports {
+	if f := fd.lazyInit().OptionImports; f != nil {
+		return f()
+	}
+	return emptyFiles
+}
+
 func (fd *File) lazyInit() *FileL2 {
 	if atomic.LoadUint32(&fd.once) == 0 {
 		fd.lazyInitOnce()
@@ -182,9 +191,9 @@ type (
 		L2 *EnumL2 // protected by fileDesc.once
 	}
 	EnumL1 struct {
-		eagerValues bool // controls whether EnumL2.Values is already populated
-
 		EditionFeatures EditionFeatures
+		Visibility      int32
+		eagerValues     bool // controls whether EnumL2.Values is already populated
 	}
 	EnumL2 struct {
 		Options func() protoreflect.ProtoMessage
@@ -219,6 +228,11 @@ func (ed *Enum) ReservedNames() protoreflect.Names       { return &ed.lazyInit()
 func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges }
 func (ed *Enum) Format(s fmt.State, r rune)              { descfmt.FormatDesc(s, r, ed) }
 func (ed *Enum) ProtoType(protoreflect.EnumDescriptor)   {}
+
+// This is not part of the EnumDescriptor interface. It is just used to reconstruct
+// the original FileDescriptor proto.
+func (ed *Enum) Visibility() int32 { return ed.L1.Visibility }
+
 func (ed *Enum) lazyInit() *EnumL2 {
 	ed.L0.ParentFile.lazyInit() // implicitly initializes L2
 	return ed.L2
@@ -244,13 +258,13 @@ type (
 		L2 *MessageL2 // protected by fileDesc.once
 	}
 	MessageL1 struct {
-		Enums      Enums
-		Messages   Messages
-		Extensions Extensions
-		IsMapEntry   bool // promoted from google.protobuf.MessageOptions
-		IsMessageSet bool // promoted from google.protobuf.MessageOptions
-
+		Enums           Enums
+		Messages        Messages
+		Extensions      Extensions
 		EditionFeatures EditionFeatures
+		Visibility      int32
+		IsMapEntry      bool // promoted from google.protobuf.MessageOptions
+		IsMessageSet    bool // promoted from google.protobuf.MessageOptions
 	}
 	MessageL2 struct {
 		Options func() protoreflect.ProtoMessage
@@ -319,6 +333,11 @@ func (md *Message) Messages() protoreflect.MessageDescriptors     { return &md.L
 func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions }
 func (md *Message) ProtoType(protoreflect.MessageDescriptor)      {}
 func (md *Message) Format(s fmt.State, r rune)                    { descfmt.FormatDesc(s, r, md) }
+
+// This is not part of the MessageDescriptor interface. It is just used to reconstruct
+// the original FileDescriptor proto.
+func (md *Message) Visibility() int32 { return md.L1.Visibility }
+
 func (md *Message) lazyInit() *MessageL2 {
 	md.L0.ParentFile.lazyInit() // implicitly initializes L2
 	return md.L2
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index d2f549497..e91860f5a 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -284,6 +284,13 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl
 			case genid.EnumDescriptorProto_Value_field_number:
 				numValues++
 			}
+		case protowire.VarintType:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			switch num {
+			case genid.EnumDescriptorProto_Visibility_field_number:
+				ed.L1.Visibility = int32(v)
+			}
 		default:
 			m := protowire.ConsumeFieldValue(num, typ, b)
 			b = b[m:]
@@ -365,6 +372,13 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor
 				md.unmarshalSeedOptions(v)
 			}
 			prevField = num
+		case protowire.VarintType:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			switch num {
+			case genid.DescriptorProto_Visibility_field_number:
+				md.L1.Visibility = int32(v)
+			}
 		default:
 			m := protowire.ConsumeFieldValue(num, typ, b)
 			b = b[m:]
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index d4c94458b..78f02b1b4 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -134,6 +134,7 @@ func (fd *File) unmarshalFull(b []byte) {
 	var enumIdx, messageIdx, extensionIdx, serviceIdx int
 	var rawOptions []byte
+	var optionImports []string
 	fd.L2 = new(FileL2)
 	for len(b) > 0 {
 		num, typ, n := protowire.ConsumeTag(b)
@@ -157,6 +158,8 @@ func (fd *File) unmarshalFull(b []byte) {
 				imp = PlaceholderFile(path)
 			}
 			fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp})
+		case genid.FileDescriptorProto_OptionDependency_field_number:
+			optionImports = append(optionImports, sb.MakeString(v))
 		case genid.FileDescriptorProto_EnumType_field_number:
 			fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
 			enumIdx++
@@ -178,6 +181,23 @@ func (fd *File) unmarshalFull(b []byte) {
 		}
 	}
 	fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions)
+	if len(optionImports) > 0 {
+		var imps FileImports
+		var once sync.Once
+		fd.L2.OptionImports = func() protoreflect.FileImports {
+			once.Do(func() {
+				imps = make(FileImports, len(optionImports))
+				for i, path := range optionImports {
+					imp, _ := fd.builder.FileRegistry.FindFileByPath(path)
+					if imp == nil {
+						imp = PlaceholderFile(path)
+					}
+					imps[i] = protoreflect.FileImport{FileDescriptor: imp}
+				}
+			})
+			return &imps
+		}
+	}
 }
 
 func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) {
@@ -310,7 +330,6 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) {
 				md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
 				extensionIdx++
 			case genid.DescriptorProto_Options_field_number:
-				md.unmarshalOptions(v)
 				rawOptions = appendOptions(rawOptions, v)
 			}
 		default:
@@ -336,27 +355,6 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) {
 	md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions)
 }
 
-func (md *Message) unmarshalOptions(b []byte) {
-	for len(b) > 0 {
-		num, typ, n := protowire.ConsumeTag(b)
-		b = b[n:]
-		switch typ {
-		case protowire.VarintType:
-			v, m := protowire.ConsumeVarint(b)
-			b = b[m:]
-			switch num {
-			case genid.MessageOptions_MapEntry_field_number:
-				md.L1.IsMapEntry = protowire.DecodeBool(v)
-			case genid.MessageOptions_MessageSetWireFormat_field_number:
-				md.L1.IsMessageSet = protowire.DecodeBool(v)
-			}
-		default:
-			m := protowire.ConsumeFieldValue(num, typ, b)
-			b = b[m:]
-		}
-	}
-}
-
 func unmarshalMessageReservedRange(b []byte) (r [2]protoreflect.FieldNumber) {
 	for len(b) > 0 {
 		num, typ, n := protowire.ConsumeTag(b)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index 950a6a325..65aaf4d21 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -26,6 +26,7 @@ const (
 	Edition_EDITION_PROTO3_enum_value          = 999
 	Edition_EDITION_2023_enum_value            = 1000
 	Edition_EDITION_2024_enum_value            = 1001
+	Edition_EDITION_UNSTABLE_enum_value        = 9999
 	Edition_EDITION_1_TEST_ONLY_enum_value     = 1
 	Edition_EDITION_2_TEST_ONLY_enum_value     = 2
 	Edition_EDITION_99997_TEST_ONLY_enum_value = 99997
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
index 229c69801..4a3bf393e 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
@@ -113,6 +113,9 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO
 }
 
 func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	if opts.depth--; opts.depth < 0 {
+		return out, errRecursionDepth
+	}
 	if wtyp != protowire.BytesType {
 		return out, errUnknown
 	}
@@ -170,6 +173,9 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo
 }
 
 func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	if opts.depth--; opts.depth < 0 {
+		return out, errRecursionDepth
+	}
 	if wtyp != protowire.BytesType {
 		return out, errUnknown
 	}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
index e0dd21fa5..1228b5c8c 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -102,8 +102,7 @@ var errUnknown = errors.New("unknown")
 
 func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	mi.init()
-	opts.depth--
-	if opts.depth < 0 {
+	if opts.depth--; opts.depth < 0 {
 		return out, errRecursionDepth
 	}
 	if flags.ProtoLegacy && mi.isMessageSet {
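The codec_map.go hunks above make map entries spend from the same recursion budget as nested messages, closing a gap where deeply nested map values could recurse unbounded, and decode.go rewrites the existing check into the `if opts.depth--; opts.depth < 0` form that folds the decrement into the condition. A standalone sketch of the idiom, assuming a toy opts struct rather than the library's unmarshalOptions:

package main

import (
	"errors"
	"fmt"
)

var errRecursionDepth = errors.New("exceeded maximum recursion depth")

// opts is a toy stand-in for the library's unmarshalOptions: passed by
// value, so each call spends from its own copy of the remaining depth
// and the budget bounds nesting depth rather than total work.
type opts struct{ depth int }

// consume decrements before descending, in the same order as consumeMap
// and unmarshalPointer: one check per nesting level, failing closed.
func consume(nested int, o opts) error {
	if o.depth--; o.depth < 0 {
		return errRecursionDepth
	}
	if nested == 0 {
		return nil
	}
	return consume(nested-1, o)
}

func main() {
	fmt.Println(consume(5, opts{depth: 10}))   // <nil>
	fmt.Println(consume(500, opts{depth: 10})) // exceeded maximum recursion depth
}

Because opts travels by value, sibling subtrees each start from the same remaining budget; only depth along a single path is limited, which is exactly what protects the stack.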
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
index 7b2995dde..99a1eb95f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -68,9 +68,13 @@ func Validate(mt protoreflect.MessageType, in protoiface.UnmarshalInput) (out pr
 	if in.Resolver == nil {
 		in.Resolver = protoregistry.GlobalTypes
 	}
+	if in.Depth == 0 {
+		in.Depth = protowire.DefaultRecursionLimit
+	}
 	o, st := mi.validate(in.Buf, 0, unmarshalOptions{
 		flags:    in.Flags,
 		resolver: in.Resolver,
+		depth:    in.Depth,
 	})
 	if o.initialized {
 		out.Flags |= protoiface.UnmarshalInitialized
@@ -257,6 +261,9 @@ func (mi *MessageInfo) validate(b []byte, groupTag protowire.Number, opts unmars
 		states[0].typ = validationTypeGroup
 		states[0].endGroup = groupTag
 	}
+	if opts.depth--; opts.depth < 0 {
+		return out, ValidationInvalid
+	}
 	initialized := true
 	start := len(b)
State:
@@ -451,6 +458,13 @@ State:
 					mi:   vi.mi,
 					tail: b,
 				})
+				if vi.typ == validationTypeMessage ||
+					vi.typ == validationTypeGroup ||
+					vi.typ == validationTypeMap {
+					if opts.depth--; opts.depth < 0 {
+						return out, ValidationInvalid
+					}
+				}
 				b = v
 				continue State
 			case validationTypeRepeatedVarint:
@@ -499,6 +513,9 @@ State:
 				mi:       vi.mi,
 				endGroup: num,
 			})
+			if opts.depth--; opts.depth < 0 {
+				return out, ValidationInvalid
+			}
 			continue State
 		case flags.ProtoLegacy && vi.typ == validationTypeMessageSetItem:
 			typeid, v, n, err := messageset.ConsumeFieldValue(b, false)
@@ -521,6 +538,13 @@ State:
 					mi:   xvi.mi,
 					tail: b[n:],
 				})
+				if xvi.typ == validationTypeMessage ||
+					xvi.typ == validationTypeGroup ||
+					xvi.typ == validationTypeMap {
+					if opts.depth--; opts.depth < 0 {
+						return out, ValidationInvalid
+					}
+				}
 				b = v
 				continue State
 			}
@@ -547,12 +571,14 @@ State:
 		switch st.typ {
 		case validationTypeMessage, validationTypeGroup:
 			numRequiredFields = int(st.mi.numRequiredFields)
+			opts.depth++
 		case validationTypeMap:
 			// If this is a map field with a message value that contains
 			// required fields, require that the value be present.
 			if st.mi != nil && st.mi.numRequiredFields > 0 {
 				numRequiredFields = 1
 			}
+			opts.depth++
 		}
 		// If there are more than 64 required fields, this check will
 		// always fail and we will report that the message is potentially
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 31e79a653..763fd8284 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
 const (
 	Major      = 1
 	Minor      = 36
-	Patch      = 9
+	Patch      = 11
 	PreRelease = ""
 )
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index 4cbf1aeaf..889d8511d 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -121,9 +121,8 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto
 		out, err = methods.Unmarshal(in)
 	} else {
-		o.RecursionLimit--
-		if o.RecursionLimit < 0 {
-			return out, errors.New("exceeded max recursion depth")
+		if o.RecursionLimit--; o.RecursionLimit < 0 {
+			return out, errRecursionDepth
 		}
 		err = o.unmarshalMessageSlow(b, m)
 	}
@@ -220,6 +219,9 @@ func (o UnmarshalOptions) unmarshalSingular(b []byte, wtyp protowire.Type, m pro
 }
 
 func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv protoreflect.Map, fd protoreflect.FieldDescriptor) (n int, err error) {
+	if o.RecursionLimit--; o.RecursionLimit < 0 {
+		return 0, errRecursionDepth
+	}
 	if wtyp != protowire.BytesType {
 		return 0, errUnknown
 	}
@@ -305,3 +307,5 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto
 var errUnknown = errors.New("BUG: internal error (unknown)")
 
 var errDecode = errors.New("cannot parse invalid wire-format data")
+
+var errRecursionDepth = errors.New("exceeded maximum recursion depth")
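At the public API level, the budget these hunks enforce is proto.UnmarshalOptions.RecursionLimit, defaulted when left at zero. A small demonstration of hitting the limit; structpb is used here only as a convenient message type that can nest inside itself, not because this diff touches it:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Build a Value nested ~50 list-levels deep: [[[...["leaf"]...]]].
	v := structpb.NewStringValue("leaf")
	for i := 0; i < 50; i++ {
		v = structpb.NewListValue(&structpb.ListValue{Values: []*structpb.Value{v}})
	}
	b, err := proto.Marshal(v)
	if err != nil {
		panic(err)
	}

	var out structpb.Value
	// The default limit (applied when RecursionLimit is zero) accepts it.
	fmt.Println(proto.Unmarshal(b, &out)) // <nil>
	// A tight explicit limit rejects the same bytes.
	fmt.Println(proto.UnmarshalOptions{RecursionLimit: 10}.Unmarshal(b, &out)) // error: recursion limit exceeded
}

The same limit flows through protoiface.UnmarshalInput.Depth, which is why the validate.go hunk above defaults in.Depth as well: the fast path and the validator observe the budget, not just the slow path.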
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 06d584c14..484c21fd5 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -172,13 +172,14 @@ import (
 // ) to obtain a formatter capable of generating timestamps in this format.
 type Timestamp struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
-	// Represents seconds of UTC time since Unix epoch
-	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
-	// 9999-12-31T23:59:59Z inclusive.
+	// Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must
+	// be between -315576000000 and 315576000000 inclusive (which corresponds to
+	// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z).
 	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
-	// Non-negative fractions of a second at nanosecond resolution. Negative
-	// second values with fractions must still have non-negative nanos values
-	// that count forward in time. Must be from 0 to 999,999,999
+	// Non-negative fractions of a second at nanosecond resolution. This field is
+	// the nanosecond portion of the duration, not an alternative to seconds.
+	// Negative second values with fractions must still have non-negative nanos
+	// values that count forward in time. Must be between 0 and 999,999,999
 	// inclusive.
 	Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
 	unknownFields protoimpl.UnknownFields
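The comment rewrite pins down two contracts: Seconds is bounded to ±315576000000 (10000 years of 365.25 days at 86400 seconds each), and Nanos is always the non-negative fractional part of the instant, never an alternative encoding of it. A short illustration with the timestamppb helpers, which already enforce these rules:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Half a second *before* the epoch: Seconds rounds down to -1 and
	// Nanos counts forward by 500ms, per the clarified comment.
	halfBefore := &timestamppb.Timestamp{Seconds: -1, Nanos: 500_000_000}
	fmt.Println(halfBefore.AsTime().UTC()) // 1969-12-31 23:59:59.5 +0000 UTC

	// Nanos outside [0, 999999999] is invalid even when Seconds is fine.
	fmt.Println((&timestamppb.Timestamp{Nanos: -1}).IsValid()) // false

	// One second past 9999-12-31T23:59:59Z (253402300799) is out of range.
	fmt.Println((&timestamppb.Timestamp{Seconds: 253402300800}).IsValid()) // false
}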
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d5828a172..df9fc7fee 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -16,7 +16,7 @@ github.com/AlecAivazis/survey/v2/terminal
 ## explicit; go 1.16
 github.com/Azure/go-ansiterm
 github.com/Azure/go-ansiterm/winterm
-# github.com/BurntSushi/toml v1.5.0
+# github.com/BurntSushi/toml v1.6.0
 ## explicit; go 1.18
 github.com/BurntSushi/toml
 github.com/BurntSushi/toml/internal
@@ -68,8 +68,8 @@ github.com/cespare/xxhash/v2
 # github.com/charmbracelet/bubbletea v1.3.10
 ## explicit; go 1.24.0
 github.com/charmbracelet/bubbletea
-# github.com/charmbracelet/colorprofile v0.3.2
-## explicit; go 1.23.0
+# github.com/charmbracelet/colorprofile v0.4.1
+## explicit; go 1.24.2
 github.com/charmbracelet/colorprofile
 # github.com/charmbracelet/lipgloss v1.1.0
 ## explicit; go 1.18
@@ -78,23 +78,29 @@ github.com/charmbracelet/lipgloss/table
 # github.com/charmbracelet/log v0.4.2
 ## explicit; go 1.19
 github.com/charmbracelet/log
-# github.com/charmbracelet/x/ansi v0.10.2
-## explicit; go 1.24.0
+# github.com/charmbracelet/x/ansi v0.11.4
+## explicit; go 1.24.2
 github.com/charmbracelet/x/ansi
 github.com/charmbracelet/x/ansi/parser
-# github.com/charmbracelet/x/cellbuf v0.0.13
-## explicit; go 1.18
+# github.com/charmbracelet/x/cellbuf v0.0.14
+## explicit; go 1.24.2
 github.com/charmbracelet/x/cellbuf
 # github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91
 ## explicit; go 1.19
-# github.com/charmbracelet/x/term v0.2.1
-## explicit; go 1.18
+# github.com/charmbracelet/x/term v0.2.2
+## explicit; go 1.24.0
 github.com/charmbracelet/x/term
-# github.com/clipperhouse/uax29/v2 v2.2.0
+# github.com/clipperhouse/displaywidth v0.7.0
+## explicit; go 1.18
+github.com/clipperhouse/displaywidth
+# github.com/clipperhouse/stringish v0.1.1
+## explicit; go 1.18
+github.com/clipperhouse/stringish
+# github.com/clipperhouse/uax29/v2 v2.3.0
 ## explicit; go 1.18
 github.com/clipperhouse/uax29/v2/graphemes
 github.com/clipperhouse/uax29/v2/internal/iterators
-# github.com/cloudflare/circl v1.6.1
+# github.com/cloudflare/circl v1.6.2
 ## explicit; go 1.22.0
 github.com/cloudflare/circl/dh/x25519
 github.com/cloudflare/circl/dh/x448
@@ -141,19 +147,10 @@ github.com/containers/image/types
 # github.com/cpuguy83/go-md2man/v2 v2.0.7
 ## explicit; go 1.12
 github.com/cpuguy83/go-md2man/v2/md2man
-# github.com/cyphar/filepath-securejoin v0.5.0
+# github.com/cyphar/filepath-securejoin v0.6.1
 ## explicit; go 1.18
 github.com/cyphar/filepath-securejoin
 github.com/cyphar/filepath-securejoin/internal/consts
-github.com/cyphar/filepath-securejoin/pathrs-lite
-github.com/cyphar/filepath-securejoin/pathrs-lite/internal
-github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert
-github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd
-github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat
-github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion
-github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux
-github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs
-github.com/cyphar/filepath-securejoin/pathrs-lite/procfs
 # github.com/davecgh/go-spew v1.1.1
 ## explicit
 github.com/davecgh/go-spew/spew
@@ -163,7 +160,7 @@ github.com/decentral1se/passgen
 # github.com/distribution/reference v0.6.0
 ## explicit; go 1.20
 github.com/distribution/reference
-# github.com/docker/cli v28.4.0+incompatible
+# github.com/docker/cli v29.1.5+incompatible
 ## explicit
 github.com/docker/cli/cli
 github.com/docker/cli/cli-plugins/metadata
@@ -171,7 +168,6 @@ github.com/docker/cli/cli/command
 github.com/docker/cli/cli/command/formatter
 github.com/docker/cli/cli/command/formatter/tabwriter
 github.com/docker/cli/cli/command/service/progress
-github.com/docker/cli/cli/command/stack/formatter
 github.com/docker/cli/cli/compose/interpolation
 github.com/docker/cli/cli/compose/loader
 github.com/docker/cli/cli/compose/schema
@@ -213,7 +209,7 @@ github.com/docker/distribution/registry/client/auth/challenge
 github.com/docker/distribution/registry/client/transport
 github.com/docker/distribution/registry/storage/cache
 github.com/docker/distribution/registry/storage/cache/memory
-# github.com/docker/docker v28.4.0+incompatible
+# github.com/docker/docker v28.5.2+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
@@ -243,10 +239,8 @@ github.com/docker/docker/pkg/homedir
 github.com/docker/docker/pkg/idtools
 github.com/docker/docker/pkg/ioutils
 github.com/docker/docker/pkg/jsonmessage
-github.com/docker/docker/pkg/progress
 github.com/docker/docker/pkg/stdcopy
-github.com/docker/docker/pkg/streamformatter
-# github.com/docker/docker-credential-helpers v0.9.3
+# github.com/docker/docker-credential-helpers v0.9.5
 ## explicit; go 1.21
 github.com/docker/docker-credential-helpers/client
 github.com/docker/docker-credential-helpers/credentials
@@ -290,15 +284,15 @@ github.com/go-git/gcfg
 github.com/go-git/gcfg/scanner
 github.com/go-git/gcfg/token
 github.com/go-git/gcfg/types
-# github.com/go-git/go-billy/v5 v5.6.2
-## explicit; go 1.21
+# github.com/go-git/go-billy/v5 v5.7.0
+## explicit; go 1.23.0
 github.com/go-git/go-billy/v5
 github.com/go-git/go-billy/v5/helper/chroot
 github.com/go-git/go-billy/v5/helper/polyfill
 github.com/go-git/go-billy/v5/memfs
 github.com/go-git/go-billy/v5/osfs
 github.com/go-git/go-billy/v5/util
-# github.com/go-git/go-git/v5 v5.16.2
+# github.com/go-git/go-git/v5 v5.16.4
 ## explicit; go 1.23.0
 github.com/go-git/go-git/v5
 github.com/go-git/go-git/v5/config
@@ -346,8 +340,8 @@ github.com/go-git/go-git/v5/utils/merkletrie/internal/frame
 github.com/go-git/go-git/v5/utils/merkletrie/noder
 github.com/go-git/go-git/v5/utils/sync
 github.com/go-git/go-git/v5/utils/trace
-# github.com/go-logfmt/logfmt v0.6.0
-## explicit; go 1.17
+# github.com/go-logfmt/logfmt v0.6.1
+## explicit; go 1.21
 github.com/go-logfmt/logfmt
 # github.com/go-logr/logr v1.4.3
 ## explicit; go 1.18
@@ -356,7 +350,7 @@ github.com/go-logr/logr/funcr
 # github.com/go-logr/stdr v1.2.2
 ## explicit; go 1.16
 github.com/go-logr/stdr
-# github.com/go-viper/mapstructure/v2 v2.4.0
+# github.com/go-viper/mapstructure/v2 v2.5.0
 ## explicit; go 1.18
 github.com/go-viper/mapstructure/v2
 github.com/go-viper/mapstructure/v2/internal/errors
@@ -379,8 +373,8 @@ github.com/google/uuid
 # github.com/gorilla/mux v1.8.1
 ## explicit; go 1.20
 github.com/gorilla/mux
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2
-## explicit; go 1.23.0
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4
+## explicit; go 1.24.0
 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
 github.com/grpc-ecosystem/grpc-gateway/v2/runtime
 github.com/grpc-ecosystem/grpc-gateway/v2/utilities
@@ -402,8 +396,8 @@ github.com/kballard/go-shellquote
 # github.com/kevinburke/ssh_config v1.4.0
 ## explicit; go 1.18
 github.com/kevinburke/ssh_config
-# github.com/klauspost/compress v1.18.0
-## explicit; go 1.22
+# github.com/klauspost/compress v1.18.3
+## explicit; go 1.23
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
@@ -437,19 +431,46 @@ github.com/mattn/go-runewidth
 # github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d
 ## explicit
 github.com/mgutz/ansi
-# github.com/miekg/pkcs11 v1.1.1
-## explicit; go 1.12
 # github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
 ## explicit
 github.com/mitchellh/colorstring
 # github.com/moby/docker-image-spec v1.3.1
 ## explicit; go 1.18
 github.com/moby/docker-image-spec/specs-go/v1
-# github.com/moby/go-archive v0.1.0
+# github.com/moby/go-archive v0.2.0
 ## explicit; go 1.23.0
 github.com/moby/go-archive
 github.com/moby/go-archive/compression
 github.com/moby/go-archive/tarheader
+# github.com/moby/moby/api v1.53.0-rc.1
+## explicit; go 1.24.0
+github.com/moby/moby/api/pkg/authconfig
+github.com/moby/moby/api/types
+github.com/moby/moby/api/types/blkiodev
+github.com/moby/moby/api/types/build
+github.com/moby/moby/api/types/checkpoint
+github.com/moby/moby/api/types/common
+github.com/moby/moby/api/types/container
+github.com/moby/moby/api/types/events
+github.com/moby/moby/api/types/image
+github.com/moby/moby/api/types/jsonstream
+github.com/moby/moby/api/types/mount
+github.com/moby/moby/api/types/network
+github.com/moby/moby/api/types/plugin
+github.com/moby/moby/api/types/registry
+github.com/moby/moby/api/types/storage
+github.com/moby/moby/api/types/swarm
+github.com/moby/moby/api/types/system
+github.com/moby/moby/api/types/volume
+# github.com/moby/moby/client v0.2.1
+## explicit; go 1.24.0
+github.com/moby/moby/client
+github.com/moby/moby/client/internal
+github.com/moby/moby/client/internal/timestamp
+github.com/moby/moby/client/pkg/progress
+github.com/moby/moby/client/pkg/streamformatter
+github.com/moby/moby/client/pkg/stringid
+github.com/moby/moby/client/pkg/versions
 # github.com/moby/patternmatcher v0.6.0
 ## explicit; go 1.19
 github.com/moby/patternmatcher
@@ -472,8 +493,8 @@ github.com/moby/sys/userns
 ## explicit; go 1.18
 github.com/moby/term
 github.com/moby/term/windows
-# github.com/morikuni/aec v1.0.0
-## explicit
+# github.com/morikuni/aec v1.1.0
+## explicit; go 1.21
 github.com/morikuni/aec
 # github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6
 ## explicit; go 1.17
@@ -521,12 +542,12 @@ github.com/prometheus/client_golang/prometheus/promhttp/internal
 # github.com/prometheus/client_model v0.6.2
 ## explicit; go 1.22.0
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.66.1
-## explicit; go 1.23.0
+# github.com/prometheus/common v0.67.5
+## explicit; go 1.24.0
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.17.0
-## explicit; go 1.23.0
+# github.com/prometheus/procfs v0.19.2
+## explicit; go 1.24.0
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
@@ -536,19 +557,19 @@ github.com/rivo/uniseg
 # github.com/russross/blackfriday/v2 v2.1.0
 ## explicit
 github.com/russross/blackfriday/v2
-# github.com/schollz/progressbar/v3 v3.18.0
+# github.com/schollz/progressbar/v3 v3.19.0
 ## explicit; go 1.22
 github.com/schollz/progressbar/v3
 # github.com/sergi/go-diff v1.4.0
 ## explicit; go 1.13
 github.com/sergi/go-diff/diffmatchpatch
-# github.com/sirupsen/logrus v1.9.3
-## explicit; go 1.13
+# github.com/sirupsen/logrus v1.9.4
+## explicit; go 1.17
 github.com/sirupsen/logrus
-# github.com/skeema/knownhosts v1.3.1
-## explicit; go 1.22
+# github.com/skeema/knownhosts v1.3.2
+## explicit; go 1.24.0
 github.com/skeema/knownhosts
-# github.com/spf13/cobra v1.10.1
+# github.com/spf13/cobra v1.10.2
 ## explicit; go 1.15
 github.com/spf13/cobra
 github.com/spf13/cobra/doc
@@ -559,8 +580,6 @@ github.com/spf13/pflag
 ## explicit; go 1.17
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/assert/yaml
-# github.com/theupdateframework/notary v0.7.0
-## explicit; go 1.12
 # github.com/xanzy/ssh-agent v0.3.3
 ## explicit; go 1.16
 github.com/xanzy/ssh-agent
@@ -580,75 +599,80 @@ github.com/xo/terminfo
 ## explicit; go 1.24.0
 go.opentelemetry.io/auto/sdk
 go.opentelemetry.io/auto/sdk/internal/telemetry
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0
+## explicit; go 1.24.0
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
-# go.opentelemetry.io/otel v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
 go.opentelemetry.io/otel/attribute/internal
+go.opentelemetry.io/otel/attribute/internal/xxhash
 go.opentelemetry.io/otel/baggage
 go.opentelemetry.io/otel/codes
 go.opentelemetry.io/otel/internal/baggage
 go.opentelemetry.io/otel/internal/global
 go.opentelemetry.io/otel/propagation
-go.opentelemetry.io/otel/semconv/v1.26.0
 go.opentelemetry.io/otel/semconv/v1.37.0
 go.opentelemetry.io/otel/semconv/v1.37.0/httpconv
 go.opentelemetry.io/otel/semconv/v1.37.0/otelconv
-# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/exporters/otlp/otlptrace
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x
 # go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0
 ## explicit; go 1.20
-# go.opentelemetry.io/otel/metric v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/metric v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/embedded
 go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/sdk v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/sdk v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/sdk
 go.opentelemetry.io/otel/sdk/instrumentation
-go.opentelemetry.io/otel/sdk/internal/env
 go.opentelemetry.io/otel/sdk/internal/x
 go.opentelemetry.io/otel/sdk/resource
 go.opentelemetry.io/otel/sdk/trace
-go.opentelemetry.io/otel/sdk/trace/internal/x
-# go.opentelemetry.io/otel/sdk/metric v1.38.0
-## explicit; go 1.23.0
+go.opentelemetry.io/otel/sdk/trace/internal/env
+go.opentelemetry.io/otel/sdk/trace/internal/observ
+# go.opentelemetry.io/otel/sdk/metric v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/sdk/metric
 go.opentelemetry.io/otel/sdk/metric/exemplar
 go.opentelemetry.io/otel/sdk/metric/internal
 go.opentelemetry.io/otel/sdk/metric/internal/aggregate
+go.opentelemetry.io/otel/sdk/metric/internal/observ
+go.opentelemetry.io/otel/sdk/metric/internal/reservoir
 go.opentelemetry.io/otel/sdk/metric/internal/x
 go.opentelemetry.io/otel/sdk/metric/metricdata
-# go.opentelemetry.io/otel/trace v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/trace v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
 go.opentelemetry.io/otel/trace/internal/telemetry
 go.opentelemetry.io/otel/trace/noop
-# go.opentelemetry.io/proto/otlp v1.8.0
+# go.opentelemetry.io/proto/otlp v1.9.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/proto/otlp/collector/metrics/v1
 go.opentelemetry.io/proto/otlp/collector/trace/v1
@@ -659,7 +683,10 @@ go.opentelemetry.io/proto/otlp/trace/v1
 # go.yaml.in/yaml/v2 v2.4.3
 ## explicit; go 1.15
 go.yaml.in/yaml/v2
-# golang.org/x/crypto v0.42.0
+# go.yaml.in/yaml/v3 v3.0.4
+## explicit; go 1.16
+go.yaml.in/yaml/v3
+# golang.org/x/crypto v0.47.0
 ## explicit; go 1.24.0
 golang.org/x/crypto/argon2
 golang.org/x/crypto/blake2b
@@ -677,13 +704,13 @@ golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/agent
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 golang.org/x/crypto/ssh/knownhosts
-# golang.org/x/exp v0.0.0-20250911091902-df9299821621
+# golang.org/x/exp v0.0.0-20260112195511-716be5621a96
 ## explicit; go 1.24.0
 golang.org/x/exp/slices
 golang.org/x/exp/slog
 golang.org/x/exp/slog/internal
 golang.org/x/exp/slog/internal/buffer
-# golang.org/x/net v0.44.0
+# golang.org/x/net v0.49.0
 ## explicit; go 1.24.0
 golang.org/x/net/context
 golang.org/x/net/http/httpguts
@@ -695,7 +722,7 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/sys v0.36.0
+# golang.org/x/sys v0.40.0
 ## explicit; go 1.24.0
 golang.org/x/sys/cpu
 golang.org/x/sys/execabs
@@ -703,10 +730,10 @@ golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 golang.org/x/sys/windows/registry
-# golang.org/x/term v0.35.0
+# golang.org/x/term v0.39.0
 ## explicit; go 1.24.0
 golang.org/x/term
-# golang.org/x/text v0.29.0
+# golang.org/x/text v0.33.0
 ## explicit; go 1.24.0
 golang.org/x/text/cases
 golang.org/x/text/internal
@@ -719,18 +746,18 @@ golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
 golang.org/x/text/width
-# golang.org/x/time v0.13.0
+# golang.org/x/time v0.14.0
 ## explicit; go 1.24.0
 golang.org/x/time/rate
-# google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4
+# google.golang.org/genproto/googleapis/api v0.0.0-20260114163908-3f89685c29c3
 ## explicit; go 1.24.0
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3
 ## explicit; go 1.24.0
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.75.1
-## explicit; go 1.23.0
+# google.golang.org/grpc v1.78.0
+## explicit; go 1.24.0
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
@@ -740,7 +767,6 @@ google.golang.org/grpc/balancer/endpointsharding
 google.golang.org/grpc/balancer/grpclb/state
 google.golang.org/grpc/balancer/pickfirst
 google.golang.org/grpc/balancer/pickfirst/internal
-google.golang.org/grpc/balancer/pickfirst/pickfirstleaf
 google.golang.org/grpc/balancer/roundrobin
 google.golang.org/grpc/binarylog/grpc_binarylog_v1
 google.golang.org/grpc/channelz
@@ -750,6 +776,7 @@ google.golang.org/grpc/credentials
 google.golang.org/grpc/credentials/insecure
 google.golang.org/grpc/encoding
 google.golang.org/grpc/encoding/gzip
+google.golang.org/grpc/encoding/internal
 google.golang.org/grpc/encoding/proto
 google.golang.org/grpc/experimental/stats
 google.golang.org/grpc/grpclog
@@ -793,7 +820,7 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.36.9
+# google.golang.org/protobuf v1.36.11
 ## explicit; go 1.23
 google.golang.org/protobuf/encoding/protodelim
 google.golang.org/protobuf/encoding/protojson