diff --git a/go.mod b/go.mod index 8af1e328..ee81b55b 100644 --- a/go.mod +++ b/go.mod @@ -8,13 +8,15 @@ require ( coopcloud.tech/tagcmp v0.0.0-20250818180036-0ec1b205b5ca git.coopcloud.tech/toolshed/godotenv v1.5.2-0.20250103171850-4d0ca41daa5c github.com/AlecAivazis/survey/v2 v2.3.7 - github.com/charmbracelet/bubbletea v1.3.6 + github.com/charmbracelet/bubbles v0.21.0 + github.com/charmbracelet/bubbletea v1.3.10 github.com/charmbracelet/lipgloss v1.1.0 github.com/charmbracelet/log v0.4.2 github.com/distribution/reference v0.6.0 - github.com/docker/cli v28.3.3+incompatible - github.com/docker/docker v28.3.3+incompatible + github.com/docker/cli v28.4.0+incompatible + github.com/docker/docker v28.4.0+incompatible github.com/docker/go-units v0.5.0 + github.com/evertras/bubble-table v0.19.2 github.com/go-git/go-git/v5 v5.16.2 github.com/google/go-cmp v0.7.0 github.com/leonelquinteros/gotext v1.7.2 @@ -22,7 +24,7 @@ require ( github.com/moby/term v0.5.2 github.com/pkg/errors v0.9.1 github.com/schollz/progressbar/v3 v3.18.0 - golang.org/x/term v0.34.0 + golang.org/x/term v0.35.0 gopkg.in/yaml.v3 v3.0.1 gotest.tools/v3 v3.5.2 ) @@ -33,22 +35,24 @@ require ( github.com/BurntSushi/toml v1.5.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProtonMail/go-crypto v1.3.0 // indirect + github.com/atotto/clipboard v0.1.4 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charmbracelet/colorprofile v0.3.2 // indirect - github.com/charmbracelet/x/ansi v0.10.1 // indirect + github.com/charmbracelet/x/ansi v0.10.2 // indirect github.com/charmbracelet/x/cellbuf v0.0.13 // indirect github.com/charmbracelet/x/term v0.2.1 // indirect + github.com/clipperhouse/uax29/v2 v2.2.0 // indirect github.com/cloudflare/circl v1.6.1 // indirect github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect - github.com/cyphar/filepath-securejoin v0.4.1 // indirect + github.com/cyphar/filepath-securejoin v0.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/go-connections v0.6.0 // indirect @@ -64,21 +68,21 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.4.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/kevinburke/ssh_config v1.2.0 // indirect + github.com/kevinburke/ssh_config v1.4.0 // indirect github.com/klauspost/compress v1.18.0 // indirect - github.com/lucasb-eyer/go-colorful v1.2.0 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/lucasb-eyer/go-colorful v1.3.0 // 
indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-localereader v0.0.1 // indirect - github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mattn/go-runewidth v0.0.19 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect @@ -90,47 +94,48 @@ require ( github.com/morikuni/aec v1.0.0 // indirect github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect github.com/muesli/cancelreader v0.2.2 // indirect + github.com/muesli/reflow v0.3.0 // indirect github.com/muesli/termenv v0.16.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/runc v1.1.13 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect - github.com/pjbgf/sha1cd v0.4.0 // indirect + github.com/pjbgf/sha1cd v0.5.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/common v0.66.1 // indirect github.com/prometheus/procfs v0.17.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/skeema/knownhosts v1.3.1 // indirect - github.com/spf13/pflag v1.0.7 // indirect + github.com/spf13/pflag v1.0.10 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect - go.opentelemetry.io/otel v1.37.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/sdk v1.37.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect - go.opentelemetry.io/proto/otlp v1.7.1 // indirect - golang.org/x/crypto v0.41.0 // indirect - golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 // indirect - golang.org/x/net v0.43.0 // indirect - golang.org/x/sync v0.16.0 // indirect - golang.org/x/text v0.28.0 // indirect - golang.org/x/time v0.12.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250811230008-5f3141c8851a // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a // indirect - google.golang.org/grpc v1.74.2 // indirect - 
google.golang.org/protobuf v1.36.7 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.8.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + golang.org/x/crypto v0.42.0 // indirect + golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect + golang.org/x/net v0.44.0 // indirect + golang.org/x/text v0.29.0 // indirect + golang.org/x/time v0.13.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect + google.golang.org/grpc v1.75.1 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) @@ -147,11 +152,11 @@ require ( github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.6.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect - github.com/prometheus/client_golang v1.23.0 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect github.com/sergi/go-diff v1.4.0 // indirect - github.com/spf13/cobra v1.9.1 - github.com/stretchr/testify v1.10.0 + github.com/spf13/cobra v1.10.1 + github.com/stretchr/testify v1.11.1 github.com/theupdateframework/notary v0.7.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect - golang.org/x/sys v0.35.0 + golang.org/x/sys v0.36.0 ) diff --git a/go.sum b/go.sum index ceab1712..6e15adf5 100644 --- a/go.sum +++ b/go.sum @@ -99,6 +99,8 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= +github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= @@ -133,20 +135,22 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charmbracelet/bubbletea v1.3.6 h1:VkHIxPJQeDt0aFJIsVxw8BQdh/F/L2KKZGsK6et5taU= -github.com/charmbracelet/bubbletea v1.3.6/go.mod h1:oQD9VCRQFF8KplacJLo28/jofOI2ToOfGYeFgBBxHOc= +github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs= +github.com/charmbracelet/bubbles v0.21.0/go.mod h1:HF+v6QUR4HkEpz62dx7ym2xc71/KBHg+zKwJtMw+qtg= +github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw= +github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4= 
github.com/charmbracelet/colorprofile v0.3.2 h1:9J27WdztfJQVAQKX2WOlSSRB+5gaKqqITmrvb1uTIiI= github.com/charmbracelet/colorprofile v0.3.2/go.mod h1:mTD5XzNeWHj8oqHb+S1bssQb7vIHbepiebQ2kPKVKbI= github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= github.com/charmbracelet/log v0.4.2 h1:hYt8Qj6a8yLnvR+h7MwsJv/XvmBJXiueUcI3cIxsyig= github.com/charmbracelet/log v0.4.2/go.mod h1:qifHGX/tc7eluv2R6pWIpyHDDrrb/AG71Pf2ysQu5nw= -github.com/charmbracelet/x/ansi v0.10.1 h1:rL3Koar5XvX0pHGfovN03f5cxLbCF2YvLeyz7D2jVDQ= -github.com/charmbracelet/x/ansi v0.10.1/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE= +github.com/charmbracelet/x/ansi v0.10.2 h1:ith2ArZS0CJG30cIUfID1LXN7ZFXRCww6RUvAPA+Pzw= +github.com/charmbracelet/x/ansi v0.10.2/go.mod h1:HbLdJjQH4UH4AqA2HpRWuWNluRE6zxJH/yteYEYCFa8= github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k= github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= -github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a h1:G99klV19u0QnhiizODirwVksQB91TJKV/UaTnACcG30= -github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= +github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ= +github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= @@ -164,6 +168,8 @@ github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/clipperhouse/uax29/v2 v2.2.0 h1:ChwIKnQN3kcZteTXMgb1wztSgaU+ZemkgWdohwgs8tY= +github.com/clipperhouse/uax29/v2 v2.2.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM= github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= @@ -296,8 +302,8 @@ github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= -github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/cyphar/filepath-securejoin v0.5.0 h1:hIAhkRBMQ8nIeuVwcAoymp7MY4oherZdAxD+m0u9zaw= +github.com/cyphar/filepath-securejoin v0.5.0/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/d2g/dhcp4 
v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= @@ -316,16 +322,16 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v28.3.3+incompatible h1:fp9ZHAr1WWPGdIWBM1b3zLtgCF+83gRdVMTJsUeiyAo= -github.com/docker/cli v28.3.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY= +github.com/docker/cli v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= -github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.4.0+incompatible h1:KVC7bz5zJY/4AZe/78BIvCnPsLaC9T/zh72xnlrTTOk= +github.com/docker/docker v28.4.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= @@ -367,6 +373,8 @@ github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6 github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evertras/bubble-table v0.19.2 h1:u77oiM6JlRR+CvS5FZc3Hz+J6iEsvEDcR5kO8OFb1Yw= +github.com/evertras/bubble-table v0.19.2/go.mod h1:ifHujS1YxwnYSOgcR2+m3GnJ84f7CVU/4kUOxUCjEbQ= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= @@ -439,7 +447,6 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod 
h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -529,8 +536,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -579,8 +586,8 @@ github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVE github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= -github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kevinburke/ssh_config v1.4.0 h1:6xxtP5bZ2E4NF5tuQulISpTO2z8XbtH8cg1PWkxoFkQ= +github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -590,6 +597,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -611,8 +620,8 @@ github.com/leonelquinteros/gotext v1.7.2 h1:bDPndU8nt+/kRo1m4l/1OXiiy2v7Z7dfPQ9+ github.com/leonelquinteros/gotext v1.7.2/go.mod h1:9/haCkm5P7Jay1sxKDGJ5WIg4zkz8oZKw4ekNpALob8= github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= -github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= -github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag= +github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -631,8 +640,9 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= -github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw= +github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= @@ -692,6 +702,8 @@ github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo= github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= +github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= +github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -758,8 +770,8 @@ github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/peterbourgon/diskv 
v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pjbgf/sha1cd v0.4.0 h1:NXzbL1RvjTUi6kgYZCX3fPwwl27Q1LJndxtUDVfJGRY= -github.com/pjbgf/sha1cd v0.4.0/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= +github.com/pjbgf/sha1cd v0.5.0 h1:a+UkboSi1znleCDUNT3M5YxjOnN1fz2FhN48FlwCxs0= +github.com/pjbgf/sha1cd v0.5.0/go.mod h1:lhpGlyHLpQZoxMv8HcgXvZEhcGs0PG/vsZnEJ7H0iCM= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -775,8 +787,8 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= -github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -790,8 +802,8 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= -github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -806,6 +818,7 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= 
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -851,8 +864,8 @@ github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3 github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -861,9 +874,9 @@ github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bd github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= -github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= @@ -878,8 +891,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= @@ -937,37 +950,39 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod 
h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0 h1:zG8GlgXCJQd5BU98C0hZnBbElszTmUgCNCfYneaDL0A= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0/go.mod h1:hOfBCz8kv/wuq73Mx2H2QnWokh/kHZxkh6SNF2bdKtw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= -go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= -go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= -go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= 
-go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= -go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= -go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= -go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= +go.opentelemetry.io/proto/otlp v1.8.0 h1:fRAZQDcAFHySxpJ1TwlA1cJ4tvcrw7nXl9xWWC8N5CE= +go.opentelemetry.io/proto/otlp v1.8.0/go.mod h1:tIeYOeNBU4cvmPqpaji1P+KbB4Oloai8wN4rWzRrFF0= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -986,8 +1001,8 @@ golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= -golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -998,8 +1013,8 @@ 
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 h1:SbTAbRFnd5kjQXbczszQ0hdk3ctwYf3qBNH9jIsGclE= -golang.org/x/exp v0.0.0-20250813145105-42675adae3e6/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4= +golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU= +golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1063,8 +1078,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= -golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= +golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1082,8 +1097,6 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1162,13 +1175,13 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.35.0 
h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= -golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= -golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= +golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1178,16 +1191,16 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= -golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= -golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= +golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1237,6 +1250,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -1281,10 +1296,10 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20250811230008-5f3141c8851a h1:DMCgtIAIQGZqJXMVzJF4MV8BlWoJh2ZuFiRdAleyr58= -google.golang.org/genproto/googleapis/api v0.0.0-20250811230008-5f3141c8851a/go.mod h1:y2yVLIE/CSMCPXaHnSKXxu1spLPnglFLegmgdY23uuE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a h1:tPE/Kp+x9dMSwUm/uM0JKK0IfdiJkwAbSMSeZBXXJXc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a/go.mod h1:gw1tLEfykwDz2ET4a12jcXt4couGAm7IwsVaTy0Sflo= +google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU= +google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 h1:i8QOKZfYg6AbGVZzUAY3LrNWCKF8O6zFisU9Wl9RER4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1304,8 +1319,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= -google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1319,8 +1334,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= -google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/cenkalti/backoff.v2 v2.2.1/go.mod h1:S0QdOvT2AlerfSBkp0O+dk+bbIMaNbEmVk876gPCthU= diff --git a/vendor/github.com/atotto/clipboard/.travis.yml b/vendor/github.com/atotto/clipboard/.travis.yml new file mode 100644 index 00000000..23f21d83 --- /dev/null +++ b/vendor/github.com/atotto/clipboard/.travis.yml @@ -0,0 +1,22 @@ +language: go + +os: + - linux + - osx + - windows + +go: + - go1.13.x + - go1.x + +services: + - xvfb + +before_install: + - export DISPLAY=:99.0 + +script: + - if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo apt-get install xsel; fi + - go test -v . + - if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo apt-get install xclip; fi + - go test -v . diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/github.com/atotto/clipboard/LICENSE similarity index 92% rename from vendor/golang.org/x/sync/LICENSE rename to vendor/github.com/atotto/clipboard/LICENSE index 2a7cf70d..dee3257b 100644 --- a/vendor/golang.org/x/sync/LICENSE +++ b/vendor/github.com/atotto/clipboard/LICENSE @@ -1,4 +1,4 @@ -Copyright 2009 The Go Authors. +Copyright (c) 2013 Ato Araki. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google LLC nor the names of its + * Neither the name of @atotto. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/github.com/atotto/clipboard/README.md b/vendor/github.com/atotto/clipboard/README.md new file mode 100644 index 00000000..41fdd57b --- /dev/null +++ b/vendor/github.com/atotto/clipboard/README.md @@ -0,0 +1,48 @@ +[![Build Status](https://travis-ci.org/atotto/clipboard.svg?branch=master)](https://travis-ci.org/atotto/clipboard) + +[![GoDoc](https://godoc.org/github.com/atotto/clipboard?status.svg)](http://godoc.org/github.com/atotto/clipboard) + +# Clipboard for Go + +Provide copying and pasting to the Clipboard for Go. + +Build: + + $ go get github.com/atotto/clipboard + +Platforms: + +* OSX +* Windows 7 (probably work on other Windows) +* Linux, Unix (requires 'xclip' or 'xsel' command to be installed) + + +Document: + +* http://godoc.org/github.com/atotto/clipboard + +Notes: + +* Text string only +* UTF-8 text encoding only (no conversion) + +TODO: + +* Clipboard watcher(?) 
+ +## Commands: + +paste shell command: + + $ go get github.com/atotto/clipboard/cmd/gopaste + $ # example: + $ gopaste > document.txt + +copy shell command: + + $ go get github.com/atotto/clipboard/cmd/gocopy + $ # example: + $ cat document.txt | gocopy + + + diff --git a/vendor/github.com/atotto/clipboard/clipboard.go b/vendor/github.com/atotto/clipboard/clipboard.go new file mode 100644 index 00000000..d7907d3a --- /dev/null +++ b/vendor/github.com/atotto/clipboard/clipboard.go @@ -0,0 +1,20 @@ +// Copyright 2013 @atotto. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package clipboard read/write on clipboard +package clipboard + +// ReadAll read string from clipboard +func ReadAll() (string, error) { + return readAll() +} + +// WriteAll write string to clipboard +func WriteAll(text string) error { + return writeAll(text) +} + +// Unsupported might be set true during clipboard init, to help callers decide +// whether or not to offer clipboard options. +var Unsupported bool diff --git a/vendor/github.com/atotto/clipboard/clipboard_darwin.go b/vendor/github.com/atotto/clipboard/clipboard_darwin.go new file mode 100644 index 00000000..6f33078d --- /dev/null +++ b/vendor/github.com/atotto/clipboard/clipboard_darwin.go @@ -0,0 +1,52 @@ +// Copyright 2013 @atotto. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package clipboard + +import ( + "os/exec" +) + +var ( + pasteCmdArgs = "pbpaste" + copyCmdArgs = "pbcopy" +) + +func getPasteCommand() *exec.Cmd { + return exec.Command(pasteCmdArgs) +} + +func getCopyCommand() *exec.Cmd { + return exec.Command(copyCmdArgs) +} + +func readAll() (string, error) { + pasteCmd := getPasteCommand() + out, err := pasteCmd.Output() + if err != nil { + return "", err + } + return string(out), nil +} + +func writeAll(text string) error { + copyCmd := getCopyCommand() + in, err := copyCmd.StdinPipe() + if err != nil { + return err + } + + if err := copyCmd.Start(); err != nil { + return err + } + if _, err := in.Write([]byte(text)); err != nil { + return err + } + if err := in.Close(); err != nil { + return err + } + return copyCmd.Wait() +} diff --git a/vendor/github.com/atotto/clipboard/clipboard_plan9.go b/vendor/github.com/atotto/clipboard/clipboard_plan9.go new file mode 100644 index 00000000..9d2fef4e --- /dev/null +++ b/vendor/github.com/atotto/clipboard/clipboard_plan9.go @@ -0,0 +1,42 @@ +// Copyright 2013 @atotto. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build plan9 + +package clipboard + +import ( + "os" + "io/ioutil" +) + +func readAll() (string, error) { + f, err := os.Open("/dev/snarf") + if err != nil { + return "", err + } + defer f.Close() + + str, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + return string(str), nil +} + +func writeAll(text string) error { + f, err := os.OpenFile("/dev/snarf", os.O_WRONLY, 0666) + if err != nil { + return err + } + defer f.Close() + + _, err = f.Write([]byte(text)) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/atotto/clipboard/clipboard_unix.go b/vendor/github.com/atotto/clipboard/clipboard_unix.go new file mode 100644 index 00000000..d9f6a561 --- /dev/null +++ b/vendor/github.com/atotto/clipboard/clipboard_unix.go @@ -0,0 +1,149 @@ +// Copyright 2013 @atotto. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd linux netbsd openbsd solaris dragonfly + +package clipboard + +import ( + "errors" + "os" + "os/exec" +) + +const ( + xsel = "xsel" + xclip = "xclip" + powershellExe = "powershell.exe" + clipExe = "clip.exe" + wlcopy = "wl-copy" + wlpaste = "wl-paste" + termuxClipboardGet = "termux-clipboard-get" + termuxClipboardSet = "termux-clipboard-set" +) + +var ( + Primary bool + trimDos bool + + pasteCmdArgs []string + copyCmdArgs []string + + xselPasteArgs = []string{xsel, "--output", "--clipboard"} + xselCopyArgs = []string{xsel, "--input", "--clipboard"} + + xclipPasteArgs = []string{xclip, "-out", "-selection", "clipboard"} + xclipCopyArgs = []string{xclip, "-in", "-selection", "clipboard"} + + powershellExePasteArgs = []string{powershellExe, "Get-Clipboard"} + clipExeCopyArgs = []string{clipExe} + + wlpasteArgs = []string{wlpaste, "--no-newline"} + wlcopyArgs = []string{wlcopy} + + termuxPasteArgs = []string{termuxClipboardGet} + termuxCopyArgs = []string{termuxClipboardSet} + + missingCommands = errors.New("No clipboard utilities available. Please install xsel, xclip, wl-clipboard or Termux:API add-on for termux-clipboard-get/set.") +) + +func init() { + if os.Getenv("WAYLAND_DISPLAY") != "" { + pasteCmdArgs = wlpasteArgs + copyCmdArgs = wlcopyArgs + + if _, err := exec.LookPath(wlcopy); err == nil { + if _, err := exec.LookPath(wlpaste); err == nil { + return + } + } + } + + pasteCmdArgs = xclipPasteArgs + copyCmdArgs = xclipCopyArgs + + if _, err := exec.LookPath(xclip); err == nil { + return + } + + pasteCmdArgs = xselPasteArgs + copyCmdArgs = xselCopyArgs + + if _, err := exec.LookPath(xsel); err == nil { + return + } + + pasteCmdArgs = termuxPasteArgs + copyCmdArgs = termuxCopyArgs + + if _, err := exec.LookPath(termuxClipboardSet); err == nil { + if _, err := exec.LookPath(termuxClipboardGet); err == nil { + return + } + } + + pasteCmdArgs = powershellExePasteArgs + copyCmdArgs = clipExeCopyArgs + trimDos = true + + if _, err := exec.LookPath(clipExe); err == nil { + if _, err := exec.LookPath(powershellExe); err == nil { + return + } + } + + Unsupported = true +} + +func getPasteCommand() *exec.Cmd { + if Primary { + pasteCmdArgs = pasteCmdArgs[:1] + } + return exec.Command(pasteCmdArgs[0], pasteCmdArgs[1:]...) +} + +func getCopyCommand() *exec.Cmd { + if Primary { + copyCmdArgs = copyCmdArgs[:1] + } + return exec.Command(copyCmdArgs[0], copyCmdArgs[1:]...) 
+} + +func readAll() (string, error) { + if Unsupported { + return "", missingCommands + } + pasteCmd := getPasteCommand() + out, err := pasteCmd.Output() + if err != nil { + return "", err + } + result := string(out) + if trimDos && len(result) > 1 { + result = result[:len(result)-2] + } + return result, nil +} + +func writeAll(text string) error { + if Unsupported { + return missingCommands + } + copyCmd := getCopyCommand() + in, err := copyCmd.StdinPipe() + if err != nil { + return err + } + + if err := copyCmd.Start(); err != nil { + return err + } + if _, err := in.Write([]byte(text)); err != nil { + return err + } + if err := in.Close(); err != nil { + return err + } + return copyCmd.Wait() +} diff --git a/vendor/github.com/atotto/clipboard/clipboard_windows.go b/vendor/github.com/atotto/clipboard/clipboard_windows.go new file mode 100644 index 00000000..253bb932 --- /dev/null +++ b/vendor/github.com/atotto/clipboard/clipboard_windows.go @@ -0,0 +1,157 @@ +// Copyright 2013 @atotto. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package clipboard + +import ( + "runtime" + "syscall" + "time" + "unsafe" +) + +const ( + cfUnicodetext = 13 + gmemMoveable = 0x0002 +) + +var ( + user32 = syscall.MustLoadDLL("user32") + isClipboardFormatAvailable = user32.MustFindProc("IsClipboardFormatAvailable") + openClipboard = user32.MustFindProc("OpenClipboard") + closeClipboard = user32.MustFindProc("CloseClipboard") + emptyClipboard = user32.MustFindProc("EmptyClipboard") + getClipboardData = user32.MustFindProc("GetClipboardData") + setClipboardData = user32.MustFindProc("SetClipboardData") + + kernel32 = syscall.NewLazyDLL("kernel32") + globalAlloc = kernel32.NewProc("GlobalAlloc") + globalFree = kernel32.NewProc("GlobalFree") + globalLock = kernel32.NewProc("GlobalLock") + globalUnlock = kernel32.NewProc("GlobalUnlock") + lstrcpy = kernel32.NewProc("lstrcpyW") +) + +// waitOpenClipboard opens the clipboard, waiting for up to a second to do so. +func waitOpenClipboard() error { + started := time.Now() + limit := started.Add(time.Second) + var r uintptr + var err error + for time.Now().Before(limit) { + r, _, err = openClipboard.Call(0) + if r != 0 { + return nil + } + time.Sleep(time.Millisecond) + } + return err +} + +func readAll() (string, error) { + // LockOSThread ensure that the whole method will keep executing on the same thread from begin to end (it actually locks the goroutine thread attribution). + // Otherwise if the goroutine switch thread during execution (which is a common practice), the OpenClipboard and CloseClipboard will happen on two different threads, and it will result in a clipboard deadlock. 
+ runtime.LockOSThread() + defer runtime.UnlockOSThread() + if formatAvailable, _, err := isClipboardFormatAvailable.Call(cfUnicodetext); formatAvailable == 0 { + return "", err + } + err := waitOpenClipboard() + if err != nil { + return "", err + } + + h, _, err := getClipboardData.Call(cfUnicodetext) + if h == 0 { + _, _, _ = closeClipboard.Call() + return "", err + } + + l, _, err := globalLock.Call(h) + if l == 0 { + _, _, _ = closeClipboard.Call() + return "", err + } + + text := syscall.UTF16ToString((*[1 << 20]uint16)(unsafe.Pointer(l))[:]) + + r, _, err := globalUnlock.Call(h) + if r == 0 { + _, _, _ = closeClipboard.Call() + return "", err + } + + closed, _, err := closeClipboard.Call() + if closed == 0 { + return "", err + } + return text, nil +} + +func writeAll(text string) error { + // LockOSThread ensure that the whole method will keep executing on the same thread from begin to end (it actually locks the goroutine thread attribution). + // Otherwise if the goroutine switch thread during execution (which is a common practice), the OpenClipboard and CloseClipboard will happen on two different threads, and it will result in a clipboard deadlock. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + err := waitOpenClipboard() + if err != nil { + return err + } + + r, _, err := emptyClipboard.Call(0) + if r == 0 { + _, _, _ = closeClipboard.Call() + return err + } + + data := syscall.StringToUTF16(text) + + // "If the hMem parameter identifies a memory object, the object must have + // been allocated using the function with the GMEM_MOVEABLE flag." + h, _, err := globalAlloc.Call(gmemMoveable, uintptr(len(data)*int(unsafe.Sizeof(data[0])))) + if h == 0 { + _, _, _ = closeClipboard.Call() + return err + } + defer func() { + if h != 0 { + globalFree.Call(h) + } + }() + + l, _, err := globalLock.Call(h) + if l == 0 { + _, _, _ = closeClipboard.Call() + return err + } + + r, _, err = lstrcpy.Call(l, uintptr(unsafe.Pointer(&data[0]))) + if r == 0 { + _, _, _ = closeClipboard.Call() + return err + } + + r, _, err = globalUnlock.Call(h) + if r == 0 { + if err.(syscall.Errno) != 0 { + _, _, _ = closeClipboard.Call() + return err + } + } + + r, _, err = setClipboardData.Call(cfUnicodetext, h) + if r == 0 { + _, _, _ = closeClipboard.Call() + return err + } + h = 0 // suppress deferred cleanup + closed, _, err := closeClipboard.Call() + if closed == 0 { + return err + } + return nil +} diff --git a/vendor/github.com/charmbracelet/bubbles/LICENSE b/vendor/github.com/charmbracelet/bubbles/LICENSE new file mode 100644 index 00000000..31d76c1c --- /dev/null +++ b/vendor/github.com/charmbracelet/bubbles/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020-2023 Charmbracelet, Inc + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/charmbracelet/bubbles/cursor/cursor.go b/vendor/github.com/charmbracelet/bubbles/cursor/cursor.go new file mode 100644 index 00000000..d101332b --- /dev/null +++ b/vendor/github.com/charmbracelet/bubbles/cursor/cursor.go @@ -0,0 +1,219 @@ +// Package cursor provides cursor functionality for Bubble Tea applications. +package cursor + +import ( + "context" + "time" + + tea "github.com/charmbracelet/bubbletea" + "github.com/charmbracelet/lipgloss" +) + +const defaultBlinkSpeed = time.Millisecond * 530 + +// initialBlinkMsg initializes cursor blinking. +type initialBlinkMsg struct{} + +// BlinkMsg signals that the cursor should blink. It contains metadata that +// allows us to tell if the blink message is the one we're expecting. +type BlinkMsg struct { + id int + tag int +} + +// blinkCanceled is sent when a blink operation is canceled. +type blinkCanceled struct{} + +// blinkCtx manages cursor blinking. +type blinkCtx struct { + ctx context.Context + cancel context.CancelFunc +} + +// Mode describes the behavior of the cursor. +type Mode int + +// Available cursor modes. +const ( + CursorBlink Mode = iota + CursorStatic + CursorHide +) + +// String returns the cursor mode in a human-readable format. This method is +// provisional and for informational purposes only. +func (c Mode) String() string { + return [...]string{ + "blink", + "static", + "hidden", + }[c] +} + +// Model is the Bubble Tea model for this cursor element. +type Model struct { + BlinkSpeed time.Duration + // Style for styling the cursor block. + Style lipgloss.Style + // TextStyle is the style used for the cursor when it is hidden (when blinking). + // I.e. displaying normal text. + TextStyle lipgloss.Style + + // char is the character under the cursor + char string + // The ID of this Model as it relates to other cursors + id int + // focus indicates whether the containing input is focused + focus bool + // Cursor Blink state. + Blink bool + // Used to manage cursor blink + blinkCtx *blinkCtx + // The ID of the blink message we're expecting to receive. + blinkTag int + // mode determines the behavior of the cursor + mode Mode +} + +// New creates a new model with default settings. +func New() Model { + return Model{ + BlinkSpeed: defaultBlinkSpeed, + + Blink: true, + mode: CursorBlink, + + blinkCtx: &blinkCtx{ + ctx: context.Background(), + }, + } +} + +// Update updates the cursor. +func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) { + switch msg := msg.(type) { + case initialBlinkMsg: + // We accept all initialBlinkMsgs generated by the Blink command. + + if m.mode != CursorBlink || !m.focus { + return m, nil + } + + cmd := m.BlinkCmd() + return m, cmd + + case tea.FocusMsg: + return m, m.Focus() + + case tea.BlurMsg: + m.Blur() + return m, nil + + case BlinkMsg: + // We're choosy about whether to accept blinkMsgs so that our cursor + // only exactly when it should. + + // Is this model blink-able? + if m.mode != CursorBlink || !m.focus { + return m, nil + } + + // Were we expecting this blink message? 
+ if msg.id != m.id || msg.tag != m.blinkTag { + return m, nil + } + + var cmd tea.Cmd + if m.mode == CursorBlink { + m.Blink = !m.Blink + cmd = m.BlinkCmd() + } + return m, cmd + + case blinkCanceled: // no-op + return m, nil + } + return m, nil +} + +// Mode returns the model's cursor mode. For available cursor modes, see +// type Mode. +func (m Model) Mode() Mode { + return m.mode +} + +// SetMode sets the model's cursor mode. This method returns a command. +// +// For available cursor modes, see type CursorMode. +func (m *Model) SetMode(mode Mode) tea.Cmd { + // Adjust the mode value if it's value is out of range + if mode < CursorBlink || mode > CursorHide { + return nil + } + m.mode = mode + m.Blink = m.mode == CursorHide || !m.focus + if mode == CursorBlink { + return Blink + } + return nil +} + +// BlinkCmd is a command used to manage cursor blinking. +func (m *Model) BlinkCmd() tea.Cmd { + if m.mode != CursorBlink { + return nil + } + + if m.blinkCtx != nil && m.blinkCtx.cancel != nil { + m.blinkCtx.cancel() + } + + ctx, cancel := context.WithTimeout(m.blinkCtx.ctx, m.BlinkSpeed) + m.blinkCtx.cancel = cancel + + m.blinkTag++ + + return func() tea.Msg { + defer cancel() + <-ctx.Done() + if ctx.Err() == context.DeadlineExceeded { + return BlinkMsg{id: m.id, tag: m.blinkTag} + } + return blinkCanceled{} + } +} + +// Blink is a command used to initialize cursor blinking. +func Blink() tea.Msg { + return initialBlinkMsg{} +} + +// Focus focuses the cursor to allow it to blink if desired. +func (m *Model) Focus() tea.Cmd { + m.focus = true + m.Blink = m.mode == CursorHide // show the cursor unless we've explicitly hidden it + + if m.mode == CursorBlink && m.focus { + return m.BlinkCmd() + } + return nil +} + +// Blur blurs the cursor. +func (m *Model) Blur() { + m.focus = false + m.Blink = true +} + +// SetChar sets the character under the cursor. +func (m *Model) SetChar(char string) { + m.char = char +} + +// View displays the cursor. +func (m Model) View() string { + if m.Blink { + return m.TextStyle.Inline(true).Render(m.char) + } + return m.Style.Inline(true).Reverse(true).Render(m.char) +} diff --git a/vendor/github.com/charmbracelet/bubbles/key/key.go b/vendor/github.com/charmbracelet/bubbles/key/key.go new file mode 100644 index 00000000..0682665d --- /dev/null +++ b/vendor/github.com/charmbracelet/bubbles/key/key.go @@ -0,0 +1,140 @@ +// Package key provides some types and functions for generating user-definable +// keymappings useful in Bubble Tea components. There are a few different ways +// you can define a keymapping with this package. Here's one example: +// +// type KeyMap struct { +// Up key.Binding +// Down key.Binding +// } +// +// var DefaultKeyMap = KeyMap{ +// Up: key.NewBinding( +// key.WithKeys("k", "up"), // actual keybindings +// key.WithHelp("↑/k", "move up"), // corresponding help text +// ), +// Down: key.NewBinding( +// key.WithKeys("j", "down"), +// key.WithHelp("↓/j", "move down"), +// ), +// } +// +// func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { +// switch msg := msg.(type) { +// case tea.KeyMsg: +// switch { +// case key.Matches(msg, DefaultKeyMap.Up): +// // The user pressed up +// case key.Matches(msg, DefaultKeyMap.Down): +// // The user pressed down +// } +// } +// +// // ... +// } +// +// The help information, which is not used in the example above, can be used +// to render help text for keystrokes in your views. +package key + +import "fmt" + +// Binding describes a set of keybindings and, optionally, their associated +// help text. 
+type Binding struct { + keys []string + help Help + disabled bool +} + +// BindingOpt is an initialization option for a keybinding. It's used as an +// argument to NewBinding. +type BindingOpt func(*Binding) + +// NewBinding returns a new keybinding from a set of BindingOpt options. +func NewBinding(opts ...BindingOpt) Binding { + b := &Binding{} + for _, opt := range opts { + opt(b) + } + return *b +} + +// WithKeys initializes a keybinding with the given keystrokes. +func WithKeys(keys ...string) BindingOpt { + return func(b *Binding) { + b.keys = keys + } +} + +// WithHelp initializes a keybinding with the given help text. +func WithHelp(key, desc string) BindingOpt { + return func(b *Binding) { + b.help = Help{Key: key, Desc: desc} + } +} + +// WithDisabled initializes a disabled keybinding. +func WithDisabled() BindingOpt { + return func(b *Binding) { + b.disabled = true + } +} + +// SetKeys sets the keys for the keybinding. +func (b *Binding) SetKeys(keys ...string) { + b.keys = keys +} + +// Keys returns the keys for the keybinding. +func (b Binding) Keys() []string { + return b.keys +} + +// SetHelp sets the help text for the keybinding. +func (b *Binding) SetHelp(key, desc string) { + b.help = Help{Key: key, Desc: desc} +} + +// Help returns the Help information for the keybinding. +func (b Binding) Help() Help { + return b.help +} + +// Enabled returns whether or not the keybinding is enabled. Disabled +// keybindings won't be activated and won't show up in help. Keybindings are +// enabled by default. +func (b Binding) Enabled() bool { + return !b.disabled && b.keys != nil +} + +// SetEnabled enables or disables the keybinding. +func (b *Binding) SetEnabled(v bool) { + b.disabled = !v +} + +// Unbind removes the keys and help from this binding, effectively nullifying +// it. This is a step beyond disabling it, since applications can enable +// or disable key bindings based on application state. +func (b *Binding) Unbind() { + b.keys = nil + b.help = Help{} +} + +// Help is help information for a given keybinding. +type Help struct { + Key string + Desc string +} + +// Matches checks if the given key matches the given bindings. +func Matches[Key fmt.Stringer](k Key, b ...Binding) bool { + keys := k.String() + for _, binding := range b { + for _, v := range binding.keys { + if keys == v && binding.Enabled() { + return true + } + } + } + return false +} diff --git a/vendor/github.com/charmbracelet/bubbles/runeutil/runeutil.go b/vendor/github.com/charmbracelet/bubbles/runeutil/runeutil.go new file mode 100644 index 00000000..82ea90a2 --- /dev/null +++ b/vendor/github.com/charmbracelet/bubbles/runeutil/runeutil.go @@ -0,0 +1,102 @@ +// Package runeutil provides a utility function for use in Bubbles +// that can process Key messages containing runes. +package runeutil + +import ( + "unicode" + "unicode/utf8" +) + +// Sanitizer is a helper for bubble widgets that want to process +// Runes from input key messages. +type Sanitizer interface { + // Sanitize removes control characters from runes in a KeyRunes + // message, and optionally replaces newline/carriage return/tabs by a + // specified character. + // + // The rune array is modified in-place if possible. In that case, the + // returned slice is the original slice shortened after the control + // characters have been removed/translated. + Sanitize(runes []rune) []rune +} + +// NewSanitizer constructs a rune sanitizer. 
+func NewSanitizer(opts ...Option) Sanitizer { + s := sanitizer{ + replaceNewLine: []rune("\n"), + replaceTab: []rune(" "), + } + for _, o := range opts { + s = o(s) + } + return &s +} + +// Option is the type of option that can be passed to Sanitize(). +type Option func(sanitizer) sanitizer + +// ReplaceTabs replaces tabs by the specified string. +func ReplaceTabs(tabRepl string) Option { + return func(s sanitizer) sanitizer { + s.replaceTab = []rune(tabRepl) + return s + } +} + +// ReplaceNewlines replaces newline characters by the specified string. +func ReplaceNewlines(nlRepl string) Option { + return func(s sanitizer) sanitizer { + s.replaceNewLine = []rune(nlRepl) + return s + } +} + +func (s *sanitizer) Sanitize(runes []rune) []rune { + // dstrunes are where we are storing the result. + dstrunes := runes[:0:len(runes)] + // copied indicates whether dstrunes is an alias of runes + // or a copy. We need a copy when dst moves past src. + // We use this as an optimization to avoid allocating + // a new rune slice in the common case where the output + // is smaller or equal to the input. + copied := false + + for src := 0; src < len(runes); src++ { + r := runes[src] + switch { + case r == utf8.RuneError: + // skip + + case r == '\r' || r == '\n': + if len(dstrunes)+len(s.replaceNewLine) > src && !copied { + dst := len(dstrunes) + dstrunes = make([]rune, dst, len(runes)+len(s.replaceNewLine)) + copy(dstrunes, runes[:dst]) + copied = true + } + dstrunes = append(dstrunes, s.replaceNewLine...) + + case r == '\t': + if len(dstrunes)+len(s.replaceTab) > src && !copied { + dst := len(dstrunes) + dstrunes = make([]rune, dst, len(runes)+len(s.replaceTab)) + copy(dstrunes, runes[:dst]) + copied = true + } + dstrunes = append(dstrunes, s.replaceTab...) + + case unicode.IsControl(r): + // Other control characters: skip. + + default: + // Keep the character. + dstrunes = append(dstrunes, runes[src]) + } + } + return dstrunes +} + +type sanitizer struct { + replaceNewLine []rune + replaceTab []rune +} diff --git a/vendor/github.com/charmbracelet/bubbles/spinner/spinner.go b/vendor/github.com/charmbracelet/bubbles/spinner/spinner.go new file mode 100644 index 00000000..b2d24b09 --- /dev/null +++ b/vendor/github.com/charmbracelet/bubbles/spinner/spinner.go @@ -0,0 +1,224 @@ +// Package spinner provides a spinner component for Bubble Tea applications. +package spinner + +import ( + "sync/atomic" + "time" + + tea "github.com/charmbracelet/bubbletea" + "github.com/charmbracelet/lipgloss" +) + +// Internal ID management. Used during animating to ensure that frame messages +// are received only by spinner components that sent them. +var lastID int64 + +func nextID() int { + return int(atomic.AddInt64(&lastID, 1)) +} + +// Spinner is a set of frames used in animating the spinner. +type Spinner struct { + Frames []string + FPS time.Duration +} + +// Some spinners to choose from. You could also make your own. 
+var ( + Line = Spinner{ + Frames: []string{"|", "/", "-", "\\"}, + FPS: time.Second / 10, //nolint:mnd + } + Dot = Spinner{ + Frames: []string{"⣾ ", "⣽ ", "⣻ ", "⢿ ", "⡿ ", "⣟ ", "⣯ ", "⣷ "}, + FPS: time.Second / 10, //nolint:mnd + } + MiniDot = Spinner{ + Frames: []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}, + FPS: time.Second / 12, //nolint:mnd + } + Jump = Spinner{ + Frames: []string{"⢄", "⢂", "⢁", "⡁", "⡈", "⡐", "⡠"}, + FPS: time.Second / 10, //nolint:mnd + } + Pulse = Spinner{ + Frames: []string{"█", "▓", "▒", "░"}, + FPS: time.Second / 8, //nolint:mnd + } + Points = Spinner{ + Frames: []string{"∙∙∙", "●∙∙", "∙●∙", "∙∙●"}, + FPS: time.Second / 7, //nolint:mnd + } + Globe = Spinner{ + Frames: []string{"🌍", "🌎", "🌏"}, + FPS: time.Second / 4, //nolint:mnd + } + Moon = Spinner{ + Frames: []string{"🌑", "🌒", "🌓", "🌔", "🌕", "🌖", "🌗", "🌘"}, + FPS: time.Second / 8, //nolint:mnd + } + Monkey = Spinner{ + Frames: []string{"🙈", "🙉", "🙊"}, + FPS: time.Second / 3, //nolint:mnd + } + Meter = Spinner{ + Frames: []string{ + "▱▱▱", + "▰▱▱", + "▰▰▱", + "▰▰▰", + "▰▰▱", + "▰▱▱", + "▱▱▱", + }, + FPS: time.Second / 7, //nolint:mnd + } + Hamburger = Spinner{ + Frames: []string{"☱", "☲", "☴", "☲"}, + FPS: time.Second / 3, //nolint:mnd + } + Ellipsis = Spinner{ + Frames: []string{"", ".", "..", "..."}, + FPS: time.Second / 3, //nolint:mnd + } +) + +// Model contains the state for the spinner. Use New to create new models +// rather than using Model as a struct literal. +type Model struct { + // Spinner settings to use. See type Spinner. + Spinner Spinner + + // Style sets the styling for the spinner. Most of the time you'll just + // want foreground and background coloring, and potentially some padding. + // + // For an introduction to styling with Lip Gloss see: + // https://github.com/charmbracelet/lipgloss + Style lipgloss.Style + + frame int + id int + tag int +} + +// ID returns the spinner's unique ID. +func (m Model) ID() int { + return m.id +} + +// New returns a model with default values. +func New(opts ...Option) Model { + m := Model{ + Spinner: Line, + id: nextID(), + } + + for _, opt := range opts { + opt(&m) + } + + return m +} + +// NewModel returns a model with default values. +// +// Deprecated: use [New] instead. +var NewModel = New + +// TickMsg indicates that the timer has ticked and we should render a frame. +type TickMsg struct { + Time time.Time + tag int + ID int +} + +// Update is the Tea update function. +func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) { + switch msg := msg.(type) { + case TickMsg: + // If an ID is set, and the ID doesn't belong to this spinner, reject + // the message. + if msg.ID > 0 && msg.ID != m.id { + return m, nil + } + + // If a tag is set, and it's not the one we expect, reject the message. + // This prevents the spinner from receiving too many messages and + // thus spinning too fast. + if msg.tag > 0 && msg.tag != m.tag { + return m, nil + } + + m.frame++ + if m.frame >= len(m.Spinner.Frames) { + m.frame = 0 + } + + m.tag++ + return m, m.tick(m.id, m.tag) + default: + return m, nil + } +} + +// View renders the model's view. +func (m Model) View() string { + if m.frame >= len(m.Spinner.Frames) { + return "(error)" + } + + return m.Style.Render(m.Spinner.Frames[m.frame]) +} + +// Tick is the command used to advance the spinner one frame. Use this command +// to effectively start the spinner. +func (m Model) Tick() tea.Msg { + return TickMsg{ + // The time at which the tick occurred. 
+ Time: time.Now(), + + // The ID of the spinner that this message belongs to. This can be + // helpful when routing messages, however bear in mind that spinners + // will ignore messages that don't contain ID by default. + ID: m.id, + + tag: m.tag, + } +} + +func (m Model) tick(id, tag int) tea.Cmd { + return tea.Tick(m.Spinner.FPS, func(t time.Time) tea.Msg { + return TickMsg{ + Time: t, + ID: id, + tag: tag, + } + }) +} + +// Tick is the command used to advance the spinner one frame. Use this command +// to effectively start the spinner. +// +// Deprecated: Use [Model.Tick] instead. +func Tick() tea.Msg { + return TickMsg{Time: time.Now()} +} + +// Option is used to set options in New. For example: +// +// spinner := New(WithSpinner(Dot)) +type Option func(*Model) + +// WithSpinner is an option to set the spinner. +func WithSpinner(spinner Spinner) Option { + return func(m *Model) { + m.Spinner = spinner + } +} + +// WithStyle is an option to set the spinner style. +func WithStyle(style lipgloss.Style) Option { + return func(m *Model) { + m.Style = style + } +} diff --git a/vendor/github.com/charmbracelet/bubbles/textinput/textinput.go b/vendor/github.com/charmbracelet/bubbles/textinput/textinput.go new file mode 100644 index 00000000..84cbbc93 --- /dev/null +++ b/vendor/github.com/charmbracelet/bubbles/textinput/textinput.go @@ -0,0 +1,898 @@ +// Package textinput provides a text input component for Bubble Tea +// applications. +package textinput + +import ( + "reflect" + "strings" + "time" + "unicode" + + "github.com/atotto/clipboard" + "github.com/charmbracelet/bubbles/cursor" + "github.com/charmbracelet/bubbles/key" + "github.com/charmbracelet/bubbles/runeutil" + tea "github.com/charmbracelet/bubbletea" + "github.com/charmbracelet/lipgloss" + rw "github.com/mattn/go-runewidth" + "github.com/rivo/uniseg" +) + +// Internal messages for clipboard operations. +type ( + pasteMsg string + pasteErrMsg struct{ error } +) + +// EchoMode sets the input behavior of the text input field. +type EchoMode int + +const ( + // EchoNormal displays text as is. This is the default behavior. + EchoNormal EchoMode = iota + + // EchoPassword displays the EchoCharacter mask instead of actual + // characters. This is commonly used for password fields. + EchoPassword + + // EchoNone displays nothing as characters are entered. This is commonly + // seen for password fields on the command line. + EchoNone +) + +// ValidateFunc is a function that returns an error if the input is invalid. +type ValidateFunc func(string) error + +// KeyMap is the key bindings for different actions within the textinput. +type KeyMap struct { + CharacterForward key.Binding + CharacterBackward key.Binding + WordForward key.Binding + WordBackward key.Binding + DeleteWordBackward key.Binding + DeleteWordForward key.Binding + DeleteAfterCursor key.Binding + DeleteBeforeCursor key.Binding + DeleteCharacterBackward key.Binding + DeleteCharacterForward key.Binding + LineStart key.Binding + LineEnd key.Binding + Paste key.Binding + AcceptSuggestion key.Binding + NextSuggestion key.Binding + PrevSuggestion key.Binding +} + +// DefaultKeyMap is the default set of key bindings for navigating and acting +// upon the textinput. 
+var DefaultKeyMap = KeyMap{ + CharacterForward: key.NewBinding(key.WithKeys("right", "ctrl+f")), + CharacterBackward: key.NewBinding(key.WithKeys("left", "ctrl+b")), + WordForward: key.NewBinding(key.WithKeys("alt+right", "ctrl+right", "alt+f")), + WordBackward: key.NewBinding(key.WithKeys("alt+left", "ctrl+left", "alt+b")), + DeleteWordBackward: key.NewBinding(key.WithKeys("alt+backspace", "ctrl+w")), + DeleteWordForward: key.NewBinding(key.WithKeys("alt+delete", "alt+d")), + DeleteAfterCursor: key.NewBinding(key.WithKeys("ctrl+k")), + DeleteBeforeCursor: key.NewBinding(key.WithKeys("ctrl+u")), + DeleteCharacterBackward: key.NewBinding(key.WithKeys("backspace", "ctrl+h")), + DeleteCharacterForward: key.NewBinding(key.WithKeys("delete", "ctrl+d")), + LineStart: key.NewBinding(key.WithKeys("home", "ctrl+a")), + LineEnd: key.NewBinding(key.WithKeys("end", "ctrl+e")), + Paste: key.NewBinding(key.WithKeys("ctrl+v")), + AcceptSuggestion: key.NewBinding(key.WithKeys("tab")), + NextSuggestion: key.NewBinding(key.WithKeys("down", "ctrl+n")), + PrevSuggestion: key.NewBinding(key.WithKeys("up", "ctrl+p")), +} + +// Model is the Bubble Tea model for this text input element. +type Model struct { + Err error + + // General settings. + Prompt string + Placeholder string + EchoMode EchoMode + EchoCharacter rune + Cursor cursor.Model + + // Deprecated: use [cursor.BlinkSpeed] instead. + BlinkSpeed time.Duration + + // Styles. These will be applied as inline styles. + // + // For an introduction to styling with Lip Gloss see: + // https://github.com/charmbracelet/lipgloss + PromptStyle lipgloss.Style + TextStyle lipgloss.Style + PlaceholderStyle lipgloss.Style + CompletionStyle lipgloss.Style + + // Deprecated: use Cursor.Style instead. + CursorStyle lipgloss.Style + + // CharLimit is the maximum amount of characters this input element will + // accept. If 0 or less, there's no limit. + CharLimit int + + // Width is the maximum number of characters that can be displayed at once. + // It essentially treats the text field like a horizontally scrolling + // viewport. If 0 or less this setting is ignored. + Width int + + // KeyMap encodes the keybindings recognized by the widget. + KeyMap KeyMap + + // Underlying text value. + value []rune + + // focus indicates whether user input focus should be on this input + // component. When false, ignore keyboard input and hide the cursor. + focus bool + + // Cursor position. + pos int + + // Used to emulate a viewport when width is set and the content is + // overflowing. + offset int + offsetRight int + + // Validate is a function that checks whether or not the text within the + // input is valid. If it is not valid, the `Err` field will be set to the + // error returned by the function. If the function is not defined, all + // input is considered valid. + Validate ValidateFunc + + // rune sanitizer for input. + rsan runeutil.Sanitizer + + // Should the input suggest to complete + ShowSuggestions bool + + // suggestions is a list of suggestions that may be used to complete the + // input. + suggestions [][]rune + matchedSuggestions [][]rune + currentSuggestionIndex int +} + +// New creates a new model with default settings. 
+func New() Model { + return Model{ + Prompt: "> ", + EchoCharacter: '*', + CharLimit: 0, + PlaceholderStyle: lipgloss.NewStyle().Foreground(lipgloss.Color("240")), + ShowSuggestions: false, + CompletionStyle: lipgloss.NewStyle().Foreground(lipgloss.Color("240")), + Cursor: cursor.New(), + KeyMap: DefaultKeyMap, + + suggestions: [][]rune{}, + value: nil, + focus: false, + pos: 0, + } +} + +// NewModel creates a new model with default settings. +// +// Deprecated: Use [New] instead. +var NewModel = New + +// SetValue sets the value of the text input. +func (m *Model) SetValue(s string) { + // Clean up any special characters in the input provided by the + // caller. This avoids bugs due to e.g. tab characters and whatnot. + runes := m.san().Sanitize([]rune(s)) + err := m.validate(runes) + m.setValueInternal(runes, err) +} + +func (m *Model) setValueInternal(runes []rune, err error) { + m.Err = err + + empty := len(m.value) == 0 + + if m.CharLimit > 0 && len(runes) > m.CharLimit { + m.value = runes[:m.CharLimit] + } else { + m.value = runes + } + if (m.pos == 0 && empty) || m.pos > len(m.value) { + m.SetCursor(len(m.value)) + } + m.handleOverflow() +} + +// Value returns the value of the text input. +func (m Model) Value() string { + return string(m.value) +} + +// Position returns the cursor position. +func (m Model) Position() int { + return m.pos +} + +// SetCursor moves the cursor to the given position. If the position is +// out of bounds the cursor will be moved to the start or end accordingly. +func (m *Model) SetCursor(pos int) { + m.pos = clamp(pos, 0, len(m.value)) + m.handleOverflow() +} + +// CursorStart moves the cursor to the start of the input field. +func (m *Model) CursorStart() { + m.SetCursor(0) +} + +// CursorEnd moves the cursor to the end of the input field. +func (m *Model) CursorEnd() { + m.SetCursor(len(m.value)) +} + +// Focused returns the focus state on the model. +func (m Model) Focused() bool { + return m.focus +} + +// Focus sets the focus state on the model. When the model is in focus it can +// receive keyboard input and the cursor will be shown. +func (m *Model) Focus() tea.Cmd { + m.focus = true + return m.Cursor.Focus() +} + +// Blur removes the focus state on the model. When the model is blurred it can +// not receive keyboard input and the cursor will be hidden. +func (m *Model) Blur() { + m.focus = false + m.Cursor.Blur() +} + +// Reset sets the input to its default state with no input. +func (m *Model) Reset() { + m.value = nil + m.SetCursor(0) +} + +// SetSuggestions sets the suggestions for the input. +func (m *Model) SetSuggestions(suggestions []string) { + m.suggestions = make([][]rune, len(suggestions)) + for i, s := range suggestions { + m.suggestions[i] = []rune(s) + } + + m.updateSuggestions() +} + +// rsan initializes or retrieves the rune sanitizer. +func (m *Model) san() runeutil.Sanitizer { + if m.rsan == nil { + // Textinput has all its input on a single line so collapse + // newlines/tabs to single spaces. + m.rsan = runeutil.NewSanitizer( + runeutil.ReplaceTabs(" "), runeutil.ReplaceNewlines(" ")) + } + return m.rsan +} + +func (m *Model) insertRunesFromUserInput(v []rune) { + // Clean up any special characters in the input provided by the + // clipboard. This avoids bugs due to e.g. tab characters and + // whatnot. + paste := m.san().Sanitize(v) + + var availSpace int + if m.CharLimit > 0 { + availSpace = m.CharLimit - len(m.value) + + // If the char limit's been reached, cancel. 
+ if availSpace <= 0 { + return + } + + // If there's not enough space to paste the whole thing cut the pasted + // runes down so they'll fit. + if availSpace < len(paste) { + paste = paste[:availSpace] + } + } + + // Stuff before and after the cursor + head := m.value[:m.pos] + tailSrc := m.value[m.pos:] + tail := make([]rune, len(tailSrc)) + copy(tail, tailSrc) + + // Insert pasted runes + for _, r := range paste { + head = append(head, r) + m.pos++ + if m.CharLimit > 0 { + availSpace-- + if availSpace <= 0 { + break + } + } + } + + // Put it all back together + value := append(head, tail...) + inputErr := m.validate(value) + m.setValueInternal(value, inputErr) +} + +// If a max width is defined, perform some logic to treat the visible area +// as a horizontally scrolling viewport. +func (m *Model) handleOverflow() { + if m.Width <= 0 || uniseg.StringWidth(string(m.value)) <= m.Width { + m.offset = 0 + m.offsetRight = len(m.value) + return + } + + // Correct right offset if we've deleted characters + m.offsetRight = min(m.offsetRight, len(m.value)) + + if m.pos < m.offset { + m.offset = m.pos + + w := 0 + i := 0 + runes := m.value[m.offset:] + + for i < len(runes) && w <= m.Width { + w += rw.RuneWidth(runes[i]) + if w <= m.Width+1 { + i++ + } + } + + m.offsetRight = m.offset + i + } else if m.pos >= m.offsetRight { + m.offsetRight = m.pos + + w := 0 + runes := m.value[:m.offsetRight] + i := len(runes) - 1 + + for i > 0 && w < m.Width { + w += rw.RuneWidth(runes[i]) + if w <= m.Width { + i-- + } + } + + m.offset = m.offsetRight - (len(runes) - 1 - i) + } +} + +// deleteBeforeCursor deletes all text before the cursor. +func (m *Model) deleteBeforeCursor() { + m.value = m.value[m.pos:] + m.Err = m.validate(m.value) + m.offset = 0 + m.SetCursor(0) +} + +// deleteAfterCursor deletes all text after the cursor. If input is masked +// delete everything after the cursor so as not to reveal word breaks in the +// masked input. +func (m *Model) deleteAfterCursor() { + m.value = m.value[:m.pos] + m.Err = m.validate(m.value) + m.SetCursor(len(m.value)) +} + +// deleteWordBackward deletes the word left to the cursor. +func (m *Model) deleteWordBackward() { + if m.pos == 0 || len(m.value) == 0 { + return + } + + if m.EchoMode != EchoNormal { + m.deleteBeforeCursor() + return + } + + // Linter note: it's critical that we acquire the initial cursor position + // here prior to altering it via SetCursor() below. As such, moving this + // call into the corresponding if clause does not apply here. + oldPos := m.pos //nolint:ifshort + + m.SetCursor(m.pos - 1) + for unicode.IsSpace(m.value[m.pos]) { + if m.pos <= 0 { + break + } + // ignore series of whitespace before cursor + m.SetCursor(m.pos - 1) + } + + for m.pos > 0 { + if !unicode.IsSpace(m.value[m.pos]) { + m.SetCursor(m.pos - 1) + } else { + if m.pos > 0 { + // keep the previous space + m.SetCursor(m.pos + 1) + } + break + } + } + + if oldPos > len(m.value) { + m.value = m.value[:m.pos] + } else { + m.value = append(m.value[:m.pos], m.value[oldPos:]...) + } + m.Err = m.validate(m.value) +} + +// deleteWordForward deletes the word right to the cursor. If input is masked +// delete everything after the cursor so as not to reveal word breaks in the +// masked input. 
+func (m *Model) deleteWordForward() { + if m.pos >= len(m.value) || len(m.value) == 0 { + return + } + + if m.EchoMode != EchoNormal { + m.deleteAfterCursor() + return + } + + oldPos := m.pos + m.SetCursor(m.pos + 1) + for unicode.IsSpace(m.value[m.pos]) { + // ignore series of whitespace after cursor + m.SetCursor(m.pos + 1) + + if m.pos >= len(m.value) { + break + } + } + + for m.pos < len(m.value) { + if !unicode.IsSpace(m.value[m.pos]) { + m.SetCursor(m.pos + 1) + } else { + break + } + } + + if m.pos > len(m.value) { + m.value = m.value[:oldPos] + } else { + m.value = append(m.value[:oldPos], m.value[m.pos:]...) + } + m.Err = m.validate(m.value) + + m.SetCursor(oldPos) +} + +// wordBackward moves the cursor one word to the left. If input is masked, move +// input to the start so as not to reveal word breaks in the masked input. +func (m *Model) wordBackward() { + if m.pos == 0 || len(m.value) == 0 { + return + } + + if m.EchoMode != EchoNormal { + m.CursorStart() + return + } + + i := m.pos - 1 + for i >= 0 { + if unicode.IsSpace(m.value[i]) { + m.SetCursor(m.pos - 1) + i-- + } else { + break + } + } + + for i >= 0 { + if !unicode.IsSpace(m.value[i]) { + m.SetCursor(m.pos - 1) + i-- + } else { + break + } + } +} + +// wordForward moves the cursor one word to the right. If the input is masked, +// move input to the end so as not to reveal word breaks in the masked input. +func (m *Model) wordForward() { + if m.pos >= len(m.value) || len(m.value) == 0 { + return + } + + if m.EchoMode != EchoNormal { + m.CursorEnd() + return + } + + i := m.pos + for i < len(m.value) { + if unicode.IsSpace(m.value[i]) { + m.SetCursor(m.pos + 1) + i++ + } else { + break + } + } + + for i < len(m.value) { + if !unicode.IsSpace(m.value[i]) { + m.SetCursor(m.pos + 1) + i++ + } else { + break + } + } +} + +func (m Model) echoTransform(v string) string { + switch m.EchoMode { + case EchoPassword: + return strings.Repeat(string(m.EchoCharacter), uniseg.StringWidth(v)) + case EchoNone: + return "" + case EchoNormal: + return v + default: + return v + } +} + +// Update is the Bubble Tea update loop. +func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) { + if !m.focus { + return m, nil + } + + // Need to check for completion before, because key is configurable and might be double assigned + keyMsg, ok := msg.(tea.KeyMsg) + if ok && key.Matches(keyMsg, m.KeyMap.AcceptSuggestion) { + if m.canAcceptSuggestion() { + m.value = append(m.value, m.matchedSuggestions[m.currentSuggestionIndex][len(m.value):]...) + m.CursorEnd() + } + } + + // Let's remember where the position of the cursor currently is so that if + // the cursor position changes, we can reset the blink. + oldPos := m.pos + + switch msg := msg.(type) { + case tea.KeyMsg: + switch { + case key.Matches(msg, m.KeyMap.DeleteWordBackward): + m.deleteWordBackward() + case key.Matches(msg, m.KeyMap.DeleteCharacterBackward): + m.Err = nil + if len(m.value) > 0 { + m.value = append(m.value[:max(0, m.pos-1)], m.value[m.pos:]...) 
+ m.Err = m.validate(m.value) + if m.pos > 0 { + m.SetCursor(m.pos - 1) + } + } + case key.Matches(msg, m.KeyMap.WordBackward): + m.wordBackward() + case key.Matches(msg, m.KeyMap.CharacterBackward): + if m.pos > 0 { + m.SetCursor(m.pos - 1) + } + case key.Matches(msg, m.KeyMap.WordForward): + m.wordForward() + case key.Matches(msg, m.KeyMap.CharacterForward): + if m.pos < len(m.value) { + m.SetCursor(m.pos + 1) + } + case key.Matches(msg, m.KeyMap.LineStart): + m.CursorStart() + case key.Matches(msg, m.KeyMap.DeleteCharacterForward): + if len(m.value) > 0 && m.pos < len(m.value) { + m.value = append(m.value[:m.pos], m.value[m.pos+1:]...) + m.Err = m.validate(m.value) + } + case key.Matches(msg, m.KeyMap.LineEnd): + m.CursorEnd() + case key.Matches(msg, m.KeyMap.DeleteAfterCursor): + m.deleteAfterCursor() + case key.Matches(msg, m.KeyMap.DeleteBeforeCursor): + m.deleteBeforeCursor() + case key.Matches(msg, m.KeyMap.Paste): + return m, Paste + case key.Matches(msg, m.KeyMap.DeleteWordForward): + m.deleteWordForward() + case key.Matches(msg, m.KeyMap.NextSuggestion): + m.nextSuggestion() + case key.Matches(msg, m.KeyMap.PrevSuggestion): + m.previousSuggestion() + default: + // Input one or more regular characters. + m.insertRunesFromUserInput(msg.Runes) + } + + // Check again if can be completed + // because value might be something that does not match the completion prefix + m.updateSuggestions() + + case pasteMsg: + m.insertRunesFromUserInput([]rune(msg)) + + case pasteErrMsg: + m.Err = msg + } + + var cmds []tea.Cmd + var cmd tea.Cmd + + m.Cursor, cmd = m.Cursor.Update(msg) + cmds = append(cmds, cmd) + + if oldPos != m.pos && m.Cursor.Mode() == cursor.CursorBlink { + m.Cursor.Blink = false + cmds = append(cmds, m.Cursor.BlinkCmd()) + } + + m.handleOverflow() + return m, tea.Batch(cmds...) +} + +// View renders the textinput in its current state. +func (m Model) View() string { + // Placeholder text + if len(m.value) == 0 && m.Placeholder != "" { + return m.placeholderView() + } + + styleText := m.TextStyle.Inline(true).Render + + value := m.value[m.offset:m.offsetRight] + pos := max(0, m.pos-m.offset) + v := styleText(m.echoTransform(string(value[:pos]))) + + if pos < len(value) { //nolint:nestif + char := m.echoTransform(string(value[pos])) + m.Cursor.SetChar(char) + v += m.Cursor.View() // cursor and text under it + v += styleText(m.echoTransform(string(value[pos+1:]))) // text after cursor + v += m.completionView(0) // suggested completion + } else { + if m.focus && m.canAcceptSuggestion() { + suggestion := m.matchedSuggestions[m.currentSuggestionIndex] + if len(value) < len(suggestion) { + m.Cursor.TextStyle = m.CompletionStyle + m.Cursor.SetChar(m.echoTransform(string(suggestion[pos]))) + v += m.Cursor.View() + v += m.completionView(1) + } else { + m.Cursor.SetChar(" ") + v += m.Cursor.View() + } + } else { + m.Cursor.SetChar(" ") + v += m.Cursor.View() + } + } + + // If a max width and background color were set fill the empty spaces with + // the background color. + valWidth := uniseg.StringWidth(string(value)) + if m.Width > 0 && valWidth <= m.Width { + padding := max(0, m.Width-valWidth) + if valWidth+padding <= m.Width && pos < len(value) { + padding++ + } + v += styleText(strings.Repeat(" ", padding)) + } + + return m.PromptStyle.Render(m.Prompt) + v +} + +// placeholderView returns the prompt and placeholder view, if any. 
+func (m Model) placeholderView() string { + var ( + v string + style = m.PlaceholderStyle.Inline(true).Render + ) + + p := make([]rune, m.Width+1) + copy(p, []rune(m.Placeholder)) + + m.Cursor.TextStyle = m.PlaceholderStyle + m.Cursor.SetChar(string(p[:1])) + v += m.Cursor.View() + + // If the entire placeholder is already set and no padding is needed, finish + if m.Width < 1 && len(p) <= 1 { + return m.PromptStyle.Render(m.Prompt) + v + } + + // If Width is set then size placeholder accordingly + if m.Width > 0 { + // available width is width - len + cursor offset of 1 + minWidth := lipgloss.Width(m.Placeholder) + availWidth := m.Width - minWidth + 1 + + // if width < len, 'subtract'(add) number to len and dont add padding + if availWidth < 0 { + minWidth += availWidth + availWidth = 0 + } + // append placeholder[len] - cursor, append padding + v += style(string(p[1:minWidth])) + v += style(strings.Repeat(" ", availWidth)) + } else { + // if there is no width, the placeholder can be any length + v += style(string(p[1:])) + } + + return m.PromptStyle.Render(m.Prompt) + v +} + +// Blink is a command used to initialize cursor blinking. +func Blink() tea.Msg { + return cursor.Blink() +} + +// Paste is a command for pasting from the clipboard into the text input. +func Paste() tea.Msg { + str, err := clipboard.ReadAll() + if err != nil { + return pasteErrMsg{err} + } + return pasteMsg(str) +} + +func clamp(v, low, high int) int { + if high < low { + low, high = high, low + } + return min(high, max(low, v)) +} + +// Deprecated. + +// Deprecated: use [cursor.Mode]. +// +//nolint:revive +type CursorMode int + +//nolint:revive +const ( + // Deprecated: use [cursor.CursorBlink]. + CursorBlink = CursorMode(cursor.CursorBlink) + // Deprecated: use [cursor.CursorStatic]. + CursorStatic = CursorMode(cursor.CursorStatic) + // Deprecated: use [cursor.CursorHide]. + CursorHide = CursorMode(cursor.CursorHide) +) + +func (c CursorMode) String() string { + return cursor.Mode(c).String() +} + +// Deprecated: use [cursor.Mode]. +// +//nolint:revive +func (m Model) CursorMode() CursorMode { + return CursorMode(m.Cursor.Mode()) +} + +// Deprecated: use cursor.SetMode(). +// +//nolint:revive +func (m *Model) SetCursorMode(mode CursorMode) tea.Cmd { + return m.Cursor.SetMode(cursor.Mode(mode)) +} + +func (m Model) completionView(offset int) string { + var ( + value = m.value + style = m.PlaceholderStyle.Inline(true).Render + ) + + if m.canAcceptSuggestion() { + suggestion := m.matchedSuggestions[m.currentSuggestionIndex] + if len(value) < len(suggestion) { + return style(string(suggestion[len(value)+offset:])) + } + } + return "" +} + +func (m *Model) getSuggestions(sugs [][]rune) []string { + suggestions := make([]string, len(sugs)) + for i, s := range sugs { + suggestions[i] = string(s) + } + return suggestions +} + +// AvailableSuggestions returns the list of available suggestions. +func (m *Model) AvailableSuggestions() []string { + return m.getSuggestions(m.suggestions) +} + +// MatchedSuggestions returns the list of matched suggestions. +func (m *Model) MatchedSuggestions() []string { + return m.getSuggestions(m.matchedSuggestions) +} + +// CurrentSuggestionIndex returns the currently selected suggestion index. +func (m *Model) CurrentSuggestionIndex() int { + return m.currentSuggestionIndex +} + +// CurrentSuggestion returns the currently selected suggestion. 
+func (m *Model) CurrentSuggestion() string { + if m.currentSuggestionIndex >= len(m.matchedSuggestions) { + return "" + } + + return string(m.matchedSuggestions[m.currentSuggestionIndex]) +} + +// canAcceptSuggestion returns whether there is an acceptable suggestion to +// autocomplete the current value. +func (m *Model) canAcceptSuggestion() bool { + return len(m.matchedSuggestions) > 0 +} + +// updateSuggestions refreshes the list of matching suggestions. +func (m *Model) updateSuggestions() { + if !m.ShowSuggestions { + return + } + + if len(m.value) <= 0 || len(m.suggestions) <= 0 { + m.matchedSuggestions = [][]rune{} + return + } + + matches := [][]rune{} + for _, s := range m.suggestions { + suggestion := string(s) + + if strings.HasPrefix(strings.ToLower(suggestion), strings.ToLower(string(m.value))) { + matches = append(matches, []rune(suggestion)) + } + } + if !reflect.DeepEqual(matches, m.matchedSuggestions) { + m.currentSuggestionIndex = 0 + } + + m.matchedSuggestions = matches +} + +// nextSuggestion selects the next suggestion. +func (m *Model) nextSuggestion() { + m.currentSuggestionIndex = (m.currentSuggestionIndex + 1) + if m.currentSuggestionIndex >= len(m.matchedSuggestions) { + m.currentSuggestionIndex = 0 + } +} + +// previousSuggestion selects the previous suggestion. +func (m *Model) previousSuggestion() { + m.currentSuggestionIndex = (m.currentSuggestionIndex - 1) + if m.currentSuggestionIndex < 0 { + m.currentSuggestionIndex = len(m.matchedSuggestions) - 1 + } +} + +func (m Model) validate(v []rune) error { + if m.Validate != nil { + return m.Validate(string(v)) + } + return nil +} diff --git a/vendor/github.com/charmbracelet/bubbletea/.golangci.yml b/vendor/github.com/charmbracelet/bubbletea/.golangci.yml index 4fac29c2..929cb0ac 100644 --- a/vendor/github.com/charmbracelet/bubbletea/.golangci.yml +++ b/vendor/github.com/charmbracelet/bubbletea/.golangci.yml @@ -26,6 +26,10 @@ linters: - whitespace - wrapcheck exclusions: + rules: + - text: '(slog|log)\.\w+' + linters: + - noctx generated: lax presets: - common-false-positives diff --git a/vendor/github.com/charmbracelet/bubbletea/LICENSE b/vendor/github.com/charmbracelet/bubbletea/LICENSE index 31d76c1c..be3f0c19 100644 --- a/vendor/github.com/charmbracelet/bubbletea/LICENSE +++ b/vendor/github.com/charmbracelet/bubbletea/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2020-2023 Charmbracelet, Inc +Copyright (c) 2020-2025 Charmbracelet, Inc Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/charmbracelet/bubbletea/README.md b/vendor/github.com/charmbracelet/bubbletea/README.md index b3acf900..83f3da07 100644 --- a/vendor/github.com/charmbracelet/bubbletea/README.md +++ b/vendor/github.com/charmbracelet/bubbletea/README.md @@ -9,7 +9,7 @@
Latest Release GoDoc
- Build Status
+ Build Status

The fun, functional and stateful way to build terminal apps. A Go framework @@ -395,6 +395,6 @@ of days past. Part of [Charm](https://charm.sh). -The Charm logo +The Charm logo Charm热爱开源 • Charm loves open source • نحنُ نحب المصادر المفتوحة diff --git a/vendor/github.com/charmbracelet/bubbletea/commands.go b/vendor/github.com/charmbracelet/bubbletea/commands.go index bfa3b704..ec9dac28 100644 --- a/vendor/github.com/charmbracelet/bubbletea/commands.go +++ b/vendor/github.com/charmbracelet/bubbletea/commands.go @@ -13,6 +13,27 @@ import ( // return tea.Batch(someCommand, someOtherCommand) // } func Batch(cmds ...Cmd) Cmd { + return compactCmds[BatchMsg](cmds) +} + +// BatchMsg is a message used to perform a bunch of commands concurrently with +// no ordering guarantees. You can send a BatchMsg with Batch. +type BatchMsg []Cmd + +// Sequence runs the given commands one at a time, in order. Contrast this with +// Batch, which runs commands concurrently. +func Sequence(cmds ...Cmd) Cmd { + return compactCmds[sequenceMsg](cmds) +} + +// sequenceMsg is used internally to run the given commands in order. +type sequenceMsg []Cmd + +// compactCmds ignores any nil commands in cmds, and returns the most direct +// command possible. That is, considering the non-nil commands, if there are +// none it returns nil, if there is exactly one it returns that command +// directly, else it returns the non-nil commands as type T. +func compactCmds[T ~[]Cmd](cmds []Cmd) Cmd { var validCmds []Cmd //nolint:prealloc for _, c := range cmds { if c == nil { @@ -27,26 +48,11 @@ func Batch(cmds ...Cmd) Cmd { return validCmds[0] default: return func() Msg { - return BatchMsg(validCmds) + return T(validCmds) } } } -// BatchMsg is a message used to perform a bunch of commands concurrently with -// no ordering guarantees. You can send a BatchMsg with Batch. -type BatchMsg []Cmd - -// Sequence runs the given commands one at a time, in order. Contrast this with -// Batch, which runs commands concurrently. -func Sequence(cmds ...Cmd) Cmd { - return func() Msg { - return sequenceMsg(cmds) - } -} - -// sequenceMsg is used internally to run the given commands in order. -type sequenceMsg []Cmd - // Every is a command that ticks in sync with the system clock. So, if you // wanted to tick with the system clock every second, minute or hour you // could use this. It's also handy for having different things tick in sync. diff --git a/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go b/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go index d76617d3..1e48f70c 100644 --- a/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go +++ b/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go @@ -108,7 +108,7 @@ func prepareConsole(input windows.Handle, modes ...uint32) (originalMode uint32, return originalMode, nil } -// cancelMixin represents a goroutine-safe cancelation status. +// cancelMixin represents a goroutine-safe cancellation status. type cancelMixin struct { unsafeCanceled bool lock sync.Mutex diff --git a/vendor/github.com/charmbracelet/bubbletea/key_windows.go b/vendor/github.com/charmbracelet/bubbletea/key_windows.go index ac983739..648bd784 100644 --- a/vendor/github.com/charmbracelet/bubbletea/key_windows.go +++ b/vendor/github.com/charmbracelet/bubbletea/key_windows.go @@ -109,12 +109,12 @@ func peekAndReadConsInput(con *conInputReader) ([]coninput.InputRecord, error) { return events, nil } -// Convert i to unit32 or panic if it cannot be converted. Check satisifes lint G115. 
+// Convert i to unit32 or panic if it cannot be converted. Check satisfies lint G115. func intToUint32OrDie(i int) uint32 { if i < 0 { panic("cannot convert numEvents " + fmt.Sprint(i) + " to uint32") } - return uint32(i) + return uint32(i) //nolint:gosec } // Keeps peeking until there is data or the input is cancelled. @@ -158,16 +158,16 @@ func mouseEventButton(p, s coninput.ButtonState) (button MouseButton, action Mou return button, action } - switch { - case btn == coninput.FROM_LEFT_1ST_BUTTON_PRESSED: // left button + switch btn { + case coninput.FROM_LEFT_1ST_BUTTON_PRESSED: // left button button = MouseButtonLeft - case btn == coninput.RIGHTMOST_BUTTON_PRESSED: // right button + case coninput.RIGHTMOST_BUTTON_PRESSED: // right button button = MouseButtonRight - case btn == coninput.FROM_LEFT_2ND_BUTTON_PRESSED: // middle button + case coninput.FROM_LEFT_2ND_BUTTON_PRESSED: // middle button button = MouseButtonMiddle - case btn == coninput.FROM_LEFT_3RD_BUTTON_PRESSED: // unknown (possibly mouse backward) + case coninput.FROM_LEFT_3RD_BUTTON_PRESSED: // unknown (possibly mouse backward) button = MouseButtonBackward - case btn == coninput.FROM_LEFT_4TH_BUTTON_PRESSED: // unknown (possibly mouse forward) + case coninput.FROM_LEFT_4TH_BUTTON_PRESSED: // unknown (possibly mouse forward) button = MouseButtonForward } diff --git a/vendor/github.com/charmbracelet/bubbletea/screen.go b/vendor/github.com/charmbracelet/bubbletea/screen.go index dfec48f0..02e1bfb0 100644 --- a/vendor/github.com/charmbracelet/bubbletea/screen.go +++ b/vendor/github.com/charmbracelet/bubbletea/screen.go @@ -131,7 +131,7 @@ func EnableBracketedPaste() Msg { type enableBracketedPasteMsg struct{} // DisableBracketedPaste is a special command that tells the Bubble Tea program -// to accept bracketed paste input. +// to stop processing bracketed paste input. // // Note that bracketed paste will be automatically disabled when the // program quits. diff --git a/vendor/github.com/charmbracelet/bubbletea/standard_renderer.go b/vendor/github.com/charmbracelet/bubbletea/standard_renderer.go index d6dce6db..969a58c2 100644 --- a/vendor/github.com/charmbracelet/bubbletea/standard_renderer.go +++ b/vendor/github.com/charmbracelet/bubbletea/standard_renderer.go @@ -277,7 +277,7 @@ func (r *standardRenderer) flush() { // using the full terminal window. buf.WriteString(ansi.CursorPosition(0, len(newLines))) } else { - buf.WriteString(ansi.CursorBackward(r.width)) + buf.WriteByte('\r') } _, _ = r.out.Write(buf.Bytes()) diff --git a/vendor/github.com/charmbracelet/bubbletea/tea.go b/vendor/github.com/charmbracelet/bubbletea/tea.go index c4866f22..db84343e 100644 --- a/vendor/github.com/charmbracelet/bubbletea/tea.go +++ b/vendor/github.com/charmbracelet/bubbletea/tea.go @@ -24,7 +24,6 @@ import ( "github.com/charmbracelet/x/term" "github.com/muesli/cancelreader" - "golang.org/x/sync/errgroup" ) // ErrProgramPanic is returned by [Program.Run] when the program recovers from a panic. @@ -73,7 +72,7 @@ const ( customInput ) -// String implements the stringer interface for [inputType]. It is inteded to +// String implements the stringer interface for [inputType]. It is intended to // be used in testing. func (i inputType) String() string { return [...]string{ @@ -220,7 +219,7 @@ func Suspend() Msg { // You can send this message with [Suspend()]. 
type SuspendMsg struct{} -// ResumeMsg can be listen to to do something once a program is resumed back +// ResumeMsg can be listen to do something once a program is resumed back // from a suspend state. type ResumeMsg struct{} @@ -472,42 +471,12 @@ func (p *Program) eventLoop(model Model, cmds chan Cmd) (Model, error) { p.exec(msg.cmd, msg.fn) case BatchMsg: - for _, cmd := range msg { - select { - case <-p.ctx.Done(): - return model, nil - case cmds <- cmd: - } - } + go p.execBatchMsg(msg) continue case sequenceMsg: - go func() { - // Execute commands one at a time, in order. - for _, cmd := range msg { - if cmd == nil { - continue - } - - msg := cmd() - if batchMsg, ok := msg.(BatchMsg); ok { - g, _ := errgroup.WithContext(p.ctx) - for _, cmd := range batchMsg { - cmd := cmd - g.Go(func() error { - p.Send(cmd()) - return nil - }) - } - - //nolint:errcheck,gosec - g.Wait() // wait for all commands from batch msg to finish - continue - } - - p.Send(msg) - } - }() + go p.execSequenceMsg(msg) + continue case setWindowTitleMsg: p.SetWindowTitle(string(msg)) @@ -535,6 +504,74 @@ func (p *Program) eventLoop(model Model, cmds chan Cmd) (Model, error) { } } +func (p *Program) execSequenceMsg(msg sequenceMsg) { + if !p.startupOptions.has(withoutCatchPanics) { + defer func() { + if r := recover(); r != nil { + p.recoverFromGoPanic(r) + } + }() + } + + // Execute commands one at a time, in order. + for _, cmd := range msg { + if cmd == nil { + continue + } + msg := cmd() + switch msg := msg.(type) { + case BatchMsg: + p.execBatchMsg(msg) + case sequenceMsg: + p.execSequenceMsg(msg) + default: + p.Send(msg) + } + } +} + +func (p *Program) execBatchMsg(msg BatchMsg) { + if !p.startupOptions.has(withoutCatchPanics) { + defer func() { + if r := recover(); r != nil { + p.recoverFromGoPanic(r) + } + }() + } + + // Execute commands one at a time. + var wg sync.WaitGroup + for _, cmd := range msg { + if cmd == nil { + continue + } + wg.Add(1) + go func() { + defer wg.Done() + + if !p.startupOptions.has(withoutCatchPanics) { + defer func() { + if r := recover(); r != nil { + p.recoverFromGoPanic(r) + } + }() + } + + msg := cmd() + switch msg := msg.(type) { + case BatchMsg: + p.execBatchMsg(msg) + case sequenceMsg: + p.execSequenceMsg(msg) + default: + p.Send(msg) + } + }() + } + + wg.Wait() // wait for all commands from batch msg to finish +} + // Run initializes the program and runs its event loops, blocking until it gets // terminated by either [Program.Quit], [Program.Kill], or its signal handler. // Returns the final model. diff --git a/vendor/github.com/charmbracelet/bubbletea/tty_windows.go b/vendor/github.com/charmbracelet/bubbletea/tty_windows.go index 154491a6..211151f7 100644 --- a/vendor/github.com/charmbracelet/bubbletea/tty_windows.go +++ b/vendor/github.com/charmbracelet/bubbletea/tty_windows.go @@ -56,7 +56,7 @@ func (p *Program) initInput() (err error) { // Open the Windows equivalent of a TTY. func openInputTTY() (*os.File, error) { - f, err := os.OpenFile("CONIN$", os.O_RDWR, 0o644) + f, err := os.OpenFile("CONIN$", os.O_RDWR, 0o644) //nolint:gosec if err != nil { return nil, fmt.Errorf("error opening file: %w", err) } diff --git a/vendor/github.com/charmbracelet/x/ansi/inband.go b/vendor/github.com/charmbracelet/x/ansi/inband.go new file mode 100644 index 00000000..cf29b248 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/inband.go @@ -0,0 +1,24 @@ +package ansi + +import "fmt" + +// InBandResize encodes an in-band terminal resize event sequence. 
+// +// CSI 48 ; height_cells ; width_cells ; height_pixels ; width_pixels t +// +// See https://gist.github.com/rockorager/e695fb2924d36b2bcf1fff4a3704bd83 +func InBandResize(heightCells, widthCells, heightPixels, widthPixels int) string { + if heightCells < 0 { + heightCells = 0 + } + if widthCells < 0 { + widthCells = 0 + } + if heightPixels < 0 { + heightPixels = 0 + } + if widthPixels < 0 { + widthPixels = 0 + } + return fmt.Sprintf("\x1b[48;%d;%d;%d;%dt", heightCells, widthCells, heightPixels, widthPixels) +} diff --git a/vendor/github.com/charmbracelet/x/ansi/palette.go b/vendor/github.com/charmbracelet/x/ansi/palette.go new file mode 100644 index 00000000..cc4519b8 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/palette.go @@ -0,0 +1,34 @@ +package ansi + +import ( + "fmt" + "image/color" +) + +// SetPalette sets the palette color for the given index. The index is a 16 +// color index between 0 and 15. The color is a 24-bit RGB color. +// +// OSC P n rrggbb BEL +// +// Where n is the color index in hex (0-f), and rrggbb is the color in +// hexadecimal format (e.g., ff0000 for red). +// +// This sequence is specific to the Linux Console and may not work in other +// terminal emulators. +// +// See https://man7.org/linux/man-pages/man4/console_codes.4.html +func SetPalette(i int, c color.Color) string { + if c == nil || i < 0 || i > 15 { + return "" + } + r, g, b, _ := c.RGBA() + return fmt.Sprintf("\x1b]P%x%02x%02x%02x\x07", i, r>>8, g>>8, b>>8) +} + +// ResetPalette resets the color palette to the default values. +// +// This sequence is specific to the Linux Console and may not work in other +// terminal emulators. +// +// See https://man7.org/linux/man-pages/man4/console_codes.4.html +const ResetPalette = "\x1b]R\x07" diff --git a/vendor/github.com/charmbracelet/x/ansi/progress.go b/vendor/github.com/charmbracelet/x/ansi/progress.go new file mode 100644 index 00000000..19441d1e --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/progress.go @@ -0,0 +1,49 @@ +package ansi + +import "strconv" + +// ResetProgressBar is a sequence that resets the progress bar to its default +// state (hidden). +// +// OSC 9 ; 4 ; 0 BEL +// +// See: https://learn.microsoft.com/en-us/windows/terminal/tutorials/progress-bar-sequences +const ResetProgressBar = "\x1b]9;4;0\x07" + +// SetProgressBar returns a sequence for setting the progress bar to a specific +// percentage (0-100) in the "default" state. +// +// OSC 9 ; 4 ; 1 Percentage BEL +// +// See: https://learn.microsoft.com/en-us/windows/terminal/tutorials/progress-bar-sequences +func SetProgressBar(percentage int) string { + return "\x1b]9;4;1;" + strconv.Itoa(min(max(0, percentage), 100)) + "\x07" +} + +// SetErrorProgressBar returns a sequence for setting the progress bar to a +// specific percentage (0-100) in the "Error" state. +// +// OSC 9 ; 4 ; 2 Percentage BEL +// +// See: https://learn.microsoft.com/en-us/windows/terminal/tutorials/progress-bar-sequences +func SetErrorProgressBar(percentage int) string { + return "\x1b]9;4;2;" + strconv.Itoa(min(max(0, percentage), 100)) + "\x07" +} + +// SetIndeterminateProgressBar is a sequence that sets the progress bar to the +// indeterminate state. +// +// OSC 9 ; 4 ; 3 BEL +// +// See: https://learn.microsoft.com/en-us/windows/terminal/tutorials/progress-bar-sequences +const SetIndeterminateProgressBar = "\x1b]9;4;3\x07" + +// SetWarningProgressBar is a sequence that sets the progress bar to the +// "Warning" state.
+// +// OSC 9 ; 4 ; 4 Percentage BEL +// +// See: https://learn.microsoft.com/en-us/windows/terminal/tutorials/progress-bar-sequences +func SetWarningProgressBar(percentage int) string { + return "\x1b]9;4;4;" + strconv.Itoa(min(max(0, percentage), 100)) + "\x07" +} diff --git a/vendor/github.com/charmbracelet/x/ansi/winop.go b/vendor/github.com/charmbracelet/x/ansi/winop.go index 0238780d..8487178b 100644 --- a/vendor/github.com/charmbracelet/x/ansi/winop.go +++ b/vendor/github.com/charmbracelet/x/ansi/winop.go @@ -8,16 +8,22 @@ import ( const ( // ResizeWindowWinOp is a window operation that resizes the terminal // window. + // + // Deprecated: Use constant number directly with [WindowOp]. ResizeWindowWinOp = 4 // RequestWindowSizeWinOp is a window operation that requests a report of // the size of the terminal window in pixels. The response is in the form: // CSI 4 ; height ; width t + // + // Deprecated: Use constant number directly with [WindowOp]. RequestWindowSizeWinOp = 14 // RequestCellSizeWinOp is a window operation that requests a report of // the size of the terminal cell size in pixels. The response is in the form: // CSI 6 ; height ; width t + // + // Deprecated: Use constant number directly with [WindowOp]. RequestCellSizeWinOp = 16 ) diff --git a/vendor/github.com/clipperhouse/uax29/v2/LICENSE b/vendor/github.com/clipperhouse/uax29/v2/LICENSE new file mode 100644 index 00000000..6ae86a9a --- /dev/null +++ b/vendor/github.com/clipperhouse/uax29/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Matt Sherman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/clipperhouse/uax29/v2/graphemes/README.md b/vendor/github.com/clipperhouse/uax29/v2/graphemes/README.md new file mode 100644 index 00000000..4d9a6d71 --- /dev/null +++ b/vendor/github.com/clipperhouse/uax29/v2/graphemes/README.md @@ -0,0 +1,82 @@ +An implementation of grapheme cluster boundaries from [Unicode text segmentation](https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries) (UAX 29), for Unicode version 15.0.0. + +## Quick start + +``` +go get "github.com/clipperhouse/uax29/v2/graphemes" +``` + +```go +import "github.com/clipperhouse/uax29/v2/graphemes" + +text := "Hello, 世界. Nice dog! 
👍🐶" + +tokens := graphemes.FromString(text) + +for tokens.Next() { // Next() returns true until end of data + fmt.Println(tokens.Value()) // Do something with the current grapheme +} +``` + +[![Documentation](https://pkg.go.dev/badge/github.com/clipperhouse/uax29/v2/graphemes.svg)](https://pkg.go.dev/github.com/clipperhouse/uax29/v2/graphemes) + +_A grapheme is a “single visible character”, which might be as simple as a single letter, or a complex emoji that consists of several Unicode code points._ + +## Conformance + +We use the Unicode [test suite](https://unicode.org/reports/tr41/tr41-26.html#Tests29). Status: + +![Go](https://github.com/clipperhouse/uax29/actions/workflows/gotest.yml/badge.svg) + +## APIs + +### If you have a `string` + +```go +text := "Hello, 世界. Nice dog! 👍🐶" + +tokens := graphemes.FromString(text) + +for tokens.Next() { // Next() returns true until end of data + fmt.Println(tokens.Value()) // Do something with the current grapheme +} +``` + +### If you have an `io.Reader` + +`FromReader` embeds a [`bufio.Scanner`](https://pkg.go.dev/bufio#Scanner), so just use those methods. + +```go +r := getYourReader() // from a file or network maybe +tokens := graphemes.FromReader(r) + +for tokens.Scan() { // Scan() returns true until error or EOF + fmt.Println(tokens.Text()) // Do something with the current grapheme +} + +if tokens.Err() != nil { // Check the error + log.Fatal(tokens.Err()) +} +``` + +### If you have a `[]byte` + +```go +b := []byte("Hello, 世界. Nice dog! 👍🐶") + +tokens := graphemes.FromBytes(b) + +for tokens.Next() { // Next() returns true until end of data + fmt.Println(tokens.Value()) // Do something with the current grapheme +} +``` + +### Performance + +On a Mac M2 laptop, we see around 200MB/s, or around 100 million graphemes per second. You should see ~constant memory, and no allocations. + +### Invalid inputs + +Invalid UTF-8 input is considered undefined behavior. We test to ensure that bad inputs will not cause pathological outcomes, such as a panic or infinite loop. Callers should expect “garbage-in, garbage-out”. + +Your pipeline should probably include a call to [`utf8.Valid()`](https://pkg.go.dev/unicode/utf8#Valid). diff --git a/vendor/github.com/clipperhouse/uax29/v2/graphemes/iterator.go b/vendor/github.com/clipperhouse/uax29/v2/graphemes/iterator.go new file mode 100644 index 00000000..14b4ea2c --- /dev/null +++ b/vendor/github.com/clipperhouse/uax29/v2/graphemes/iterator.go @@ -0,0 +1,28 @@ +package graphemes + +import "github.com/clipperhouse/uax29/v2/internal/iterators" + +type Iterator[T iterators.Stringish] struct { + *iterators.Iterator[T] +} + +var ( + splitFuncString = splitFunc[string] + splitFuncBytes = splitFunc[[]byte] +) + +// FromString returns an iterator for the grapheme clusters in the input string. +// Iterate while Next() is true, and access the grapheme via Value(). +func FromString(s string) Iterator[string] { + return Iterator[string]{ + iterators.New(splitFuncString, s), + } +} + +// FromBytes returns an iterator for the grapheme clusters in the input bytes. +// Iterate while Next() is true, and access the grapheme via Value().
+func FromBytes(b []byte) Iterator[[]byte] { + return Iterator[[]byte]{ + iterators.New(splitFuncBytes, b), + } +} diff --git a/vendor/github.com/clipperhouse/uax29/v2/graphemes/reader.go b/vendor/github.com/clipperhouse/uax29/v2/graphemes/reader.go new file mode 100644 index 00000000..9aa00661 --- /dev/null +++ b/vendor/github.com/clipperhouse/uax29/v2/graphemes/reader.go @@ -0,0 +1,25 @@ +// Package graphemes implements Unicode grapheme cluster boundaries: https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries +package graphemes + +import ( + "bufio" + "io" +) + +type Scanner struct { + *bufio.Scanner +} + +// FromReader returns a Scanner, to split graphemes per +// https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries. +// +// It embeds a [bufio.Scanner], so you can use its methods. +// +// Iterate through graphemes by calling Scan() until false, then check Err(). +func FromReader(r io.Reader) *Scanner { + sc := bufio.NewScanner(r) + sc.Split(SplitFunc) + return &Scanner{ + Scanner: sc, + } +} diff --git a/vendor/github.com/clipperhouse/uax29/v2/graphemes/splitfunc.go b/vendor/github.com/clipperhouse/uax29/v2/graphemes/splitfunc.go new file mode 100644 index 00000000..08987f54 --- /dev/null +++ b/vendor/github.com/clipperhouse/uax29/v2/graphemes/splitfunc.go @@ -0,0 +1,174 @@ +package graphemes + +import ( + "bufio" + + "github.com/clipperhouse/uax29/v2/internal/iterators" +) + +// is determines if lookup intersects propert(ies) +func (lookup property) is(properties property) bool { + return (lookup & properties) != 0 +} + +const _Ignore = _Extend + +// SplitFunc is a bufio.SplitFunc implementation of Unicode grapheme cluster segmentation, for use with bufio.Scanner. +// +// See https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries. +var SplitFunc bufio.SplitFunc = splitFunc[[]byte] + +func splitFunc[T iterators.Stringish](data T, atEOF bool) (advance int, token T, err error) { + var empty T + if len(data) == 0 { + return 0, empty, nil + } + + // These vars are stateful across loop iterations + var pos int + var lastExIgnore property = 0 // "last excluding ignored categories" + var lastLastExIgnore property = 0 // "last one before that" + var regionalIndicatorCount int + + // Rules are usually of the form Cat1 × Cat2; "current" refers to the first property + // to the right of the ×, from which we look back or forward + + current, w := lookup(data[pos:]) + if w == 0 { + if !atEOF { + // Rune extends past current data, request more + return 0, empty, nil + } + pos = len(data) + return pos, data[:pos], nil + } + + // https://unicode.org/reports/tr29/#GB1 + // Start of text always advances + pos += w + + for { + eot := pos == len(data) // "end of text" + + if eot { + if !atEOF { + // Token extends past current data, request more + return 0, empty, nil + } + + // https://unicode.org/reports/tr29/#GB2 + break + } + + /* + We've switched the evaluation order of GB1↓ and GB2↑. It's ok: + because we've checked for len(data) at the top of this function, + sot and eot are mutually exclusive, order doesn't matter. 
+ */ + + // Rules are usually of the form Cat1 × Cat2; "current" refers to the first property + // to the right of the ×, from which we look back or forward + + // Remember previous properties to avoid lookups/lookbacks + last := current + if !last.is(_Ignore) { + lastLastExIgnore = lastExIgnore + lastExIgnore = last + } + + current, w = lookup(data[pos:]) + if w == 0 { + if atEOF { + // Just return the bytes, we can't do anything with them + pos = len(data) + break + } + // Rune extends past current data, request more + return 0, empty, nil + } + + // Optimization: no rule can possibly apply + if current|last == 0 { // i.e. both are zero + break + } + + // https://unicode.org/reports/tr29/#GB3 + if current.is(_LF) && last.is(_CR) { + pos += w + continue + } + + // https://unicode.org/reports/tr29/#GB4 + // https://unicode.org/reports/tr29/#GB5 + if (current | last).is(_Control | _CR | _LF) { + break + } + + // https://unicode.org/reports/tr29/#GB6 + if current.is(_L|_V|_LV|_LVT) && last.is(_L) { + pos += w + continue + } + + // https://unicode.org/reports/tr29/#GB7 + if current.is(_V|_T) && last.is(_LV|_V) { + pos += w + continue + } + + // https://unicode.org/reports/tr29/#GB8 + if current.is(_T) && last.is(_LVT|_T) { + pos += w + continue + } + + // https://unicode.org/reports/tr29/#GB9 + if current.is(_Extend | _ZWJ) { + pos += w + continue + } + + // https://unicode.org/reports/tr29/#GB9a + if current.is(_SpacingMark) { + pos += w + continue + } + + // https://unicode.org/reports/tr29/#GB9b + if last.is(_Prepend) { + pos += w + continue + } + + // https://unicode.org/reports/tr29/#GB9c + // TODO(clipperhouse): + // It appears to be added in Unicode 15.1.0: + // https://unicode.org/versions/Unicode15.1.0/#Migration + // This package currently supports Unicode 15.0.0, so + // out of scope for now + + // https://unicode.org/reports/tr29/#GB11 + if current.is(_ExtendedPictographic) && last.is(_ZWJ) && lastLastExIgnore.is(_ExtendedPictographic) { + pos += w + continue + } + + // https://unicode.org/reports/tr29/#GB12 + // https://unicode.org/reports/tr29/#GB13 + if (current & last).is(_RegionalIndicator) { + regionalIndicatorCount++ + + odd := regionalIndicatorCount%2 == 1 + if odd { + pos += w + continue + } + } + + // If we fall through all the above rules, it's a grapheme cluster break + break + } + + // Return token + return pos, data[:pos], nil +} diff --git a/vendor/github.com/clipperhouse/uax29/v2/graphemes/trie.go b/vendor/github.com/clipperhouse/uax29/v2/graphemes/trie.go new file mode 100644 index 00000000..c8c6c33b --- /dev/null +++ b/vendor/github.com/clipperhouse/uax29/v2/graphemes/trie.go @@ -0,0 +1,1409 @@ +package graphemes + +// generated by github.com/clipperhouse/uax29/v2 +// from https://www.unicode.org/Public/15.0.0/ucd/auxiliary/GraphemeBreakProperty.txt + +import "github.com/clipperhouse/uax29/v2/internal/iterators" + +type property uint16 + +const ( + _CR property = 1 << iota + _Control + _Extend + _ExtendedPictographic + _L + _LF + _LV + _LVT + _Prepend + _RegionalIndicator + _SpacingMark + _T + _V + _ZWJ +) + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func lookup[T iterators.Stringish](s T) (v property, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return graphemesValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := graphemesIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := graphemesIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = graphemesIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := graphemesIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = graphemesIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = graphemesIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// graphemesTrie. Total size: 29120 bytes (28.44 KiB). Checksum: 80ad0c5ab9375f7. +// type graphemesTrie struct { } + +// func newGraphemesTrie(i int) *graphemesTrie { +// return &graphemesTrie{} +// } + +// lookupValue determines the type of block n and looks up the value for b. +func lookupValue(n uint32, b byte) property { + switch { + default: + return property(graphemesValues[n<<6+uint32(b)]) + } +} + +// graphemesValues: 215 blocks, 13760 entries, 27520 bytes +// The third block is the zero block. +var graphemesValues = [13760]property{ + // Block 0x0, offset 0x0 + 0x00: 0x0002, 0x01: 0x0002, 0x02: 0x0002, 0x03: 0x0002, 0x04: 0x0002, 0x05: 0x0002, + 0x06: 0x0002, 0x07: 0x0002, 0x08: 0x0002, 0x09: 0x0002, 0x0a: 0x0020, 0x0b: 0x0002, + 0x0c: 0x0002, 0x0d: 0x0001, 0x0e: 0x0002, 0x0f: 0x0002, 0x10: 0x0002, 0x11: 0x0002, + 0x12: 0x0002, 0x13: 0x0002, 0x14: 0x0002, 0x15: 0x0002, 0x16: 0x0002, 0x17: 0x0002, + 0x18: 0x0002, 0x19: 0x0002, 0x1a: 0x0002, 0x1b: 0x0002, 0x1c: 0x0002, 0x1d: 0x0002, + 0x1e: 0x0002, 0x1f: 0x0002, + // Block 0x1, offset 0x40 + 0x7f: 0x0002, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0002, 0xc1: 0x0002, 0xc2: 0x0002, 0xc3: 0x0002, 0xc4: 0x0002, 0xc5: 0x0002, + 0xc6: 0x0002, 0xc7: 0x0002, 0xc8: 0x0002, 0xc9: 0x0002, 0xca: 0x0002, 0xcb: 0x0002, + 0xcc: 0x0002, 0xcd: 0x0002, 0xce: 0x0002, 0xcf: 0x0002, 0xd0: 0x0002, 0xd1: 0x0002, + 0xd2: 0x0002, 0xd3: 0x0002, 0xd4: 0x0002, 0xd5: 0x0002, 0xd6: 0x0002, 0xd7: 0x0002, + 0xd8: 0x0002, 0xd9: 0x0002, 0xda: 0x0002, 0xdb: 0x0002, 0xdc: 0x0002, 0xdd: 0x0002, + 0xde: 0x0002, 0xdf: 0x0002, + 0xe9: 0x0008, + 0xed: 0x0002, 0xee: 0x0008, + // Block 0x4, offset 0x100 + 0x100: 0x0004, 0x101: 0x0004, 0x102: 0x0004, 0x103: 0x0004, 0x104: 0x0004, 0x105: 0x0004, + 0x106: 0x0004, 0x107: 0x0004, 0x108: 0x0004, 0x109: 0x0004, 0x10a: 0x0004, 0x10b: 0x0004, + 0x10c: 0x0004, 0x10d: 0x0004, 0x10e: 0x0004, 0x10f: 0x0004, 0x110: 0x0004, 0x111: 0x0004, + 0x112: 0x0004, 0x113: 0x0004, 0x114: 0x0004, 0x115: 0x0004, 0x116: 0x0004, 0x117: 0x0004, + 0x118: 0x0004, 0x119: 0x0004, 0x11a: 0x0004, 0x11b: 0x0004, 0x11c: 0x0004, 0x11d: 0x0004, + 0x11e: 0x0004, 0x11f: 0x0004, 0x120: 0x0004, 0x121: 0x0004, 0x122: 0x0004, 0x123: 0x0004, + 0x124: 0x0004, 0x125: 0x0004, 0x126: 0x0004, 0x127: 0x0004, 0x128: 0x0004, 0x129: 
0x0004, + 0x12a: 0x0004, 0x12b: 0x0004, 0x12c: 0x0004, 0x12d: 0x0004, 0x12e: 0x0004, 0x12f: 0x0004, + 0x130: 0x0004, 0x131: 0x0004, 0x132: 0x0004, 0x133: 0x0004, 0x134: 0x0004, 0x135: 0x0004, + 0x136: 0x0004, 0x137: 0x0004, 0x138: 0x0004, 0x139: 0x0004, 0x13a: 0x0004, 0x13b: 0x0004, + 0x13c: 0x0004, 0x13d: 0x0004, 0x13e: 0x0004, 0x13f: 0x0004, + // Block 0x5, offset 0x140 + 0x140: 0x0004, 0x141: 0x0004, 0x142: 0x0004, 0x143: 0x0004, 0x144: 0x0004, 0x145: 0x0004, + 0x146: 0x0004, 0x147: 0x0004, 0x148: 0x0004, 0x149: 0x0004, 0x14a: 0x0004, 0x14b: 0x0004, + 0x14c: 0x0004, 0x14d: 0x0004, 0x14e: 0x0004, 0x14f: 0x0004, 0x150: 0x0004, 0x151: 0x0004, + 0x152: 0x0004, 0x153: 0x0004, 0x154: 0x0004, 0x155: 0x0004, 0x156: 0x0004, 0x157: 0x0004, + 0x158: 0x0004, 0x159: 0x0004, 0x15a: 0x0004, 0x15b: 0x0004, 0x15c: 0x0004, 0x15d: 0x0004, + 0x15e: 0x0004, 0x15f: 0x0004, 0x160: 0x0004, 0x161: 0x0004, 0x162: 0x0004, 0x163: 0x0004, + 0x164: 0x0004, 0x165: 0x0004, 0x166: 0x0004, 0x167: 0x0004, 0x168: 0x0004, 0x169: 0x0004, + 0x16a: 0x0004, 0x16b: 0x0004, 0x16c: 0x0004, 0x16d: 0x0004, 0x16e: 0x0004, 0x16f: 0x0004, + // Block 0x6, offset 0x180 + 0x183: 0x0004, 0x184: 0x0004, 0x185: 0x0004, + 0x186: 0x0004, 0x187: 0x0004, 0x188: 0x0004, 0x189: 0x0004, + // Block 0x7, offset 0x1c0 + 0x1d1: 0x0004, + 0x1d2: 0x0004, 0x1d3: 0x0004, 0x1d4: 0x0004, 0x1d5: 0x0004, 0x1d6: 0x0004, 0x1d7: 0x0004, + 0x1d8: 0x0004, 0x1d9: 0x0004, 0x1da: 0x0004, 0x1db: 0x0004, 0x1dc: 0x0004, 0x1dd: 0x0004, + 0x1de: 0x0004, 0x1df: 0x0004, 0x1e0: 0x0004, 0x1e1: 0x0004, 0x1e2: 0x0004, 0x1e3: 0x0004, + 0x1e4: 0x0004, 0x1e5: 0x0004, 0x1e6: 0x0004, 0x1e7: 0x0004, 0x1e8: 0x0004, 0x1e9: 0x0004, + 0x1ea: 0x0004, 0x1eb: 0x0004, 0x1ec: 0x0004, 0x1ed: 0x0004, 0x1ee: 0x0004, 0x1ef: 0x0004, + 0x1f0: 0x0004, 0x1f1: 0x0004, 0x1f2: 0x0004, 0x1f3: 0x0004, 0x1f4: 0x0004, 0x1f5: 0x0004, + 0x1f6: 0x0004, 0x1f7: 0x0004, 0x1f8: 0x0004, 0x1f9: 0x0004, 0x1fa: 0x0004, 0x1fb: 0x0004, + 0x1fc: 0x0004, 0x1fd: 0x0004, 0x1ff: 0x0004, + // Block 0x8, offset 0x200 + 0x201: 0x0004, 0x202: 0x0004, 0x204: 0x0004, 0x205: 0x0004, + 0x207: 0x0004, + // Block 0x9, offset 0x240 + 0x240: 0x0100, 0x241: 0x0100, 0x242: 0x0100, 0x243: 0x0100, 0x244: 0x0100, 0x245: 0x0100, + 0x250: 0x0004, 0x251: 0x0004, + 0x252: 0x0004, 0x253: 0x0004, 0x254: 0x0004, 0x255: 0x0004, 0x256: 0x0004, 0x257: 0x0004, + 0x258: 0x0004, 0x259: 0x0004, 0x25a: 0x0004, 0x25c: 0x0002, + // Block 0xa, offset 0x280 + 0x28b: 0x0004, + 0x28c: 0x0004, 0x28d: 0x0004, 0x28e: 0x0004, 0x28f: 0x0004, 0x290: 0x0004, 0x291: 0x0004, + 0x292: 0x0004, 0x293: 0x0004, 0x294: 0x0004, 0x295: 0x0004, 0x296: 0x0004, 0x297: 0x0004, + 0x298: 0x0004, 0x299: 0x0004, 0x29a: 0x0004, 0x29b: 0x0004, 0x29c: 0x0004, 0x29d: 0x0004, + 0x29e: 0x0004, 0x29f: 0x0004, + 0x2b0: 0x0004, + // Block 0xb, offset 0x2c0 + 0x2d6: 0x0004, 0x2d7: 0x0004, + 0x2d8: 0x0004, 0x2d9: 0x0004, 0x2da: 0x0004, 0x2db: 0x0004, 0x2dc: 0x0004, 0x2dd: 0x0100, + 0x2df: 0x0004, 0x2e0: 0x0004, 0x2e1: 0x0004, 0x2e2: 0x0004, 0x2e3: 0x0004, + 0x2e4: 0x0004, 0x2e7: 0x0004, 0x2e8: 0x0004, + 0x2ea: 0x0004, 0x2eb: 0x0004, 0x2ec: 0x0004, 0x2ed: 0x0004, + // Block 0xc, offset 0x300 + 0x30f: 0x0100, 0x311: 0x0004, + 0x330: 0x0004, 0x331: 0x0004, 0x332: 0x0004, 0x333: 0x0004, 0x334: 0x0004, 0x335: 0x0004, + 0x336: 0x0004, 0x337: 0x0004, 0x338: 0x0004, 0x339: 0x0004, 0x33a: 0x0004, 0x33b: 0x0004, + 0x33c: 0x0004, 0x33d: 0x0004, 0x33e: 0x0004, 0x33f: 0x0004, + // Block 0xd, offset 0x340 + 0x340: 0x0004, 0x341: 0x0004, 0x342: 0x0004, 0x343: 0x0004, 0x344: 0x0004, 0x345: 0x0004, + 0x346: 
0x0004, 0x347: 0x0004, 0x348: 0x0004, 0x349: 0x0004, 0x34a: 0x0004, + // Block 0xe, offset 0x380 + 0x3a6: 0x0004, 0x3a7: 0x0004, 0x3a8: 0x0004, 0x3a9: 0x0004, + 0x3aa: 0x0004, 0x3ab: 0x0004, 0x3ac: 0x0004, 0x3ad: 0x0004, 0x3ae: 0x0004, 0x3af: 0x0004, + 0x3b0: 0x0004, + // Block 0xf, offset 0x3c0 + 0x3eb: 0x0004, 0x3ec: 0x0004, 0x3ed: 0x0004, 0x3ee: 0x0004, 0x3ef: 0x0004, + 0x3f0: 0x0004, 0x3f1: 0x0004, 0x3f2: 0x0004, 0x3f3: 0x0004, + 0x3fd: 0x0004, + // Block 0x10, offset 0x400 + 0x416: 0x0004, 0x417: 0x0004, + 0x418: 0x0004, 0x419: 0x0004, 0x41b: 0x0004, 0x41c: 0x0004, 0x41d: 0x0004, + 0x41e: 0x0004, 0x41f: 0x0004, 0x420: 0x0004, 0x421: 0x0004, 0x422: 0x0004, 0x423: 0x0004, + 0x425: 0x0004, 0x426: 0x0004, 0x427: 0x0004, 0x429: 0x0004, + 0x42a: 0x0004, 0x42b: 0x0004, 0x42c: 0x0004, 0x42d: 0x0004, + // Block 0x11, offset 0x440 + 0x459: 0x0004, 0x45a: 0x0004, 0x45b: 0x0004, + // Block 0x12, offset 0x480 + 0x490: 0x0100, 0x491: 0x0100, + 0x498: 0x0004, 0x499: 0x0004, 0x49a: 0x0004, 0x49b: 0x0004, 0x49c: 0x0004, 0x49d: 0x0004, + 0x49e: 0x0004, 0x49f: 0x0004, + // Block 0x13, offset 0x4c0 + 0x4ca: 0x0004, 0x4cb: 0x0004, + 0x4cc: 0x0004, 0x4cd: 0x0004, 0x4ce: 0x0004, 0x4cf: 0x0004, 0x4d0: 0x0004, 0x4d1: 0x0004, + 0x4d2: 0x0004, 0x4d3: 0x0004, 0x4d4: 0x0004, 0x4d5: 0x0004, 0x4d6: 0x0004, 0x4d7: 0x0004, + 0x4d8: 0x0004, 0x4d9: 0x0004, 0x4da: 0x0004, 0x4db: 0x0004, 0x4dc: 0x0004, 0x4dd: 0x0004, + 0x4de: 0x0004, 0x4df: 0x0004, 0x4e0: 0x0004, 0x4e1: 0x0004, 0x4e2: 0x0100, 0x4e3: 0x0004, + 0x4e4: 0x0004, 0x4e5: 0x0004, 0x4e6: 0x0004, 0x4e7: 0x0004, 0x4e8: 0x0004, 0x4e9: 0x0004, + 0x4ea: 0x0004, 0x4eb: 0x0004, 0x4ec: 0x0004, 0x4ed: 0x0004, 0x4ee: 0x0004, 0x4ef: 0x0004, + 0x4f0: 0x0004, 0x4f1: 0x0004, 0x4f2: 0x0004, 0x4f3: 0x0004, 0x4f4: 0x0004, 0x4f5: 0x0004, + 0x4f6: 0x0004, 0x4f7: 0x0004, 0x4f8: 0x0004, 0x4f9: 0x0004, 0x4fa: 0x0004, 0x4fb: 0x0004, + 0x4fc: 0x0004, 0x4fd: 0x0004, 0x4fe: 0x0004, 0x4ff: 0x0004, + // Block 0x14, offset 0x500 + 0x500: 0x0004, 0x501: 0x0004, 0x502: 0x0004, 0x503: 0x0400, + 0x53a: 0x0004, 0x53b: 0x0400, + 0x53c: 0x0004, 0x53e: 0x0400, 0x53f: 0x0400, + // Block 0x15, offset 0x540 + 0x540: 0x0400, 0x541: 0x0004, 0x542: 0x0004, 0x543: 0x0004, 0x544: 0x0004, 0x545: 0x0004, + 0x546: 0x0004, 0x547: 0x0004, 0x548: 0x0004, 0x549: 0x0400, 0x54a: 0x0400, 0x54b: 0x0400, + 0x54c: 0x0400, 0x54d: 0x0004, 0x54e: 0x0400, 0x54f: 0x0400, 0x551: 0x0004, + 0x552: 0x0004, 0x553: 0x0004, 0x554: 0x0004, 0x555: 0x0004, 0x556: 0x0004, 0x557: 0x0004, + 0x562: 0x0004, 0x563: 0x0004, + // Block 0x16, offset 0x580 + 0x581: 0x0004, 0x582: 0x0400, 0x583: 0x0400, + 0x5bc: 0x0004, 0x5be: 0x0004, 0x5bf: 0x0400, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0400, 0x5c1: 0x0004, 0x5c2: 0x0004, 0x5c3: 0x0004, 0x5c4: 0x0004, + 0x5c7: 0x0400, 0x5c8: 0x0400, 0x5cb: 0x0400, + 0x5cc: 0x0400, 0x5cd: 0x0004, + 0x5d7: 0x0004, + 0x5e2: 0x0004, 0x5e3: 0x0004, + 0x5fe: 0x0004, + // Block 0x18, offset 0x600 + 0x601: 0x0004, 0x602: 0x0004, 0x603: 0x0400, + 0x63c: 0x0004, 0x63e: 0x0400, 0x63f: 0x0400, + // Block 0x19, offset 0x640 + 0x640: 0x0400, 0x641: 0x0004, 0x642: 0x0004, + 0x647: 0x0004, 0x648: 0x0004, 0x64b: 0x0004, + 0x64c: 0x0004, 0x64d: 0x0004, 0x651: 0x0004, + 0x670: 0x0004, 0x671: 0x0004, 0x675: 0x0004, + // Block 0x1a, offset 0x680 + 0x680: 0x0400, 0x681: 0x0004, 0x682: 0x0004, 0x683: 0x0004, 0x684: 0x0004, 0x685: 0x0004, + 0x687: 0x0004, 0x688: 0x0004, 0x689: 0x0400, 0x68b: 0x0400, + 0x68c: 0x0400, 0x68d: 0x0004, + 0x6a2: 0x0004, 0x6a3: 0x0004, + 0x6ba: 0x0004, 0x6bb: 0x0004, + 0x6bc: 0x0004, 0x6bd: 0x0004, 0x6be: 
0x0004, 0x6bf: 0x0004, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x0004, 0x6c2: 0x0400, 0x6c3: 0x0400, + 0x6fc: 0x0004, 0x6fe: 0x0004, 0x6ff: 0x0004, + // Block 0x1c, offset 0x700 + 0x700: 0x0400, 0x701: 0x0004, 0x702: 0x0004, 0x703: 0x0004, 0x704: 0x0004, + 0x707: 0x0400, 0x708: 0x0400, 0x70b: 0x0400, + 0x70c: 0x0400, 0x70d: 0x0004, + 0x715: 0x0004, 0x716: 0x0004, 0x717: 0x0004, + 0x722: 0x0004, 0x723: 0x0004, + // Block 0x1d, offset 0x740 + 0x742: 0x0004, + 0x77e: 0x0004, 0x77f: 0x0400, + // Block 0x1e, offset 0x780 + 0x780: 0x0004, 0x781: 0x0400, 0x782: 0x0400, + 0x786: 0x0400, 0x787: 0x0400, 0x788: 0x0400, 0x78a: 0x0400, 0x78b: 0x0400, + 0x78c: 0x0400, 0x78d: 0x0004, + 0x797: 0x0004, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0004, 0x7c1: 0x0400, 0x7c2: 0x0400, 0x7c3: 0x0400, 0x7c4: 0x0004, + 0x7fc: 0x0004, 0x7fe: 0x0004, 0x7ff: 0x0004, + // Block 0x20, offset 0x800 + 0x800: 0x0004, 0x801: 0x0400, 0x802: 0x0400, 0x803: 0x0400, 0x804: 0x0400, + 0x806: 0x0004, 0x807: 0x0004, 0x808: 0x0004, 0x80a: 0x0004, 0x80b: 0x0004, + 0x80c: 0x0004, 0x80d: 0x0004, + 0x815: 0x0004, 0x816: 0x0004, + 0x822: 0x0004, 0x823: 0x0004, + // Block 0x21, offset 0x840 + 0x841: 0x0004, 0x842: 0x0400, 0x843: 0x0400, + 0x87c: 0x0004, 0x87e: 0x0400, 0x87f: 0x0004, + // Block 0x22, offset 0x880 + 0x880: 0x0400, 0x881: 0x0400, 0x882: 0x0004, 0x883: 0x0400, 0x884: 0x0400, + 0x886: 0x0004, 0x887: 0x0400, 0x888: 0x0400, 0x88a: 0x0400, 0x88b: 0x0400, + 0x88c: 0x0004, 0x88d: 0x0004, + 0x895: 0x0004, 0x896: 0x0004, + 0x8a2: 0x0004, 0x8a3: 0x0004, + 0x8b3: 0x0400, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0004, 0x8c1: 0x0004, 0x8c2: 0x0400, 0x8c3: 0x0400, + 0x8fb: 0x0004, + 0x8fc: 0x0004, 0x8fe: 0x0004, 0x8ff: 0x0400, + // Block 0x24, offset 0x900 + 0x900: 0x0400, 0x901: 0x0004, 0x902: 0x0004, 0x903: 0x0004, 0x904: 0x0004, + 0x906: 0x0400, 0x907: 0x0400, 0x908: 0x0400, 0x90a: 0x0400, 0x90b: 0x0400, + 0x90c: 0x0400, 0x90d: 0x0004, 0x90e: 0x0100, + 0x917: 0x0004, + 0x922: 0x0004, 0x923: 0x0004, + // Block 0x25, offset 0x940 + 0x941: 0x0004, 0x942: 0x0400, 0x943: 0x0400, + // Block 0x26, offset 0x980 + 0x98a: 0x0004, + 0x98f: 0x0004, 0x990: 0x0400, 0x991: 0x0400, + 0x992: 0x0004, 0x993: 0x0004, 0x994: 0x0004, 0x996: 0x0004, + 0x998: 0x0400, 0x999: 0x0400, 0x99a: 0x0400, 0x99b: 0x0400, 0x99c: 0x0400, 0x99d: 0x0400, + 0x99e: 0x0400, 0x99f: 0x0004, + 0x9b2: 0x0400, 0x9b3: 0x0400, + // Block 0x27, offset 0x9c0 + 0x9f1: 0x0004, 0x9f3: 0x0400, 0x9f4: 0x0004, 0x9f5: 0x0004, + 0x9f6: 0x0004, 0x9f7: 0x0004, 0x9f8: 0x0004, 0x9f9: 0x0004, 0x9fa: 0x0004, + // Block 0x28, offset 0xa00 + 0xa07: 0x0004, 0xa08: 0x0004, 0xa09: 0x0004, 0xa0a: 0x0004, 0xa0b: 0x0004, + 0xa0c: 0x0004, 0xa0d: 0x0004, 0xa0e: 0x0004, + // Block 0x29, offset 0xa40 + 0xa71: 0x0004, 0xa73: 0x0400, 0xa74: 0x0004, 0xa75: 0x0004, + 0xa76: 0x0004, 0xa77: 0x0004, 0xa78: 0x0004, 0xa79: 0x0004, 0xa7a: 0x0004, 0xa7b: 0x0004, + 0xa7c: 0x0004, + // Block 0x2a, offset 0xa80 + 0xa88: 0x0004, 0xa89: 0x0004, 0xa8a: 0x0004, 0xa8b: 0x0004, + 0xa8c: 0x0004, 0xa8d: 0x0004, 0xa8e: 0x0004, + // Block 0x2b, offset 0xac0 + 0xad8: 0x0004, 0xad9: 0x0004, + 0xaf5: 0x0004, + 0xaf7: 0x0004, 0xaf9: 0x0004, + 0xafe: 0x0400, 0xaff: 0x0400, + // Block 0x2c, offset 0xb00 + 0xb31: 0x0004, 0xb32: 0x0004, 0xb33: 0x0004, 0xb34: 0x0004, 0xb35: 0x0004, + 0xb36: 0x0004, 0xb37: 0x0004, 0xb38: 0x0004, 0xb39: 0x0004, 0xb3a: 0x0004, 0xb3b: 0x0004, + 0xb3c: 0x0004, 0xb3d: 0x0004, 0xb3e: 0x0004, 0xb3f: 0x0400, + // Block 0x2d, offset 0xb40 + 0xb40: 0x0004, 0xb41: 0x0004, 0xb42: 0x0004, 0xb43: 0x0004, 0xb44: 0x0004, + 
0xb46: 0x0004, 0xb47: 0x0004, + 0xb4d: 0x0004, 0xb4e: 0x0004, 0xb4f: 0x0004, 0xb50: 0x0004, 0xb51: 0x0004, + 0xb52: 0x0004, 0xb53: 0x0004, 0xb54: 0x0004, 0xb55: 0x0004, 0xb56: 0x0004, 0xb57: 0x0004, + 0xb59: 0x0004, 0xb5a: 0x0004, 0xb5b: 0x0004, 0xb5c: 0x0004, 0xb5d: 0x0004, + 0xb5e: 0x0004, 0xb5f: 0x0004, 0xb60: 0x0004, 0xb61: 0x0004, 0xb62: 0x0004, 0xb63: 0x0004, + 0xb64: 0x0004, 0xb65: 0x0004, 0xb66: 0x0004, 0xb67: 0x0004, 0xb68: 0x0004, 0xb69: 0x0004, + 0xb6a: 0x0004, 0xb6b: 0x0004, 0xb6c: 0x0004, 0xb6d: 0x0004, 0xb6e: 0x0004, 0xb6f: 0x0004, + 0xb70: 0x0004, 0xb71: 0x0004, 0xb72: 0x0004, 0xb73: 0x0004, 0xb74: 0x0004, 0xb75: 0x0004, + 0xb76: 0x0004, 0xb77: 0x0004, 0xb78: 0x0004, 0xb79: 0x0004, 0xb7a: 0x0004, 0xb7b: 0x0004, + 0xb7c: 0x0004, + // Block 0x2e, offset 0xb80 + 0xb86: 0x0004, + // Block 0x2f, offset 0xbc0 + 0xbed: 0x0004, 0xbee: 0x0004, 0xbef: 0x0004, + 0xbf0: 0x0004, 0xbf1: 0x0400, 0xbf2: 0x0004, 0xbf3: 0x0004, 0xbf4: 0x0004, 0xbf5: 0x0004, + 0xbf6: 0x0004, 0xbf7: 0x0004, 0xbf9: 0x0004, 0xbfa: 0x0004, 0xbfb: 0x0400, + 0xbfc: 0x0400, 0xbfd: 0x0004, 0xbfe: 0x0004, + // Block 0x30, offset 0xc00 + 0xc16: 0x0400, 0xc17: 0x0400, + 0xc18: 0x0004, 0xc19: 0x0004, + 0xc1e: 0x0004, 0xc1f: 0x0004, 0xc20: 0x0004, + 0xc31: 0x0004, 0xc32: 0x0004, 0xc33: 0x0004, 0xc34: 0x0004, + // Block 0x31, offset 0xc40 + 0xc42: 0x0004, 0xc44: 0x0400, 0xc45: 0x0004, + 0xc46: 0x0004, + 0xc4d: 0x0004, + 0xc5d: 0x0004, + // Block 0x32, offset 0xc80 + 0xc80: 0x0010, 0xc81: 0x0010, 0xc82: 0x0010, 0xc83: 0x0010, 0xc84: 0x0010, 0xc85: 0x0010, + 0xc86: 0x0010, 0xc87: 0x0010, 0xc88: 0x0010, 0xc89: 0x0010, 0xc8a: 0x0010, 0xc8b: 0x0010, + 0xc8c: 0x0010, 0xc8d: 0x0010, 0xc8e: 0x0010, 0xc8f: 0x0010, 0xc90: 0x0010, 0xc91: 0x0010, + 0xc92: 0x0010, 0xc93: 0x0010, 0xc94: 0x0010, 0xc95: 0x0010, 0xc96: 0x0010, 0xc97: 0x0010, + 0xc98: 0x0010, 0xc99: 0x0010, 0xc9a: 0x0010, 0xc9b: 0x0010, 0xc9c: 0x0010, 0xc9d: 0x0010, + 0xc9e: 0x0010, 0xc9f: 0x0010, 0xca0: 0x0010, 0xca1: 0x0010, 0xca2: 0x0010, 0xca3: 0x0010, + 0xca4: 0x0010, 0xca5: 0x0010, 0xca6: 0x0010, 0xca7: 0x0010, 0xca8: 0x0010, 0xca9: 0x0010, + 0xcaa: 0x0010, 0xcab: 0x0010, 0xcac: 0x0010, 0xcad: 0x0010, 0xcae: 0x0010, 0xcaf: 0x0010, + 0xcb0: 0x0010, 0xcb1: 0x0010, 0xcb2: 0x0010, 0xcb3: 0x0010, 0xcb4: 0x0010, 0xcb5: 0x0010, + 0xcb6: 0x0010, 0xcb7: 0x0010, 0xcb8: 0x0010, 0xcb9: 0x0010, 0xcba: 0x0010, 0xcbb: 0x0010, + 0xcbc: 0x0010, 0xcbd: 0x0010, 0xcbe: 0x0010, 0xcbf: 0x0010, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x0010, 0xcc1: 0x0010, 0xcc2: 0x0010, 0xcc3: 0x0010, 0xcc4: 0x0010, 0xcc5: 0x0010, + 0xcc6: 0x0010, 0xcc7: 0x0010, 0xcc8: 0x0010, 0xcc9: 0x0010, 0xcca: 0x0010, 0xccb: 0x0010, + 0xccc: 0x0010, 0xccd: 0x0010, 0xcce: 0x0010, 0xccf: 0x0010, 0xcd0: 0x0010, 0xcd1: 0x0010, + 0xcd2: 0x0010, 0xcd3: 0x0010, 0xcd4: 0x0010, 0xcd5: 0x0010, 0xcd6: 0x0010, 0xcd7: 0x0010, + 0xcd8: 0x0010, 0xcd9: 0x0010, 0xcda: 0x0010, 0xcdb: 0x0010, 0xcdc: 0x0010, 0xcdd: 0x0010, + 0xcde: 0x0010, 0xcdf: 0x0010, 0xce0: 0x1000, 0xce1: 0x1000, 0xce2: 0x1000, 0xce3: 0x1000, + 0xce4: 0x1000, 0xce5: 0x1000, 0xce6: 0x1000, 0xce7: 0x1000, 0xce8: 0x1000, 0xce9: 0x1000, + 0xcea: 0x1000, 0xceb: 0x1000, 0xcec: 0x1000, 0xced: 0x1000, 0xcee: 0x1000, 0xcef: 0x1000, + 0xcf0: 0x1000, 0xcf1: 0x1000, 0xcf2: 0x1000, 0xcf3: 0x1000, 0xcf4: 0x1000, 0xcf5: 0x1000, + 0xcf6: 0x1000, 0xcf7: 0x1000, 0xcf8: 0x1000, 0xcf9: 0x1000, 0xcfa: 0x1000, 0xcfb: 0x1000, + 0xcfc: 0x1000, 0xcfd: 0x1000, 0xcfe: 0x1000, 0xcff: 0x1000, + // Block 0x34, offset 0xd00 + 0xd00: 0x1000, 0xd01: 0x1000, 0xd02: 0x1000, 0xd03: 0x1000, 0xd04: 0x1000, 
0xd05: 0x1000, + 0xd06: 0x1000, 0xd07: 0x1000, 0xd08: 0x1000, 0xd09: 0x1000, 0xd0a: 0x1000, 0xd0b: 0x1000, + 0xd0c: 0x1000, 0xd0d: 0x1000, 0xd0e: 0x1000, 0xd0f: 0x1000, 0xd10: 0x1000, 0xd11: 0x1000, + 0xd12: 0x1000, 0xd13: 0x1000, 0xd14: 0x1000, 0xd15: 0x1000, 0xd16: 0x1000, 0xd17: 0x1000, + 0xd18: 0x1000, 0xd19: 0x1000, 0xd1a: 0x1000, 0xd1b: 0x1000, 0xd1c: 0x1000, 0xd1d: 0x1000, + 0xd1e: 0x1000, 0xd1f: 0x1000, 0xd20: 0x1000, 0xd21: 0x1000, 0xd22: 0x1000, 0xd23: 0x1000, + 0xd24: 0x1000, 0xd25: 0x1000, 0xd26: 0x1000, 0xd27: 0x1000, 0xd28: 0x0800, 0xd29: 0x0800, + 0xd2a: 0x0800, 0xd2b: 0x0800, 0xd2c: 0x0800, 0xd2d: 0x0800, 0xd2e: 0x0800, 0xd2f: 0x0800, + 0xd30: 0x0800, 0xd31: 0x0800, 0xd32: 0x0800, 0xd33: 0x0800, 0xd34: 0x0800, 0xd35: 0x0800, + 0xd36: 0x0800, 0xd37: 0x0800, 0xd38: 0x0800, 0xd39: 0x0800, 0xd3a: 0x0800, 0xd3b: 0x0800, + 0xd3c: 0x0800, 0xd3d: 0x0800, 0xd3e: 0x0800, 0xd3f: 0x0800, + // Block 0x35, offset 0xd40 + 0xd40: 0x0800, 0xd41: 0x0800, 0xd42: 0x0800, 0xd43: 0x0800, 0xd44: 0x0800, 0xd45: 0x0800, + 0xd46: 0x0800, 0xd47: 0x0800, 0xd48: 0x0800, 0xd49: 0x0800, 0xd4a: 0x0800, 0xd4b: 0x0800, + 0xd4c: 0x0800, 0xd4d: 0x0800, 0xd4e: 0x0800, 0xd4f: 0x0800, 0xd50: 0x0800, 0xd51: 0x0800, + 0xd52: 0x0800, 0xd53: 0x0800, 0xd54: 0x0800, 0xd55: 0x0800, 0xd56: 0x0800, 0xd57: 0x0800, + 0xd58: 0x0800, 0xd59: 0x0800, 0xd5a: 0x0800, 0xd5b: 0x0800, 0xd5c: 0x0800, 0xd5d: 0x0800, + 0xd5e: 0x0800, 0xd5f: 0x0800, 0xd60: 0x0800, 0xd61: 0x0800, 0xd62: 0x0800, 0xd63: 0x0800, + 0xd64: 0x0800, 0xd65: 0x0800, 0xd66: 0x0800, 0xd67: 0x0800, 0xd68: 0x0800, 0xd69: 0x0800, + 0xd6a: 0x0800, 0xd6b: 0x0800, 0xd6c: 0x0800, 0xd6d: 0x0800, 0xd6e: 0x0800, 0xd6f: 0x0800, + 0xd70: 0x0800, 0xd71: 0x0800, 0xd72: 0x0800, 0xd73: 0x0800, 0xd74: 0x0800, 0xd75: 0x0800, + 0xd76: 0x0800, 0xd77: 0x0800, 0xd78: 0x0800, 0xd79: 0x0800, 0xd7a: 0x0800, 0xd7b: 0x0800, + 0xd7c: 0x0800, 0xd7d: 0x0800, 0xd7e: 0x0800, 0xd7f: 0x0800, + // Block 0x36, offset 0xd80 + 0xd9d: 0x0004, + 0xd9e: 0x0004, 0xd9f: 0x0004, + // Block 0x37, offset 0xdc0 + 0xdd2: 0x0004, 0xdd3: 0x0004, 0xdd4: 0x0004, 0xdd5: 0x0400, + 0xdf2: 0x0004, 0xdf3: 0x0004, 0xdf4: 0x0400, + // Block 0x38, offset 0xe00 + 0xe12: 0x0004, 0xe13: 0x0004, + 0xe32: 0x0004, 0xe33: 0x0004, + // Block 0x39, offset 0xe40 + 0xe74: 0x0004, 0xe75: 0x0004, + 0xe76: 0x0400, 0xe77: 0x0004, 0xe78: 0x0004, 0xe79: 0x0004, 0xe7a: 0x0004, 0xe7b: 0x0004, + 0xe7c: 0x0004, 0xe7d: 0x0004, 0xe7e: 0x0400, 0xe7f: 0x0400, + // Block 0x3a, offset 0xe80 + 0xe80: 0x0400, 0xe81: 0x0400, 0xe82: 0x0400, 0xe83: 0x0400, 0xe84: 0x0400, 0xe85: 0x0400, + 0xe86: 0x0004, 0xe87: 0x0400, 0xe88: 0x0400, 0xe89: 0x0004, 0xe8a: 0x0004, 0xe8b: 0x0004, + 0xe8c: 0x0004, 0xe8d: 0x0004, 0xe8e: 0x0004, 0xe8f: 0x0004, 0xe90: 0x0004, 0xe91: 0x0004, + 0xe92: 0x0004, 0xe93: 0x0004, + 0xe9d: 0x0004, + // Block 0x3b, offset 0xec0 + 0xecb: 0x0004, + 0xecc: 0x0004, 0xecd: 0x0004, 0xece: 0x0002, 0xecf: 0x0004, + // Block 0x3c, offset 0xf00 + 0xf05: 0x0004, + 0xf06: 0x0004, + 0xf29: 0x0004, + // Block 0x3d, offset 0xf40 + 0xf60: 0x0004, 0xf61: 0x0004, 0xf62: 0x0004, 0xf63: 0x0400, + 0xf64: 0x0400, 0xf65: 0x0400, 0xf66: 0x0400, 0xf67: 0x0004, 0xf68: 0x0004, 0xf69: 0x0400, + 0xf6a: 0x0400, 0xf6b: 0x0400, + 0xf70: 0x0400, 0xf71: 0x0400, 0xf72: 0x0004, 0xf73: 0x0400, 0xf74: 0x0400, 0xf75: 0x0400, + 0xf76: 0x0400, 0xf77: 0x0400, 0xf78: 0x0400, 0xf79: 0x0004, 0xf7a: 0x0004, 0xf7b: 0x0004, + // Block 0x3e, offset 0xf80 + 0xf97: 0x0004, + 0xf98: 0x0004, 0xf99: 0x0400, 0xf9a: 0x0400, 0xf9b: 0x0004, + // Block 0x3f, offset 0xfc0 + 0xfd5: 0x0400, 0xfd6: 
0x0004, 0xfd7: 0x0400, + 0xfd8: 0x0004, 0xfd9: 0x0004, 0xfda: 0x0004, 0xfdb: 0x0004, 0xfdc: 0x0004, 0xfdd: 0x0004, + 0xfde: 0x0004, 0xfe0: 0x0004, 0xfe2: 0x0004, + 0xfe5: 0x0004, 0xfe6: 0x0004, 0xfe7: 0x0004, 0xfe8: 0x0004, 0xfe9: 0x0004, + 0xfea: 0x0004, 0xfeb: 0x0004, 0xfec: 0x0004, 0xfed: 0x0400, 0xfee: 0x0400, 0xfef: 0x0400, + 0xff0: 0x0400, 0xff1: 0x0400, 0xff2: 0x0400, 0xff3: 0x0004, 0xff4: 0x0004, 0xff5: 0x0004, + 0xff6: 0x0004, 0xff7: 0x0004, 0xff8: 0x0004, 0xff9: 0x0004, 0xffa: 0x0004, 0xffb: 0x0004, + 0xffc: 0x0004, 0xfff: 0x0004, + // Block 0x40, offset 0x1000 + 0x1030: 0x0004, 0x1031: 0x0004, 0x1032: 0x0004, 0x1033: 0x0004, 0x1034: 0x0004, 0x1035: 0x0004, + 0x1036: 0x0004, 0x1037: 0x0004, 0x1038: 0x0004, 0x1039: 0x0004, 0x103a: 0x0004, 0x103b: 0x0004, + 0x103c: 0x0004, 0x103d: 0x0004, 0x103e: 0x0004, 0x103f: 0x0004, + // Block 0x41, offset 0x1040 + 0x1040: 0x0004, 0x1041: 0x0004, 0x1042: 0x0004, 0x1043: 0x0004, 0x1044: 0x0004, 0x1045: 0x0004, + 0x1046: 0x0004, 0x1047: 0x0004, 0x1048: 0x0004, 0x1049: 0x0004, 0x104a: 0x0004, 0x104b: 0x0004, + 0x104c: 0x0004, 0x104d: 0x0004, 0x104e: 0x0004, + // Block 0x42, offset 0x1080 + 0x1080: 0x0004, 0x1081: 0x0004, 0x1082: 0x0004, 0x1083: 0x0004, 0x1084: 0x0400, + 0x10b4: 0x0004, 0x10b5: 0x0004, + 0x10b6: 0x0004, 0x10b7: 0x0004, 0x10b8: 0x0004, 0x10b9: 0x0004, 0x10ba: 0x0004, 0x10bb: 0x0400, + 0x10bc: 0x0004, 0x10bd: 0x0400, 0x10be: 0x0400, 0x10bf: 0x0400, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x0400, 0x10c1: 0x0400, 0x10c2: 0x0004, 0x10c3: 0x0400, 0x10c4: 0x0400, + 0x10eb: 0x0004, 0x10ec: 0x0004, 0x10ed: 0x0004, 0x10ee: 0x0004, 0x10ef: 0x0004, + 0x10f0: 0x0004, 0x10f1: 0x0004, 0x10f2: 0x0004, 0x10f3: 0x0004, + // Block 0x44, offset 0x1100 + 0x1100: 0x0004, 0x1101: 0x0004, 0x1102: 0x0400, + 0x1121: 0x0400, 0x1122: 0x0004, 0x1123: 0x0004, + 0x1124: 0x0004, 0x1125: 0x0004, 0x1126: 0x0400, 0x1127: 0x0400, 0x1128: 0x0004, 0x1129: 0x0004, + 0x112a: 0x0400, 0x112b: 0x0004, 0x112c: 0x0004, 0x112d: 0x0004, + // Block 0x45, offset 0x1140 + 0x1166: 0x0004, 0x1167: 0x0400, 0x1168: 0x0004, 0x1169: 0x0004, + 0x116a: 0x0400, 0x116b: 0x0400, 0x116c: 0x0400, 0x116d: 0x0004, 0x116e: 0x0400, 0x116f: 0x0004, + 0x1170: 0x0004, 0x1171: 0x0004, 0x1172: 0x0400, 0x1173: 0x0400, + // Block 0x46, offset 0x1180 + 0x11a4: 0x0400, 0x11a5: 0x0400, 0x11a6: 0x0400, 0x11a7: 0x0400, 0x11a8: 0x0400, 0x11a9: 0x0400, + 0x11aa: 0x0400, 0x11ab: 0x0400, 0x11ac: 0x0004, 0x11ad: 0x0004, 0x11ae: 0x0004, 0x11af: 0x0004, + 0x11b0: 0x0004, 0x11b1: 0x0004, 0x11b2: 0x0004, 0x11b3: 0x0004, 0x11b4: 0x0400, 0x11b5: 0x0400, + 0x11b6: 0x0004, 0x11b7: 0x0004, + // Block 0x47, offset 0x11c0 + 0x11d0: 0x0004, 0x11d1: 0x0004, + 0x11d2: 0x0004, 0x11d4: 0x0004, 0x11d5: 0x0004, 0x11d6: 0x0004, 0x11d7: 0x0004, + 0x11d8: 0x0004, 0x11d9: 0x0004, 0x11da: 0x0004, 0x11db: 0x0004, 0x11dc: 0x0004, 0x11dd: 0x0004, + 0x11de: 0x0004, 0x11df: 0x0004, 0x11e0: 0x0004, 0x11e1: 0x0400, 0x11e2: 0x0004, 0x11e3: 0x0004, + 0x11e4: 0x0004, 0x11e5: 0x0004, 0x11e6: 0x0004, 0x11e7: 0x0004, 0x11e8: 0x0004, + 0x11ed: 0x0004, + 0x11f4: 0x0004, + 0x11f7: 0x0400, 0x11f8: 0x0004, 0x11f9: 0x0004, + // Block 0x48, offset 0x1200 + 0x120b: 0x0002, + 0x120c: 0x0004, 0x120d: 0x2000, 0x120e: 0x0002, 0x120f: 0x0002, + 0x1228: 0x0002, 0x1229: 0x0002, + 0x122a: 0x0002, 0x122b: 0x0002, 0x122c: 0x0002, 0x122d: 0x0002, 0x122e: 0x0002, + 0x123c: 0x0008, + // Block 0x49, offset 0x1240 + 0x1249: 0x0008, + 0x1260: 0x0002, 0x1261: 0x0002, 0x1262: 0x0002, 0x1263: 0x0002, + 0x1264: 0x0002, 0x1265: 0x0002, 0x1266: 0x0002, 0x1267: 0x0002, 0x1268: 
0x0002, 0x1269: 0x0002, + 0x126a: 0x0002, 0x126b: 0x0002, 0x126c: 0x0002, 0x126d: 0x0002, 0x126e: 0x0002, 0x126f: 0x0002, + // Block 0x4a, offset 0x1280 + 0x1290: 0x0004, 0x1291: 0x0004, + 0x1292: 0x0004, 0x1293: 0x0004, 0x1294: 0x0004, 0x1295: 0x0004, 0x1296: 0x0004, 0x1297: 0x0004, + 0x1298: 0x0004, 0x1299: 0x0004, 0x129a: 0x0004, 0x129b: 0x0004, 0x129c: 0x0004, 0x129d: 0x0004, + 0x129e: 0x0004, 0x129f: 0x0004, 0x12a0: 0x0004, 0x12a1: 0x0004, 0x12a2: 0x0004, 0x12a3: 0x0004, + 0x12a4: 0x0004, 0x12a5: 0x0004, 0x12a6: 0x0004, 0x12a7: 0x0004, 0x12a8: 0x0004, 0x12a9: 0x0004, + 0x12aa: 0x0004, 0x12ab: 0x0004, 0x12ac: 0x0004, 0x12ad: 0x0004, 0x12ae: 0x0004, 0x12af: 0x0004, + 0x12b0: 0x0004, + // Block 0x4b, offset 0x12c0 + 0x12e2: 0x0008, + 0x12f9: 0x0008, + // Block 0x4c, offset 0x1300 + 0x1314: 0x0008, 0x1315: 0x0008, 0x1316: 0x0008, 0x1317: 0x0008, + 0x1318: 0x0008, 0x1319: 0x0008, + 0x1329: 0x0008, + 0x132a: 0x0008, + // Block 0x4d, offset 0x1340 + 0x135a: 0x0008, 0x135b: 0x0008, + 0x1368: 0x0008, + // Block 0x4e, offset 0x1380 + 0x1388: 0x0008, + // Block 0x4f, offset 0x13c0 + 0x13cf: 0x0008, + 0x13e9: 0x0008, + 0x13ea: 0x0008, 0x13eb: 0x0008, 0x13ec: 0x0008, 0x13ed: 0x0008, 0x13ee: 0x0008, 0x13ef: 0x0008, + 0x13f0: 0x0008, 0x13f1: 0x0008, 0x13f2: 0x0008, 0x13f3: 0x0008, + 0x13f8: 0x0008, 0x13f9: 0x0008, 0x13fa: 0x0008, + // Block 0x50, offset 0x1400 + 0x1402: 0x0008, + // Block 0x51, offset 0x1440 + 0x146a: 0x0008, 0x146b: 0x0008, + 0x1476: 0x0008, + // Block 0x52, offset 0x1480 + 0x1480: 0x0008, + 0x14bb: 0x0008, + 0x14bc: 0x0008, 0x14bd: 0x0008, 0x14be: 0x0008, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0008, 0x14c1: 0x0008, 0x14c2: 0x0008, 0x14c3: 0x0008, 0x14c4: 0x0008, 0x14c5: 0x0008, + 0x14c7: 0x0008, 0x14c8: 0x0008, 0x14c9: 0x0008, 0x14ca: 0x0008, 0x14cb: 0x0008, + 0x14cc: 0x0008, 0x14cd: 0x0008, 0x14ce: 0x0008, 0x14cf: 0x0008, 0x14d0: 0x0008, 0x14d1: 0x0008, + 0x14d2: 0x0008, 0x14d4: 0x0008, 0x14d5: 0x0008, 0x14d6: 0x0008, 0x14d7: 0x0008, + 0x14d8: 0x0008, 0x14d9: 0x0008, 0x14da: 0x0008, 0x14db: 0x0008, 0x14dc: 0x0008, 0x14dd: 0x0008, + 0x14de: 0x0008, 0x14df: 0x0008, 0x14e0: 0x0008, 0x14e1: 0x0008, 0x14e2: 0x0008, 0x14e3: 0x0008, + 0x14e4: 0x0008, 0x14e5: 0x0008, 0x14e6: 0x0008, 0x14e7: 0x0008, 0x14e8: 0x0008, 0x14e9: 0x0008, + 0x14ea: 0x0008, 0x14eb: 0x0008, 0x14ec: 0x0008, 0x14ed: 0x0008, 0x14ee: 0x0008, 0x14ef: 0x0008, + 0x14f0: 0x0008, 0x14f1: 0x0008, 0x14f2: 0x0008, 0x14f3: 0x0008, 0x14f4: 0x0008, 0x14f5: 0x0008, + 0x14f6: 0x0008, 0x14f7: 0x0008, 0x14f8: 0x0008, 0x14f9: 0x0008, 0x14fa: 0x0008, 0x14fb: 0x0008, + 0x14fc: 0x0008, 0x14fd: 0x0008, 0x14fe: 0x0008, 0x14ff: 0x0008, + // Block 0x54, offset 0x1500 + 0x1500: 0x0008, 0x1501: 0x0008, 0x1502: 0x0008, 0x1503: 0x0008, 0x1504: 0x0008, 0x1505: 0x0008, + 0x1506: 0x0008, 0x1507: 0x0008, 0x1508: 0x0008, 0x1509: 0x0008, 0x150a: 0x0008, 0x150b: 0x0008, + 0x150c: 0x0008, 0x150d: 0x0008, 0x150e: 0x0008, 0x150f: 0x0008, 0x1510: 0x0008, 0x1511: 0x0008, + 0x1512: 0x0008, 0x1513: 0x0008, 0x1514: 0x0008, 0x1515: 0x0008, 0x1516: 0x0008, 0x1517: 0x0008, + 0x1518: 0x0008, 0x1519: 0x0008, 0x151a: 0x0008, 0x151b: 0x0008, 0x151c: 0x0008, 0x151d: 0x0008, + 0x151e: 0x0008, 0x151f: 0x0008, 0x1520: 0x0008, 0x1521: 0x0008, 0x1522: 0x0008, 0x1523: 0x0008, + 0x1524: 0x0008, 0x1525: 0x0008, 0x1526: 0x0008, 0x1527: 0x0008, 0x1528: 0x0008, 0x1529: 0x0008, + 0x152a: 0x0008, 0x152b: 0x0008, 0x152c: 0x0008, 0x152d: 0x0008, 0x152e: 0x0008, 0x152f: 0x0008, + 0x1530: 0x0008, 0x1531: 0x0008, 0x1532: 0x0008, 0x1533: 0x0008, 0x1534: 0x0008, 0x1535: 0x0008, + 
0x1536: 0x0008, 0x1537: 0x0008, 0x1538: 0x0008, 0x1539: 0x0008, 0x153a: 0x0008, 0x153b: 0x0008, + 0x153c: 0x0008, 0x153d: 0x0008, 0x153e: 0x0008, 0x153f: 0x0008, + // Block 0x55, offset 0x1540 + 0x1540: 0x0008, 0x1541: 0x0008, 0x1542: 0x0008, 0x1543: 0x0008, 0x1544: 0x0008, 0x1545: 0x0008, + 0x1550: 0x0008, 0x1551: 0x0008, + 0x1552: 0x0008, 0x1553: 0x0008, 0x1554: 0x0008, 0x1555: 0x0008, 0x1556: 0x0008, 0x1557: 0x0008, + 0x1558: 0x0008, 0x1559: 0x0008, 0x155a: 0x0008, 0x155b: 0x0008, 0x155c: 0x0008, 0x155d: 0x0008, + 0x155e: 0x0008, 0x155f: 0x0008, 0x1560: 0x0008, 0x1561: 0x0008, 0x1562: 0x0008, 0x1563: 0x0008, + 0x1564: 0x0008, 0x1565: 0x0008, 0x1566: 0x0008, 0x1567: 0x0008, 0x1568: 0x0008, 0x1569: 0x0008, + 0x156a: 0x0008, 0x156b: 0x0008, 0x156c: 0x0008, 0x156d: 0x0008, 0x156e: 0x0008, 0x156f: 0x0008, + 0x1570: 0x0008, 0x1571: 0x0008, 0x1572: 0x0008, 0x1573: 0x0008, 0x1574: 0x0008, 0x1575: 0x0008, + 0x1576: 0x0008, 0x1577: 0x0008, 0x1578: 0x0008, 0x1579: 0x0008, 0x157a: 0x0008, 0x157b: 0x0008, + 0x157c: 0x0008, 0x157d: 0x0008, 0x157e: 0x0008, 0x157f: 0x0008, + // Block 0x56, offset 0x1580 + 0x1580: 0x0008, 0x1581: 0x0008, 0x1582: 0x0008, 0x1583: 0x0008, 0x1584: 0x0008, 0x1585: 0x0008, + 0x1588: 0x0008, 0x1589: 0x0008, 0x158a: 0x0008, 0x158b: 0x0008, + 0x158c: 0x0008, 0x158d: 0x0008, 0x158e: 0x0008, 0x158f: 0x0008, 0x1590: 0x0008, 0x1591: 0x0008, + 0x1592: 0x0008, 0x1594: 0x0008, 0x1596: 0x0008, + 0x159d: 0x0008, + 0x15a1: 0x0008, + 0x15a8: 0x0008, + 0x15b3: 0x0008, 0x15b4: 0x0008, + // Block 0x57, offset 0x15c0 + 0x15c4: 0x0008, + 0x15c7: 0x0008, + 0x15cc: 0x0008, 0x15ce: 0x0008, + 0x15d3: 0x0008, 0x15d4: 0x0008, 0x15d5: 0x0008, 0x15d7: 0x0008, + 0x15e3: 0x0008, + 0x15e4: 0x0008, 0x15e5: 0x0008, 0x15e6: 0x0008, 0x15e7: 0x0008, + // Block 0x58, offset 0x1600 + 0x1615: 0x0008, 0x1616: 0x0008, 0x1617: 0x0008, + 0x1621: 0x0008, + 0x1630: 0x0008, + 0x163f: 0x0008, + // Block 0x59, offset 0x1640 + 0x1674: 0x0008, 0x1675: 0x0008, + // Block 0x5a, offset 0x1680 + 0x1685: 0x0008, + 0x1686: 0x0008, 0x1687: 0x0008, + 0x169b: 0x0008, 0x169c: 0x0008, + // Block 0x5b, offset 0x16c0 + 0x16d0: 0x0008, + 0x16d5: 0x0008, + // Block 0x5c, offset 0x1700 + 0x172f: 0x0004, + 0x1730: 0x0004, 0x1731: 0x0004, + // Block 0x5d, offset 0x1740 + 0x177f: 0x0004, + // Block 0x5e, offset 0x1780 + 0x17a0: 0x0004, 0x17a1: 0x0004, 0x17a2: 0x0004, 0x17a3: 0x0004, + 0x17a4: 0x0004, 0x17a5: 0x0004, 0x17a6: 0x0004, 0x17a7: 0x0004, 0x17a8: 0x0004, 0x17a9: 0x0004, + 0x17aa: 0x0004, 0x17ab: 0x0004, 0x17ac: 0x0004, 0x17ad: 0x0004, 0x17ae: 0x0004, 0x17af: 0x0004, + 0x17b0: 0x0004, 0x17b1: 0x0004, 0x17b2: 0x0004, 0x17b3: 0x0004, 0x17b4: 0x0004, 0x17b5: 0x0004, + 0x17b6: 0x0004, 0x17b7: 0x0004, 0x17b8: 0x0004, 0x17b9: 0x0004, 0x17ba: 0x0004, 0x17bb: 0x0004, + 0x17bc: 0x0004, 0x17bd: 0x0004, 0x17be: 0x0004, 0x17bf: 0x0004, + // Block 0x5f, offset 0x17c0 + 0x17ea: 0x0004, 0x17eb: 0x0004, 0x17ec: 0x0004, 0x17ed: 0x0004, 0x17ee: 0x0004, 0x17ef: 0x0004, + 0x17f0: 0x0008, + 0x17fd: 0x0008, + // Block 0x60, offset 0x1800 + 0x1819: 0x0004, 0x181a: 0x0004, + // Block 0x61, offset 0x1840 + 0x1857: 0x0008, + 0x1859: 0x0008, + // Block 0x62, offset 0x1880 + 0x18af: 0x0004, + 0x18b0: 0x0004, 0x18b1: 0x0004, 0x18b2: 0x0004, 0x18b4: 0x0004, 0x18b5: 0x0004, + 0x18b6: 0x0004, 0x18b7: 0x0004, 0x18b8: 0x0004, 0x18b9: 0x0004, 0x18ba: 0x0004, 0x18bb: 0x0004, + 0x18bc: 0x0004, 0x18bd: 0x0004, + // Block 0x63, offset 0x18c0 + 0x18de: 0x0004, 0x18df: 0x0004, + // Block 0x64, offset 0x1900 + 0x1930: 0x0004, 0x1931: 0x0004, + // Block 0x65, offset 0x1940 + 
0x1942: 0x0004, + 0x1946: 0x0004, 0x194b: 0x0004, + 0x1963: 0x0400, + 0x1964: 0x0400, 0x1965: 0x0004, 0x1966: 0x0004, 0x1967: 0x0400, + 0x196c: 0x0004, + // Block 0x66, offset 0x1980 + 0x1980: 0x0400, 0x1981: 0x0400, + 0x19b4: 0x0400, 0x19b5: 0x0400, + 0x19b6: 0x0400, 0x19b7: 0x0400, 0x19b8: 0x0400, 0x19b9: 0x0400, 0x19ba: 0x0400, 0x19bb: 0x0400, + 0x19bc: 0x0400, 0x19bd: 0x0400, 0x19be: 0x0400, 0x19bf: 0x0400, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0400, 0x19c1: 0x0400, 0x19c2: 0x0400, 0x19c3: 0x0400, 0x19c4: 0x0004, 0x19c5: 0x0004, + 0x19e0: 0x0004, 0x19e1: 0x0004, 0x19e2: 0x0004, 0x19e3: 0x0004, + 0x19e4: 0x0004, 0x19e5: 0x0004, 0x19e6: 0x0004, 0x19e7: 0x0004, 0x19e8: 0x0004, 0x19e9: 0x0004, + 0x19ea: 0x0004, 0x19eb: 0x0004, 0x19ec: 0x0004, 0x19ed: 0x0004, 0x19ee: 0x0004, 0x19ef: 0x0004, + 0x19f0: 0x0004, 0x19f1: 0x0004, + 0x19ff: 0x0004, + // Block 0x68, offset 0x1a00 + 0x1a26: 0x0004, 0x1a27: 0x0004, 0x1a28: 0x0004, 0x1a29: 0x0004, + 0x1a2a: 0x0004, 0x1a2b: 0x0004, 0x1a2c: 0x0004, 0x1a2d: 0x0004, + // Block 0x69, offset 0x1a40 + 0x1a47: 0x0004, 0x1a48: 0x0004, 0x1a49: 0x0004, 0x1a4a: 0x0004, 0x1a4b: 0x0004, + 0x1a4c: 0x0004, 0x1a4d: 0x0004, 0x1a4e: 0x0004, 0x1a4f: 0x0004, 0x1a50: 0x0004, 0x1a51: 0x0004, + 0x1a52: 0x0400, 0x1a53: 0x0400, + 0x1a60: 0x0010, 0x1a61: 0x0010, 0x1a62: 0x0010, 0x1a63: 0x0010, + 0x1a64: 0x0010, 0x1a65: 0x0010, 0x1a66: 0x0010, 0x1a67: 0x0010, 0x1a68: 0x0010, 0x1a69: 0x0010, + 0x1a6a: 0x0010, 0x1a6b: 0x0010, 0x1a6c: 0x0010, 0x1a6d: 0x0010, 0x1a6e: 0x0010, 0x1a6f: 0x0010, + 0x1a70: 0x0010, 0x1a71: 0x0010, 0x1a72: 0x0010, 0x1a73: 0x0010, 0x1a74: 0x0010, 0x1a75: 0x0010, + 0x1a76: 0x0010, 0x1a77: 0x0010, 0x1a78: 0x0010, 0x1a79: 0x0010, 0x1a7a: 0x0010, 0x1a7b: 0x0010, + 0x1a7c: 0x0010, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0004, 0x1a81: 0x0004, 0x1a82: 0x0004, 0x1a83: 0x0400, + 0x1ab3: 0x0004, 0x1ab4: 0x0400, 0x1ab5: 0x0400, + 0x1ab6: 0x0004, 0x1ab7: 0x0004, 0x1ab8: 0x0004, 0x1ab9: 0x0004, 0x1aba: 0x0400, 0x1abb: 0x0400, + 0x1abc: 0x0004, 0x1abd: 0x0004, 0x1abe: 0x0400, 0x1abf: 0x0400, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x0400, + 0x1ae5: 0x0004, + // Block 0x6c, offset 0x1b00 + 0x1b29: 0x0004, + 0x1b2a: 0x0004, 0x1b2b: 0x0004, 0x1b2c: 0x0004, 0x1b2d: 0x0004, 0x1b2e: 0x0004, 0x1b2f: 0x0400, + 0x1b30: 0x0400, 0x1b31: 0x0004, 0x1b32: 0x0004, 0x1b33: 0x0400, 0x1b34: 0x0400, 0x1b35: 0x0004, + 0x1b36: 0x0004, + // Block 0x6d, offset 0x1b40 + 0x1b43: 0x0004, + 0x1b4c: 0x0004, 0x1b4d: 0x0400, + 0x1b7c: 0x0004, + // Block 0x6e, offset 0x1b80 + 0x1bb0: 0x0004, 0x1bb2: 0x0004, 0x1bb3: 0x0004, 0x1bb4: 0x0004, + 0x1bb7: 0x0004, 0x1bb8: 0x0004, + 0x1bbe: 0x0004, 0x1bbf: 0x0004, + // Block 0x6f, offset 0x1bc0 + 0x1bc1: 0x0004, + 0x1beb: 0x0400, 0x1bec: 0x0004, 0x1bed: 0x0004, 0x1bee: 0x0400, 0x1bef: 0x0400, + 0x1bf5: 0x0400, + 0x1bf6: 0x0004, + // Block 0x70, offset 0x1c00 + 0x1c23: 0x0400, + 0x1c24: 0x0400, 0x1c25: 0x0004, 0x1c26: 0x0400, 0x1c27: 0x0400, 0x1c28: 0x0004, 0x1c29: 0x0400, + 0x1c2a: 0x0400, 0x1c2c: 0x0400, 0x1c2d: 0x0004, + // Block 0x71, offset 0x1c40 + 0x1c40: 0x0040, 0x1c41: 0x0080, 0x1c42: 0x0080, 0x1c43: 0x0080, 0x1c44: 0x0080, 0x1c45: 0x0080, + 0x1c46: 0x0080, 0x1c47: 0x0080, 0x1c48: 0x0080, 0x1c49: 0x0080, 0x1c4a: 0x0080, 0x1c4b: 0x0080, + 0x1c4c: 0x0080, 0x1c4d: 0x0080, 0x1c4e: 0x0080, 0x1c4f: 0x0080, 0x1c50: 0x0080, 0x1c51: 0x0080, + 0x1c52: 0x0080, 0x1c53: 0x0080, 0x1c54: 0x0080, 0x1c55: 0x0080, 0x1c56: 0x0080, 0x1c57: 0x0080, + 0x1c58: 0x0080, 0x1c59: 0x0080, 0x1c5a: 0x0080, 0x1c5b: 0x0080, 0x1c5c: 0x0040, 0x1c5d: 0x0080, + 0x1c5e: 0x0080, 
0x1c5f: 0x0080, 0x1c60: 0x0080, 0x1c61: 0x0080, 0x1c62: 0x0080, 0x1c63: 0x0080, + 0x1c64: 0x0080, 0x1c65: 0x0080, 0x1c66: 0x0080, 0x1c67: 0x0080, 0x1c68: 0x0080, 0x1c69: 0x0080, + 0x1c6a: 0x0080, 0x1c6b: 0x0080, 0x1c6c: 0x0080, 0x1c6d: 0x0080, 0x1c6e: 0x0080, 0x1c6f: 0x0080, + 0x1c70: 0x0080, 0x1c71: 0x0080, 0x1c72: 0x0080, 0x1c73: 0x0080, 0x1c74: 0x0080, 0x1c75: 0x0080, + 0x1c76: 0x0080, 0x1c77: 0x0080, 0x1c78: 0x0040, 0x1c79: 0x0080, 0x1c7a: 0x0080, 0x1c7b: 0x0080, + 0x1c7c: 0x0080, 0x1c7d: 0x0080, 0x1c7e: 0x0080, 0x1c7f: 0x0080, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x0080, 0x1c81: 0x0080, 0x1c82: 0x0080, 0x1c83: 0x0080, 0x1c84: 0x0080, 0x1c85: 0x0080, + 0x1c86: 0x0080, 0x1c87: 0x0080, 0x1c88: 0x0080, 0x1c89: 0x0080, 0x1c8a: 0x0080, 0x1c8b: 0x0080, + 0x1c8c: 0x0080, 0x1c8d: 0x0080, 0x1c8e: 0x0080, 0x1c8f: 0x0080, 0x1c90: 0x0080, 0x1c91: 0x0080, + 0x1c92: 0x0080, 0x1c93: 0x0080, 0x1c94: 0x0040, 0x1c95: 0x0080, 0x1c96: 0x0080, 0x1c97: 0x0080, + 0x1c98: 0x0080, 0x1c99: 0x0080, 0x1c9a: 0x0080, 0x1c9b: 0x0080, 0x1c9c: 0x0080, 0x1c9d: 0x0080, + 0x1c9e: 0x0080, 0x1c9f: 0x0080, 0x1ca0: 0x0080, 0x1ca1: 0x0080, 0x1ca2: 0x0080, 0x1ca3: 0x0080, + 0x1ca4: 0x0080, 0x1ca5: 0x0080, 0x1ca6: 0x0080, 0x1ca7: 0x0080, 0x1ca8: 0x0080, 0x1ca9: 0x0080, + 0x1caa: 0x0080, 0x1cab: 0x0080, 0x1cac: 0x0080, 0x1cad: 0x0080, 0x1cae: 0x0080, 0x1caf: 0x0080, + 0x1cb0: 0x0040, 0x1cb1: 0x0080, 0x1cb2: 0x0080, 0x1cb3: 0x0080, 0x1cb4: 0x0080, 0x1cb5: 0x0080, + 0x1cb6: 0x0080, 0x1cb7: 0x0080, 0x1cb8: 0x0080, 0x1cb9: 0x0080, 0x1cba: 0x0080, 0x1cbb: 0x0080, + 0x1cbc: 0x0080, 0x1cbd: 0x0080, 0x1cbe: 0x0080, 0x1cbf: 0x0080, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0080, 0x1cc1: 0x0080, 0x1cc2: 0x0080, 0x1cc3: 0x0080, 0x1cc4: 0x0080, 0x1cc5: 0x0080, + 0x1cc6: 0x0080, 0x1cc7: 0x0080, 0x1cc8: 0x0080, 0x1cc9: 0x0080, 0x1cca: 0x0080, 0x1ccb: 0x0080, + 0x1ccc: 0x0040, 0x1ccd: 0x0080, 0x1cce: 0x0080, 0x1ccf: 0x0080, 0x1cd0: 0x0080, 0x1cd1: 0x0080, + 0x1cd2: 0x0080, 0x1cd3: 0x0080, 0x1cd4: 0x0080, 0x1cd5: 0x0080, 0x1cd6: 0x0080, 0x1cd7: 0x0080, + 0x1cd8: 0x0080, 0x1cd9: 0x0080, 0x1cda: 0x0080, 0x1cdb: 0x0080, 0x1cdc: 0x0080, 0x1cdd: 0x0080, + 0x1cde: 0x0080, 0x1cdf: 0x0080, 0x1ce0: 0x0080, 0x1ce1: 0x0080, 0x1ce2: 0x0080, 0x1ce3: 0x0080, + 0x1ce4: 0x0080, 0x1ce5: 0x0080, 0x1ce6: 0x0080, 0x1ce7: 0x0080, 0x1ce8: 0x0040, 0x1ce9: 0x0080, + 0x1cea: 0x0080, 0x1ceb: 0x0080, 0x1cec: 0x0080, 0x1ced: 0x0080, 0x1cee: 0x0080, 0x1cef: 0x0080, + 0x1cf0: 0x0080, 0x1cf1: 0x0080, 0x1cf2: 0x0080, 0x1cf3: 0x0080, 0x1cf4: 0x0080, 0x1cf5: 0x0080, + 0x1cf6: 0x0080, 0x1cf7: 0x0080, 0x1cf8: 0x0080, 0x1cf9: 0x0080, 0x1cfa: 0x0080, 0x1cfb: 0x0080, + 0x1cfc: 0x0080, 0x1cfd: 0x0080, 0x1cfe: 0x0080, 0x1cff: 0x0080, + // Block 0x74, offset 0x1d00 + 0x1d00: 0x0080, 0x1d01: 0x0080, 0x1d02: 0x0080, 0x1d03: 0x0080, 0x1d04: 0x0040, 0x1d05: 0x0080, + 0x1d06: 0x0080, 0x1d07: 0x0080, 0x1d08: 0x0080, 0x1d09: 0x0080, 0x1d0a: 0x0080, 0x1d0b: 0x0080, + 0x1d0c: 0x0080, 0x1d0d: 0x0080, 0x1d0e: 0x0080, 0x1d0f: 0x0080, 0x1d10: 0x0080, 0x1d11: 0x0080, + 0x1d12: 0x0080, 0x1d13: 0x0080, 0x1d14: 0x0080, 0x1d15: 0x0080, 0x1d16: 0x0080, 0x1d17: 0x0080, + 0x1d18: 0x0080, 0x1d19: 0x0080, 0x1d1a: 0x0080, 0x1d1b: 0x0080, 0x1d1c: 0x0080, 0x1d1d: 0x0080, + 0x1d1e: 0x0080, 0x1d1f: 0x0080, 0x1d20: 0x0040, 0x1d21: 0x0080, 0x1d22: 0x0080, 0x1d23: 0x0080, + 0x1d24: 0x0080, 0x1d25: 0x0080, 0x1d26: 0x0080, 0x1d27: 0x0080, 0x1d28: 0x0080, 0x1d29: 0x0080, + 0x1d2a: 0x0080, 0x1d2b: 0x0080, 0x1d2c: 0x0080, 0x1d2d: 0x0080, 0x1d2e: 0x0080, 0x1d2f: 0x0080, + 0x1d30: 0x0080, 0x1d31: 0x0080, 0x1d32: 
0x0080, 0x1d33: 0x0080, 0x1d34: 0x0080, 0x1d35: 0x0080, + 0x1d36: 0x0080, 0x1d37: 0x0080, 0x1d38: 0x0080, 0x1d39: 0x0080, 0x1d3a: 0x0080, 0x1d3b: 0x0080, + 0x1d3c: 0x0040, 0x1d3d: 0x0080, 0x1d3e: 0x0080, 0x1d3f: 0x0080, + // Block 0x75, offset 0x1d40 + 0x1d40: 0x0080, 0x1d41: 0x0080, 0x1d42: 0x0080, 0x1d43: 0x0080, 0x1d44: 0x0080, 0x1d45: 0x0080, + 0x1d46: 0x0080, 0x1d47: 0x0080, 0x1d48: 0x0080, 0x1d49: 0x0080, 0x1d4a: 0x0080, 0x1d4b: 0x0080, + 0x1d4c: 0x0080, 0x1d4d: 0x0080, 0x1d4e: 0x0080, 0x1d4f: 0x0080, 0x1d50: 0x0080, 0x1d51: 0x0080, + 0x1d52: 0x0080, 0x1d53: 0x0080, 0x1d54: 0x0080, 0x1d55: 0x0080, 0x1d56: 0x0080, 0x1d57: 0x0080, + 0x1d58: 0x0040, 0x1d59: 0x0080, 0x1d5a: 0x0080, 0x1d5b: 0x0080, 0x1d5c: 0x0080, 0x1d5d: 0x0080, + 0x1d5e: 0x0080, 0x1d5f: 0x0080, 0x1d60: 0x0080, 0x1d61: 0x0080, 0x1d62: 0x0080, 0x1d63: 0x0080, + 0x1d64: 0x0080, 0x1d65: 0x0080, 0x1d66: 0x0080, 0x1d67: 0x0080, 0x1d68: 0x0080, 0x1d69: 0x0080, + 0x1d6a: 0x0080, 0x1d6b: 0x0080, 0x1d6c: 0x0080, 0x1d6d: 0x0080, 0x1d6e: 0x0080, 0x1d6f: 0x0080, + 0x1d70: 0x0080, 0x1d71: 0x0080, 0x1d72: 0x0080, 0x1d73: 0x0080, 0x1d74: 0x0040, 0x1d75: 0x0080, + 0x1d76: 0x0080, 0x1d77: 0x0080, 0x1d78: 0x0080, 0x1d79: 0x0080, 0x1d7a: 0x0080, 0x1d7b: 0x0080, + 0x1d7c: 0x0080, 0x1d7d: 0x0080, 0x1d7e: 0x0080, 0x1d7f: 0x0080, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x0080, 0x1d81: 0x0080, 0x1d82: 0x0080, 0x1d83: 0x0080, 0x1d84: 0x0080, 0x1d85: 0x0080, + 0x1d86: 0x0080, 0x1d87: 0x0080, 0x1d88: 0x0080, 0x1d89: 0x0080, 0x1d8a: 0x0080, 0x1d8b: 0x0080, + 0x1d8c: 0x0080, 0x1d8d: 0x0080, 0x1d8e: 0x0080, 0x1d8f: 0x0080, 0x1d90: 0x0040, 0x1d91: 0x0080, + 0x1d92: 0x0080, 0x1d93: 0x0080, 0x1d94: 0x0080, 0x1d95: 0x0080, 0x1d96: 0x0080, 0x1d97: 0x0080, + 0x1d98: 0x0080, 0x1d99: 0x0080, 0x1d9a: 0x0080, 0x1d9b: 0x0080, 0x1d9c: 0x0080, 0x1d9d: 0x0080, + 0x1d9e: 0x0080, 0x1d9f: 0x0080, 0x1da0: 0x0080, 0x1da1: 0x0080, 0x1da2: 0x0080, 0x1da3: 0x0080, + 0x1da4: 0x0080, 0x1da5: 0x0080, 0x1da6: 0x0080, 0x1da7: 0x0080, 0x1da8: 0x0080, 0x1da9: 0x0080, + 0x1daa: 0x0080, 0x1dab: 0x0080, 0x1dac: 0x0040, 0x1dad: 0x0080, 0x1dae: 0x0080, 0x1daf: 0x0080, + 0x1db0: 0x0080, 0x1db1: 0x0080, 0x1db2: 0x0080, 0x1db3: 0x0080, 0x1db4: 0x0080, 0x1db5: 0x0080, + 0x1db6: 0x0080, 0x1db7: 0x0080, 0x1db8: 0x0080, 0x1db9: 0x0080, 0x1dba: 0x0080, 0x1dbb: 0x0080, + 0x1dbc: 0x0080, 0x1dbd: 0x0080, 0x1dbe: 0x0080, 0x1dbf: 0x0080, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0x0080, 0x1dc1: 0x0080, 0x1dc2: 0x0080, 0x1dc3: 0x0080, 0x1dc4: 0x0080, 0x1dc5: 0x0080, + 0x1dc6: 0x0080, 0x1dc7: 0x0080, 0x1dc8: 0x0040, 0x1dc9: 0x0080, 0x1dca: 0x0080, 0x1dcb: 0x0080, + 0x1dcc: 0x0080, 0x1dcd: 0x0080, 0x1dce: 0x0080, 0x1dcf: 0x0080, 0x1dd0: 0x0080, 0x1dd1: 0x0080, + 0x1dd2: 0x0080, 0x1dd3: 0x0080, 0x1dd4: 0x0080, 0x1dd5: 0x0080, 0x1dd6: 0x0080, 0x1dd7: 0x0080, + 0x1dd8: 0x0080, 0x1dd9: 0x0080, 0x1dda: 0x0080, 0x1ddb: 0x0080, 0x1ddc: 0x0080, 0x1ddd: 0x0080, + 0x1dde: 0x0080, 0x1ddf: 0x0080, 0x1de0: 0x0080, 0x1de1: 0x0080, 0x1de2: 0x0080, 0x1de3: 0x0080, + 0x1de4: 0x0040, 0x1de5: 0x0080, 0x1de6: 0x0080, 0x1de7: 0x0080, 0x1de8: 0x0080, 0x1de9: 0x0080, + 0x1dea: 0x0080, 0x1deb: 0x0080, 0x1dec: 0x0080, 0x1ded: 0x0080, 0x1dee: 0x0080, 0x1def: 0x0080, + 0x1df0: 0x0080, 0x1df1: 0x0080, 0x1df2: 0x0080, 0x1df3: 0x0080, 0x1df4: 0x0080, 0x1df5: 0x0080, + 0x1df6: 0x0080, 0x1df7: 0x0080, 0x1df8: 0x0080, 0x1df9: 0x0080, 0x1dfa: 0x0080, 0x1dfb: 0x0080, + 0x1dfc: 0x0080, 0x1dfd: 0x0080, 0x1dfe: 0x0080, 0x1dff: 0x0080, + // Block 0x78, offset 0x1e00 + 0x1e00: 0x0080, 0x1e01: 0x0080, 0x1e02: 0x0080, 0x1e03: 0x0080, 
0x1e04: 0x0080, 0x1e05: 0x0080, + 0x1e06: 0x0080, 0x1e07: 0x0080, 0x1e08: 0x0040, 0x1e09: 0x0080, 0x1e0a: 0x0080, 0x1e0b: 0x0080, + 0x1e0c: 0x0080, 0x1e0d: 0x0080, 0x1e0e: 0x0080, 0x1e0f: 0x0080, 0x1e10: 0x0080, 0x1e11: 0x0080, + 0x1e12: 0x0080, 0x1e13: 0x0080, 0x1e14: 0x0080, 0x1e15: 0x0080, 0x1e16: 0x0080, 0x1e17: 0x0080, + 0x1e18: 0x0080, 0x1e19: 0x0080, 0x1e1a: 0x0080, 0x1e1b: 0x0080, 0x1e1c: 0x0080, 0x1e1d: 0x0080, + 0x1e1e: 0x0080, 0x1e1f: 0x0080, 0x1e20: 0x0080, 0x1e21: 0x0080, 0x1e22: 0x0080, 0x1e23: 0x0080, + 0x1e30: 0x1000, 0x1e31: 0x1000, 0x1e32: 0x1000, 0x1e33: 0x1000, 0x1e34: 0x1000, 0x1e35: 0x1000, + 0x1e36: 0x1000, 0x1e37: 0x1000, 0x1e38: 0x1000, 0x1e39: 0x1000, 0x1e3a: 0x1000, 0x1e3b: 0x1000, + 0x1e3c: 0x1000, 0x1e3d: 0x1000, 0x1e3e: 0x1000, 0x1e3f: 0x1000, + // Block 0x79, offset 0x1e40 + 0x1e40: 0x1000, 0x1e41: 0x1000, 0x1e42: 0x1000, 0x1e43: 0x1000, 0x1e44: 0x1000, 0x1e45: 0x1000, + 0x1e46: 0x1000, 0x1e4b: 0x0800, + 0x1e4c: 0x0800, 0x1e4d: 0x0800, 0x1e4e: 0x0800, 0x1e4f: 0x0800, 0x1e50: 0x0800, 0x1e51: 0x0800, + 0x1e52: 0x0800, 0x1e53: 0x0800, 0x1e54: 0x0800, 0x1e55: 0x0800, 0x1e56: 0x0800, 0x1e57: 0x0800, + 0x1e58: 0x0800, 0x1e59: 0x0800, 0x1e5a: 0x0800, 0x1e5b: 0x0800, 0x1e5c: 0x0800, 0x1e5d: 0x0800, + 0x1e5e: 0x0800, 0x1e5f: 0x0800, 0x1e60: 0x0800, 0x1e61: 0x0800, 0x1e62: 0x0800, 0x1e63: 0x0800, + 0x1e64: 0x0800, 0x1e65: 0x0800, 0x1e66: 0x0800, 0x1e67: 0x0800, 0x1e68: 0x0800, 0x1e69: 0x0800, + 0x1e6a: 0x0800, 0x1e6b: 0x0800, 0x1e6c: 0x0800, 0x1e6d: 0x0800, 0x1e6e: 0x0800, 0x1e6f: 0x0800, + 0x1e70: 0x0800, 0x1e71: 0x0800, 0x1e72: 0x0800, 0x1e73: 0x0800, 0x1e74: 0x0800, 0x1e75: 0x0800, + 0x1e76: 0x0800, 0x1e77: 0x0800, 0x1e78: 0x0800, 0x1e79: 0x0800, 0x1e7a: 0x0800, 0x1e7b: 0x0800, + // Block 0x7a, offset 0x1e80 + 0x1e9e: 0x0004, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0x0004, 0x1ec1: 0x0004, 0x1ec2: 0x0004, 0x1ec3: 0x0004, 0x1ec4: 0x0004, 0x1ec5: 0x0004, + 0x1ec6: 0x0004, 0x1ec7: 0x0004, 0x1ec8: 0x0004, 0x1ec9: 0x0004, 0x1eca: 0x0004, 0x1ecb: 0x0004, + 0x1ecc: 0x0004, 0x1ecd: 0x0004, 0x1ece: 0x0004, 0x1ecf: 0x0004, + 0x1ee0: 0x0004, 0x1ee1: 0x0004, 0x1ee2: 0x0004, 0x1ee3: 0x0004, + 0x1ee4: 0x0004, 0x1ee5: 0x0004, 0x1ee6: 0x0004, 0x1ee7: 0x0004, 0x1ee8: 0x0004, 0x1ee9: 0x0004, + 0x1eea: 0x0004, 0x1eeb: 0x0004, 0x1eec: 0x0004, 0x1eed: 0x0004, 0x1eee: 0x0004, 0x1eef: 0x0004, + // Block 0x7c, offset 0x1f00 + 0x1f3f: 0x0002, + // Block 0x7d, offset 0x1f40 + 0x1f70: 0x0002, 0x1f71: 0x0002, 0x1f72: 0x0002, 0x1f73: 0x0002, 0x1f74: 0x0002, 0x1f75: 0x0002, + 0x1f76: 0x0002, 0x1f77: 0x0002, 0x1f78: 0x0002, 0x1f79: 0x0002, 0x1f7a: 0x0002, 0x1f7b: 0x0002, + // Block 0x7e, offset 0x1f80 + 0x1fbd: 0x0004, + // Block 0x7f, offset 0x1fc0 + 0x1fe0: 0x0004, + // Block 0x80, offset 0x2000 + 0x2036: 0x0004, 0x2037: 0x0004, 0x2038: 0x0004, 0x2039: 0x0004, 0x203a: 0x0004, + // Block 0x81, offset 0x2040 + 0x2041: 0x0004, 0x2042: 0x0004, 0x2043: 0x0004, 0x2045: 0x0004, + 0x2046: 0x0004, + 0x204c: 0x0004, 0x204d: 0x0004, 0x204e: 0x0004, 0x204f: 0x0004, + 0x2078: 0x0004, 0x2079: 0x0004, 0x207a: 0x0004, + 0x207f: 0x0004, + // Block 0x82, offset 0x2080 + 0x20a5: 0x0004, 0x20a6: 0x0004, + // Block 0x83, offset 0x20c0 + 0x20e4: 0x0004, 0x20e5: 0x0004, 0x20e6: 0x0004, 0x20e7: 0x0004, + // Block 0x84, offset 0x2100 + 0x212b: 0x0004, 0x212c: 0x0004, + // Block 0x85, offset 0x2140 + 0x217d: 0x0004, 0x217e: 0x0004, 0x217f: 0x0004, + // Block 0x86, offset 0x2180 + 0x2186: 0x0004, 0x2187: 0x0004, 0x2188: 0x0004, 0x2189: 0x0004, 0x218a: 0x0004, 0x218b: 0x0004, + 0x218c: 0x0004, 0x218d: 0x0004, 0x218e: 
0x0004, 0x218f: 0x0004, 0x2190: 0x0004, + // Block 0x87, offset 0x21c0 + 0x21c2: 0x0004, 0x21c3: 0x0004, 0x21c4: 0x0004, 0x21c5: 0x0004, + // Block 0x88, offset 0x2200 + 0x2200: 0x0400, 0x2201: 0x0004, 0x2202: 0x0400, + 0x2238: 0x0004, 0x2239: 0x0004, 0x223a: 0x0004, 0x223b: 0x0004, + 0x223c: 0x0004, 0x223d: 0x0004, 0x223e: 0x0004, 0x223f: 0x0004, + // Block 0x89, offset 0x2240 + 0x2240: 0x0004, 0x2241: 0x0004, 0x2242: 0x0004, 0x2243: 0x0004, 0x2244: 0x0004, 0x2245: 0x0004, + 0x2246: 0x0004, + 0x2270: 0x0004, 0x2273: 0x0004, 0x2274: 0x0004, + 0x227f: 0x0004, + // Block 0x8a, offset 0x2280 + 0x2280: 0x0004, 0x2281: 0x0004, 0x2282: 0x0400, + 0x22b0: 0x0400, 0x22b1: 0x0400, 0x22b2: 0x0400, 0x22b3: 0x0004, 0x22b4: 0x0004, 0x22b5: 0x0004, + 0x22b6: 0x0004, 0x22b7: 0x0400, 0x22b8: 0x0400, 0x22b9: 0x0004, 0x22ba: 0x0004, + 0x22bd: 0x0100, + // Block 0x8b, offset 0x22c0 + 0x22c2: 0x0004, + 0x22cd: 0x0100, + // Block 0x8c, offset 0x2300 + 0x2300: 0x0004, 0x2301: 0x0004, 0x2302: 0x0004, + 0x2327: 0x0004, 0x2328: 0x0004, 0x2329: 0x0004, + 0x232a: 0x0004, 0x232b: 0x0004, 0x232c: 0x0400, 0x232d: 0x0004, 0x232e: 0x0004, 0x232f: 0x0004, + 0x2330: 0x0004, 0x2331: 0x0004, 0x2332: 0x0004, 0x2333: 0x0004, 0x2334: 0x0004, + // Block 0x8d, offset 0x2340 + 0x2345: 0x0400, + 0x2346: 0x0400, + 0x2373: 0x0004, + // Block 0x8e, offset 0x2380 + 0x2380: 0x0004, 0x2381: 0x0004, 0x2382: 0x0400, + 0x23b3: 0x0400, 0x23b4: 0x0400, 0x23b5: 0x0400, + 0x23b6: 0x0004, 0x23b7: 0x0004, 0x23b8: 0x0004, 0x23b9: 0x0004, 0x23ba: 0x0004, 0x23bb: 0x0004, + 0x23bc: 0x0004, 0x23bd: 0x0004, 0x23be: 0x0004, 0x23bf: 0x0400, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x0400, 0x23c2: 0x0100, 0x23c3: 0x0100, + 0x23c9: 0x0004, 0x23ca: 0x0004, 0x23cb: 0x0004, + 0x23cc: 0x0004, 0x23ce: 0x0400, 0x23cf: 0x0004, + // Block 0x90, offset 0x2400 + 0x242c: 0x0400, 0x242d: 0x0400, 0x242e: 0x0400, 0x242f: 0x0004, + 0x2430: 0x0004, 0x2431: 0x0004, 0x2432: 0x0400, 0x2433: 0x0400, 0x2434: 0x0004, 0x2435: 0x0400, + 0x2436: 0x0004, 0x2437: 0x0004, + 0x243e: 0x0004, + // Block 0x91, offset 0x2440 + 0x2441: 0x0004, + // Block 0x92, offset 0x2480 + 0x249f: 0x0004, 0x24a0: 0x0400, 0x24a1: 0x0400, 0x24a2: 0x0400, 0x24a3: 0x0004, + 0x24a4: 0x0004, 0x24a5: 0x0004, 0x24a6: 0x0004, 0x24a7: 0x0004, 0x24a8: 0x0004, 0x24a9: 0x0004, + 0x24aa: 0x0004, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x0004, 0x24c1: 0x0400, 0x24c2: 0x0400, 0x24c3: 0x0400, 0x24c4: 0x0400, + 0x24c7: 0x0400, 0x24c8: 0x0400, 0x24cb: 0x0400, + 0x24cc: 0x0400, 0x24cd: 0x0400, + 0x24d7: 0x0004, + 0x24e2: 0x0400, 0x24e3: 0x0400, + 0x24e6: 0x0004, 0x24e7: 0x0004, 0x24e8: 0x0004, 0x24e9: 0x0004, + 0x24ea: 0x0004, 0x24eb: 0x0004, 0x24ec: 0x0004, + 0x24f0: 0x0004, 0x24f1: 0x0004, 0x24f2: 0x0004, 0x24f3: 0x0004, 0x24f4: 0x0004, + // Block 0x94, offset 0x2500 + 0x2535: 0x0400, + 0x2536: 0x0400, 0x2537: 0x0400, 0x2538: 0x0004, 0x2539: 0x0004, 0x253a: 0x0004, 0x253b: 0x0004, + 0x253c: 0x0004, 0x253d: 0x0004, 0x253e: 0x0004, 0x253f: 0x0004, + // Block 0x95, offset 0x2540 + 0x2540: 0x0400, 0x2541: 0x0400, 0x2542: 0x0004, 0x2543: 0x0004, 0x2544: 0x0004, 0x2545: 0x0400, + 0x2546: 0x0004, + 0x255e: 0x0004, + // Block 0x96, offset 0x2580 + 0x25b0: 0x0004, 0x25b1: 0x0400, 0x25b2: 0x0400, 0x25b3: 0x0004, 0x25b4: 0x0004, 0x25b5: 0x0004, + 0x25b6: 0x0004, 0x25b7: 0x0004, 0x25b8: 0x0004, 0x25b9: 0x0400, 0x25ba: 0x0004, 0x25bb: 0x0400, + 0x25bc: 0x0400, 0x25bd: 0x0004, 0x25be: 0x0400, 0x25bf: 0x0004, + // Block 0x97, offset 0x25c0 + 0x25c0: 0x0004, 0x25c1: 0x0400, 0x25c2: 0x0004, 0x25c3: 0x0004, + // Block 0x98, offset 0x2600 
+ 0x262f: 0x0004, + 0x2630: 0x0400, 0x2631: 0x0400, 0x2632: 0x0004, 0x2633: 0x0004, 0x2634: 0x0004, 0x2635: 0x0004, + 0x2638: 0x0400, 0x2639: 0x0400, 0x263a: 0x0400, 0x263b: 0x0400, + 0x263c: 0x0004, 0x263d: 0x0004, 0x263e: 0x0400, 0x263f: 0x0004, + // Block 0x99, offset 0x2640 + 0x2640: 0x0004, + 0x265c: 0x0004, 0x265d: 0x0004, + // Block 0x9a, offset 0x2680 + 0x26b0: 0x0400, 0x26b1: 0x0400, 0x26b2: 0x0400, 0x26b3: 0x0004, 0x26b4: 0x0004, 0x26b5: 0x0004, + 0x26b6: 0x0004, 0x26b7: 0x0004, 0x26b8: 0x0004, 0x26b9: 0x0004, 0x26ba: 0x0004, 0x26bb: 0x0400, + 0x26bc: 0x0400, 0x26bd: 0x0004, 0x26be: 0x0400, 0x26bf: 0x0004, + // Block 0x9b, offset 0x26c0 + 0x26c0: 0x0004, + // Block 0x9c, offset 0x2700 + 0x272b: 0x0004, 0x272c: 0x0400, 0x272d: 0x0004, 0x272e: 0x0400, 0x272f: 0x0400, + 0x2730: 0x0004, 0x2731: 0x0004, 0x2732: 0x0004, 0x2733: 0x0004, 0x2734: 0x0004, 0x2735: 0x0004, + 0x2736: 0x0400, 0x2737: 0x0004, + // Block 0x9d, offset 0x2740 + 0x275d: 0x0004, + 0x275e: 0x0004, 0x275f: 0x0004, 0x2762: 0x0004, 0x2763: 0x0004, + 0x2764: 0x0004, 0x2765: 0x0004, 0x2766: 0x0400, 0x2767: 0x0004, 0x2768: 0x0004, 0x2769: 0x0004, + 0x276a: 0x0004, 0x276b: 0x0004, + // Block 0x9e, offset 0x2780 + 0x27ac: 0x0400, 0x27ad: 0x0400, 0x27ae: 0x0400, 0x27af: 0x0004, + 0x27b0: 0x0004, 0x27b1: 0x0004, 0x27b2: 0x0004, 0x27b3: 0x0004, 0x27b4: 0x0004, 0x27b5: 0x0004, + 0x27b6: 0x0004, 0x27b7: 0x0004, 0x27b8: 0x0400, 0x27b9: 0x0004, 0x27ba: 0x0004, + // Block 0x9f, offset 0x27c0 + 0x27f0: 0x0004, 0x27f1: 0x0400, 0x27f2: 0x0400, 0x27f3: 0x0400, 0x27f4: 0x0400, 0x27f5: 0x0400, + 0x27f7: 0x0400, 0x27f8: 0x0400, 0x27fb: 0x0004, + 0x27fc: 0x0004, 0x27fd: 0x0400, 0x27fe: 0x0004, 0x27ff: 0x0100, + // Block 0xa0, offset 0x2800 + 0x2800: 0x0400, 0x2801: 0x0100, 0x2802: 0x0400, 0x2803: 0x0004, + // Block 0xa1, offset 0x2840 + 0x2851: 0x0400, + 0x2852: 0x0400, 0x2853: 0x0400, 0x2854: 0x0004, 0x2855: 0x0004, 0x2856: 0x0004, 0x2857: 0x0004, + 0x285a: 0x0004, 0x285b: 0x0004, 0x285c: 0x0400, 0x285d: 0x0400, + 0x285e: 0x0400, 0x285f: 0x0400, 0x2860: 0x0004, + 0x2864: 0x0400, + // Block 0xa2, offset 0x2880 + 0x2881: 0x0004, 0x2882: 0x0004, 0x2883: 0x0004, 0x2884: 0x0004, 0x2885: 0x0004, + 0x2886: 0x0004, 0x2887: 0x0004, 0x2888: 0x0004, 0x2889: 0x0004, 0x288a: 0x0004, + 0x28b3: 0x0004, 0x28b4: 0x0004, 0x28b5: 0x0004, + 0x28b6: 0x0004, 0x28b7: 0x0004, 0x28b8: 0x0004, 0x28b9: 0x0400, 0x28ba: 0x0100, 0x28bb: 0x0004, + 0x28bc: 0x0004, 0x28bd: 0x0004, 0x28be: 0x0004, + // Block 0xa3, offset 0x28c0 + 0x28c7: 0x0004, + 0x28d1: 0x0004, + 0x28d2: 0x0004, 0x28d3: 0x0004, 0x28d4: 0x0004, 0x28d5: 0x0004, 0x28d6: 0x0004, 0x28d7: 0x0400, + 0x28d8: 0x0400, 0x28d9: 0x0004, 0x28da: 0x0004, 0x28db: 0x0004, + // Block 0xa4, offset 0x2900 + 0x2904: 0x0100, 0x2905: 0x0100, + 0x2906: 0x0100, 0x2907: 0x0100, 0x2908: 0x0100, 0x2909: 0x0100, 0x290a: 0x0004, 0x290b: 0x0004, + 0x290c: 0x0004, 0x290d: 0x0004, 0x290e: 0x0004, 0x290f: 0x0004, 0x2910: 0x0004, 0x2911: 0x0004, + 0x2912: 0x0004, 0x2913: 0x0004, 0x2914: 0x0004, 0x2915: 0x0004, 0x2916: 0x0004, 0x2917: 0x0400, + 0x2918: 0x0004, 0x2919: 0x0004, + // Block 0xa5, offset 0x2940 + 0x296f: 0x0400, + 0x2970: 0x0004, 0x2971: 0x0004, 0x2972: 0x0004, 0x2973: 0x0004, 0x2974: 0x0004, 0x2975: 0x0004, + 0x2976: 0x0004, 0x2978: 0x0004, 0x2979: 0x0004, 0x297a: 0x0004, 0x297b: 0x0004, + 0x297c: 0x0004, 0x297d: 0x0004, 0x297e: 0x0400, 0x297f: 0x0004, + // Block 0xa6, offset 0x2980 + 0x2992: 0x0004, 0x2993: 0x0004, 0x2994: 0x0004, 0x2995: 0x0004, 0x2996: 0x0004, 0x2997: 0x0004, + 0x2998: 0x0004, 0x2999: 0x0004, 0x299a: 
0x0004, 0x299b: 0x0004, 0x299c: 0x0004, 0x299d: 0x0004, + 0x299e: 0x0004, 0x299f: 0x0004, 0x29a0: 0x0004, 0x29a1: 0x0004, 0x29a2: 0x0004, 0x29a3: 0x0004, + 0x29a4: 0x0004, 0x29a5: 0x0004, 0x29a6: 0x0004, 0x29a7: 0x0004, 0x29a9: 0x0400, + 0x29aa: 0x0004, 0x29ab: 0x0004, 0x29ac: 0x0004, 0x29ad: 0x0004, 0x29ae: 0x0004, 0x29af: 0x0004, + 0x29b0: 0x0004, 0x29b1: 0x0400, 0x29b2: 0x0004, 0x29b3: 0x0004, 0x29b4: 0x0400, 0x29b5: 0x0004, + 0x29b6: 0x0004, + // Block 0xa7, offset 0x29c0 + 0x29f1: 0x0004, 0x29f2: 0x0004, 0x29f3: 0x0004, 0x29f4: 0x0004, 0x29f5: 0x0004, + 0x29f6: 0x0004, 0x29fa: 0x0004, + 0x29fc: 0x0004, 0x29fd: 0x0004, 0x29ff: 0x0004, + // Block 0xa8, offset 0x2a00 + 0x2a00: 0x0004, 0x2a01: 0x0004, 0x2a02: 0x0004, 0x2a03: 0x0004, 0x2a04: 0x0004, 0x2a05: 0x0004, + 0x2a06: 0x0100, 0x2a07: 0x0004, + // Block 0xa9, offset 0x2a40 + 0x2a4a: 0x0400, 0x2a4b: 0x0400, + 0x2a4c: 0x0400, 0x2a4d: 0x0400, 0x2a4e: 0x0400, 0x2a50: 0x0004, 0x2a51: 0x0004, + 0x2a53: 0x0400, 0x2a54: 0x0400, 0x2a55: 0x0004, 0x2a56: 0x0400, 0x2a57: 0x0004, + // Block 0xaa, offset 0x2a80 + 0x2ab3: 0x0004, 0x2ab4: 0x0004, 0x2ab5: 0x0400, + 0x2ab6: 0x0400, + // Block 0xab, offset 0x2ac0 + 0x2ac0: 0x0004, 0x2ac1: 0x0004, 0x2ac2: 0x0100, 0x2ac3: 0x0400, + 0x2af4: 0x0400, 0x2af5: 0x0400, + 0x2af6: 0x0004, 0x2af7: 0x0004, 0x2af8: 0x0004, 0x2af9: 0x0004, 0x2afa: 0x0004, + 0x2afe: 0x0400, 0x2aff: 0x0400, + // Block 0xac, offset 0x2b00 + 0x2b00: 0x0004, 0x2b01: 0x0400, 0x2b02: 0x0004, + // Block 0xad, offset 0x2b40 + 0x2b70: 0x0002, 0x2b71: 0x0002, 0x2b72: 0x0002, 0x2b73: 0x0002, 0x2b74: 0x0002, 0x2b75: 0x0002, + 0x2b76: 0x0002, 0x2b77: 0x0002, 0x2b78: 0x0002, 0x2b79: 0x0002, 0x2b7a: 0x0002, 0x2b7b: 0x0002, + 0x2b7c: 0x0002, 0x2b7d: 0x0002, 0x2b7e: 0x0002, 0x2b7f: 0x0002, + // Block 0xae, offset 0x2b80 + 0x2b80: 0x0004, + 0x2b87: 0x0004, 0x2b88: 0x0004, 0x2b89: 0x0004, 0x2b8a: 0x0004, 0x2b8b: 0x0004, + 0x2b8c: 0x0004, 0x2b8d: 0x0004, 0x2b8e: 0x0004, 0x2b8f: 0x0004, 0x2b90: 0x0004, 0x2b91: 0x0004, + 0x2b92: 0x0004, 0x2b93: 0x0004, 0x2b94: 0x0004, 0x2b95: 0x0004, + // Block 0xaf, offset 0x2bc0 + 0x2bf0: 0x0004, 0x2bf1: 0x0004, 0x2bf2: 0x0004, 0x2bf3: 0x0004, 0x2bf4: 0x0004, + // Block 0xb0, offset 0x2c00 + 0x2c30: 0x0004, 0x2c31: 0x0004, 0x2c32: 0x0004, 0x2c33: 0x0004, 0x2c34: 0x0004, 0x2c35: 0x0004, + 0x2c36: 0x0004, + // Block 0xb1, offset 0x2c40 + 0x2c4f: 0x0004, 0x2c51: 0x0400, + 0x2c52: 0x0400, 0x2c53: 0x0400, 0x2c54: 0x0400, 0x2c55: 0x0400, 0x2c56: 0x0400, 0x2c57: 0x0400, + 0x2c58: 0x0400, 0x2c59: 0x0400, 0x2c5a: 0x0400, 0x2c5b: 0x0400, 0x2c5c: 0x0400, 0x2c5d: 0x0400, + 0x2c5e: 0x0400, 0x2c5f: 0x0400, 0x2c60: 0x0400, 0x2c61: 0x0400, 0x2c62: 0x0400, 0x2c63: 0x0400, + 0x2c64: 0x0400, 0x2c65: 0x0400, 0x2c66: 0x0400, 0x2c67: 0x0400, 0x2c68: 0x0400, 0x2c69: 0x0400, + 0x2c6a: 0x0400, 0x2c6b: 0x0400, 0x2c6c: 0x0400, 0x2c6d: 0x0400, 0x2c6e: 0x0400, 0x2c6f: 0x0400, + 0x2c70: 0x0400, 0x2c71: 0x0400, 0x2c72: 0x0400, 0x2c73: 0x0400, 0x2c74: 0x0400, 0x2c75: 0x0400, + 0x2c76: 0x0400, 0x2c77: 0x0400, 0x2c78: 0x0400, 0x2c79: 0x0400, 0x2c7a: 0x0400, 0x2c7b: 0x0400, + 0x2c7c: 0x0400, 0x2c7d: 0x0400, 0x2c7e: 0x0400, 0x2c7f: 0x0400, + // Block 0xb2, offset 0x2c80 + 0x2c80: 0x0400, 0x2c81: 0x0400, 0x2c82: 0x0400, 0x2c83: 0x0400, 0x2c84: 0x0400, 0x2c85: 0x0400, + 0x2c86: 0x0400, 0x2c87: 0x0400, + 0x2c8f: 0x0004, 0x2c90: 0x0004, 0x2c91: 0x0004, + 0x2c92: 0x0004, + // Block 0xb3, offset 0x2cc0 + 0x2ce4: 0x0004, + 0x2cf0: 0x0400, 0x2cf1: 0x0400, + // Block 0xb4, offset 0x2d00 + 0x2d1d: 0x0004, + 0x2d1e: 0x0004, 0x2d20: 0x0002, 0x2d21: 0x0002, 0x2d22: 
0x0002, 0x2d23: 0x0002, + // Block 0xb5, offset 0x2d40 + 0x2d40: 0x0004, 0x2d41: 0x0004, 0x2d42: 0x0004, 0x2d43: 0x0004, 0x2d44: 0x0004, 0x2d45: 0x0004, + 0x2d46: 0x0004, 0x2d47: 0x0004, 0x2d48: 0x0004, 0x2d49: 0x0004, 0x2d4a: 0x0004, 0x2d4b: 0x0004, + 0x2d4c: 0x0004, 0x2d4d: 0x0004, 0x2d4e: 0x0004, 0x2d4f: 0x0004, 0x2d50: 0x0004, 0x2d51: 0x0004, + 0x2d52: 0x0004, 0x2d53: 0x0004, 0x2d54: 0x0004, 0x2d55: 0x0004, 0x2d56: 0x0004, 0x2d57: 0x0004, + 0x2d58: 0x0004, 0x2d59: 0x0004, 0x2d5a: 0x0004, 0x2d5b: 0x0004, 0x2d5c: 0x0004, 0x2d5d: 0x0004, + 0x2d5e: 0x0004, 0x2d5f: 0x0004, 0x2d60: 0x0004, 0x2d61: 0x0004, 0x2d62: 0x0004, 0x2d63: 0x0004, + 0x2d64: 0x0004, 0x2d65: 0x0004, 0x2d66: 0x0004, 0x2d67: 0x0004, 0x2d68: 0x0004, 0x2d69: 0x0004, + 0x2d6a: 0x0004, 0x2d6b: 0x0004, 0x2d6c: 0x0004, 0x2d6d: 0x0004, + 0x2d70: 0x0004, 0x2d71: 0x0004, 0x2d72: 0x0004, 0x2d73: 0x0004, 0x2d74: 0x0004, 0x2d75: 0x0004, + 0x2d76: 0x0004, 0x2d77: 0x0004, 0x2d78: 0x0004, 0x2d79: 0x0004, 0x2d7a: 0x0004, 0x2d7b: 0x0004, + 0x2d7c: 0x0004, 0x2d7d: 0x0004, 0x2d7e: 0x0004, 0x2d7f: 0x0004, + // Block 0xb6, offset 0x2d80 + 0x2d80: 0x0004, 0x2d81: 0x0004, 0x2d82: 0x0004, 0x2d83: 0x0004, 0x2d84: 0x0004, 0x2d85: 0x0004, + 0x2d86: 0x0004, + // Block 0xb7, offset 0x2dc0 + 0x2de5: 0x0004, 0x2de6: 0x0400, 0x2de7: 0x0004, 0x2de8: 0x0004, 0x2de9: 0x0004, + 0x2ded: 0x0400, 0x2dee: 0x0004, 0x2def: 0x0004, + 0x2df0: 0x0004, 0x2df1: 0x0004, 0x2df2: 0x0004, 0x2df3: 0x0002, 0x2df4: 0x0002, 0x2df5: 0x0002, + 0x2df6: 0x0002, 0x2df7: 0x0002, 0x2df8: 0x0002, 0x2df9: 0x0002, 0x2dfa: 0x0002, 0x2dfb: 0x0004, + 0x2dfc: 0x0004, 0x2dfd: 0x0004, 0x2dfe: 0x0004, 0x2dff: 0x0004, + // Block 0xb8, offset 0x2e00 + 0x2e00: 0x0004, 0x2e01: 0x0004, 0x2e02: 0x0004, 0x2e05: 0x0004, + 0x2e06: 0x0004, 0x2e07: 0x0004, 0x2e08: 0x0004, 0x2e09: 0x0004, 0x2e0a: 0x0004, 0x2e0b: 0x0004, + 0x2e2a: 0x0004, 0x2e2b: 0x0004, 0x2e2c: 0x0004, 0x2e2d: 0x0004, + // Block 0xb9, offset 0x2e40 + 0x2e42: 0x0004, 0x2e43: 0x0004, 0x2e44: 0x0004, + // Block 0xba, offset 0x2e80 + 0x2e80: 0x0004, 0x2e81: 0x0004, 0x2e82: 0x0004, 0x2e83: 0x0004, 0x2e84: 0x0004, 0x2e85: 0x0004, + 0x2e86: 0x0004, 0x2e87: 0x0004, 0x2e88: 0x0004, 0x2e89: 0x0004, 0x2e8a: 0x0004, 0x2e8b: 0x0004, + 0x2e8c: 0x0004, 0x2e8d: 0x0004, 0x2e8e: 0x0004, 0x2e8f: 0x0004, 0x2e90: 0x0004, 0x2e91: 0x0004, + 0x2e92: 0x0004, 0x2e93: 0x0004, 0x2e94: 0x0004, 0x2e95: 0x0004, 0x2e96: 0x0004, 0x2e97: 0x0004, + 0x2e98: 0x0004, 0x2e99: 0x0004, 0x2e9a: 0x0004, 0x2e9b: 0x0004, 0x2e9c: 0x0004, 0x2e9d: 0x0004, + 0x2e9e: 0x0004, 0x2e9f: 0x0004, 0x2ea0: 0x0004, 0x2ea1: 0x0004, 0x2ea2: 0x0004, 0x2ea3: 0x0004, + 0x2ea4: 0x0004, 0x2ea5: 0x0004, 0x2ea6: 0x0004, 0x2ea7: 0x0004, 0x2ea8: 0x0004, 0x2ea9: 0x0004, + 0x2eaa: 0x0004, 0x2eab: 0x0004, 0x2eac: 0x0004, 0x2ead: 0x0004, 0x2eae: 0x0004, 0x2eaf: 0x0004, + 0x2eb0: 0x0004, 0x2eb1: 0x0004, 0x2eb2: 0x0004, 0x2eb3: 0x0004, 0x2eb4: 0x0004, 0x2eb5: 0x0004, + 0x2eb6: 0x0004, 0x2ebb: 0x0004, + 0x2ebc: 0x0004, 0x2ebd: 0x0004, 0x2ebe: 0x0004, 0x2ebf: 0x0004, + // Block 0xbb, offset 0x2ec0 + 0x2ec0: 0x0004, 0x2ec1: 0x0004, 0x2ec2: 0x0004, 0x2ec3: 0x0004, 0x2ec4: 0x0004, 0x2ec5: 0x0004, + 0x2ec6: 0x0004, 0x2ec7: 0x0004, 0x2ec8: 0x0004, 0x2ec9: 0x0004, 0x2eca: 0x0004, 0x2ecb: 0x0004, + 0x2ecc: 0x0004, 0x2ecd: 0x0004, 0x2ece: 0x0004, 0x2ecf: 0x0004, 0x2ed0: 0x0004, 0x2ed1: 0x0004, + 0x2ed2: 0x0004, 0x2ed3: 0x0004, 0x2ed4: 0x0004, 0x2ed5: 0x0004, 0x2ed6: 0x0004, 0x2ed7: 0x0004, + 0x2ed8: 0x0004, 0x2ed9: 0x0004, 0x2eda: 0x0004, 0x2edb: 0x0004, 0x2edc: 0x0004, 0x2edd: 0x0004, + 0x2ede: 0x0004, 0x2edf: 0x0004, 
0x2ee0: 0x0004, 0x2ee1: 0x0004, 0x2ee2: 0x0004, 0x2ee3: 0x0004, + 0x2ee4: 0x0004, 0x2ee5: 0x0004, 0x2ee6: 0x0004, 0x2ee7: 0x0004, 0x2ee8: 0x0004, 0x2ee9: 0x0004, + 0x2eea: 0x0004, 0x2eeb: 0x0004, 0x2eec: 0x0004, + 0x2ef5: 0x0004, + // Block 0xbc, offset 0x2f00 + 0x2f04: 0x0004, + 0x2f1b: 0x0004, 0x2f1c: 0x0004, 0x2f1d: 0x0004, + 0x2f1e: 0x0004, 0x2f1f: 0x0004, 0x2f21: 0x0004, 0x2f22: 0x0004, 0x2f23: 0x0004, + 0x2f24: 0x0004, 0x2f25: 0x0004, 0x2f26: 0x0004, 0x2f27: 0x0004, 0x2f28: 0x0004, 0x2f29: 0x0004, + 0x2f2a: 0x0004, 0x2f2b: 0x0004, 0x2f2c: 0x0004, 0x2f2d: 0x0004, 0x2f2e: 0x0004, 0x2f2f: 0x0004, + // Block 0xbd, offset 0x2f40 + 0x2f40: 0x0004, 0x2f41: 0x0004, 0x2f42: 0x0004, 0x2f43: 0x0004, 0x2f44: 0x0004, 0x2f45: 0x0004, + 0x2f46: 0x0004, 0x2f48: 0x0004, 0x2f49: 0x0004, 0x2f4a: 0x0004, 0x2f4b: 0x0004, + 0x2f4c: 0x0004, 0x2f4d: 0x0004, 0x2f4e: 0x0004, 0x2f4f: 0x0004, 0x2f50: 0x0004, 0x2f51: 0x0004, + 0x2f52: 0x0004, 0x2f53: 0x0004, 0x2f54: 0x0004, 0x2f55: 0x0004, 0x2f56: 0x0004, 0x2f57: 0x0004, + 0x2f58: 0x0004, 0x2f5b: 0x0004, 0x2f5c: 0x0004, 0x2f5d: 0x0004, + 0x2f5e: 0x0004, 0x2f5f: 0x0004, 0x2f60: 0x0004, 0x2f61: 0x0004, 0x2f63: 0x0004, + 0x2f64: 0x0004, 0x2f66: 0x0004, 0x2f67: 0x0004, 0x2f68: 0x0004, 0x2f69: 0x0004, + 0x2f6a: 0x0004, + // Block 0xbe, offset 0x2f80 + 0x2f8f: 0x0004, + // Block 0xbf, offset 0x2fc0 + 0x2fee: 0x0004, + // Block 0xc0, offset 0x3000 + 0x302c: 0x0004, 0x302d: 0x0004, 0x302e: 0x0004, 0x302f: 0x0004, + // Block 0xc1, offset 0x3040 + 0x3050: 0x0004, 0x3051: 0x0004, + 0x3052: 0x0004, 0x3053: 0x0004, 0x3054: 0x0004, 0x3055: 0x0004, 0x3056: 0x0004, + // Block 0xc2, offset 0x3080 + 0x3084: 0x0004, 0x3085: 0x0004, + 0x3086: 0x0004, 0x3087: 0x0004, 0x3088: 0x0004, 0x3089: 0x0004, 0x308a: 0x0004, + // Block 0xc3, offset 0x30c0 + 0x30cd: 0x0008, 0x30ce: 0x0008, 0x30cf: 0x0008, + 0x30ef: 0x0008, + // Block 0xc4, offset 0x3100 + 0x312c: 0x0008, 0x312d: 0x0008, 0x312e: 0x0008, 0x312f: 0x0008, + 0x3130: 0x0008, 0x3131: 0x0008, + 0x313e: 0x0008, 0x313f: 0x0008, + // Block 0xc5, offset 0x3140 + 0x314e: 0x0008, 0x3151: 0x0008, + 0x3152: 0x0008, 0x3153: 0x0008, 0x3154: 0x0008, 0x3155: 0x0008, 0x3156: 0x0008, 0x3157: 0x0008, + 0x3158: 0x0008, 0x3159: 0x0008, 0x315a: 0x0008, + 0x316d: 0x0008, 0x316e: 0x0008, 0x316f: 0x0008, + 0x3170: 0x0008, 0x3171: 0x0008, 0x3172: 0x0008, 0x3173: 0x0008, 0x3174: 0x0008, 0x3175: 0x0008, + 0x3176: 0x0008, 0x3177: 0x0008, 0x3178: 0x0008, 0x3179: 0x0008, 0x317a: 0x0008, 0x317b: 0x0008, + 0x317c: 0x0008, 0x317d: 0x0008, 0x317e: 0x0008, 0x317f: 0x0008, + // Block 0xc6, offset 0x3180 + 0x3180: 0x0008, 0x3181: 0x0008, 0x3182: 0x0008, 0x3183: 0x0008, 0x3184: 0x0008, 0x3185: 0x0008, + 0x3186: 0x0008, 0x3187: 0x0008, 0x3188: 0x0008, 0x3189: 0x0008, 0x318a: 0x0008, 0x318b: 0x0008, + 0x318c: 0x0008, 0x318d: 0x0008, 0x318e: 0x0008, 0x318f: 0x0008, 0x3190: 0x0008, 0x3191: 0x0008, + 0x3192: 0x0008, 0x3193: 0x0008, 0x3194: 0x0008, 0x3195: 0x0008, 0x3196: 0x0008, 0x3197: 0x0008, + 0x3198: 0x0008, 0x3199: 0x0008, 0x319a: 0x0008, 0x319b: 0x0008, 0x319c: 0x0008, 0x319d: 0x0008, + 0x319e: 0x0008, 0x319f: 0x0008, 0x31a0: 0x0008, 0x31a1: 0x0008, 0x31a2: 0x0008, 0x31a3: 0x0008, + 0x31a4: 0x0008, 0x31a5: 0x0008, 0x31a6: 0x0200, 0x31a7: 0x0200, 0x31a8: 0x0200, 0x31a9: 0x0200, + 0x31aa: 0x0200, 0x31ab: 0x0200, 0x31ac: 0x0200, 0x31ad: 0x0200, 0x31ae: 0x0200, 0x31af: 0x0200, + 0x31b0: 0x0200, 0x31b1: 0x0200, 0x31b2: 0x0200, 0x31b3: 0x0200, 0x31b4: 0x0200, 0x31b5: 0x0200, + 0x31b6: 0x0200, 0x31b7: 0x0200, 0x31b8: 0x0200, 0x31b9: 0x0200, 0x31ba: 0x0200, 0x31bb: 0x0200, + 
0x31bc: 0x0200, 0x31bd: 0x0200, 0x31be: 0x0200, 0x31bf: 0x0200, + // Block 0xc7, offset 0x31c0 + 0x31c1: 0x0008, 0x31c2: 0x0008, 0x31c3: 0x0008, 0x31c4: 0x0008, 0x31c5: 0x0008, + 0x31c6: 0x0008, 0x31c7: 0x0008, 0x31c8: 0x0008, 0x31c9: 0x0008, 0x31ca: 0x0008, 0x31cb: 0x0008, + 0x31cc: 0x0008, 0x31cd: 0x0008, 0x31ce: 0x0008, 0x31cf: 0x0008, + 0x31da: 0x0008, + 0x31ef: 0x0008, + 0x31f2: 0x0008, 0x31f3: 0x0008, 0x31f4: 0x0008, 0x31f5: 0x0008, + 0x31f6: 0x0008, 0x31f7: 0x0008, 0x31f8: 0x0008, 0x31f9: 0x0008, 0x31fa: 0x0008, + 0x31fc: 0x0008, 0x31fd: 0x0008, 0x31fe: 0x0008, 0x31ff: 0x0008, + // Block 0xc8, offset 0x3200 + 0x3209: 0x0008, 0x320a: 0x0008, 0x320b: 0x0008, + 0x320c: 0x0008, 0x320d: 0x0008, 0x320e: 0x0008, 0x320f: 0x0008, 0x3210: 0x0008, 0x3211: 0x0008, + 0x3212: 0x0008, 0x3213: 0x0008, 0x3214: 0x0008, 0x3215: 0x0008, 0x3216: 0x0008, 0x3217: 0x0008, + 0x3218: 0x0008, 0x3219: 0x0008, 0x321a: 0x0008, 0x321b: 0x0008, 0x321c: 0x0008, 0x321d: 0x0008, + 0x321e: 0x0008, 0x321f: 0x0008, 0x3220: 0x0008, 0x3221: 0x0008, 0x3222: 0x0008, 0x3223: 0x0008, + 0x3224: 0x0008, 0x3225: 0x0008, 0x3226: 0x0008, 0x3227: 0x0008, 0x3228: 0x0008, 0x3229: 0x0008, + 0x322a: 0x0008, 0x322b: 0x0008, 0x322c: 0x0008, 0x322d: 0x0008, 0x322e: 0x0008, 0x322f: 0x0008, + 0x3230: 0x0008, 0x3231: 0x0008, 0x3232: 0x0008, 0x3233: 0x0008, 0x3234: 0x0008, 0x3235: 0x0008, + 0x3236: 0x0008, 0x3237: 0x0008, 0x3238: 0x0008, 0x3239: 0x0008, 0x323a: 0x0008, 0x323b: 0x0008, + 0x323c: 0x0008, 0x323d: 0x0008, 0x323e: 0x0008, 0x323f: 0x0008, + // Block 0xc9, offset 0x3240 + 0x3240: 0x0008, 0x3241: 0x0008, 0x3242: 0x0008, 0x3243: 0x0008, 0x3244: 0x0008, 0x3245: 0x0008, + 0x3246: 0x0008, 0x3247: 0x0008, 0x3248: 0x0008, 0x3249: 0x0008, 0x324a: 0x0008, 0x324b: 0x0008, + 0x324c: 0x0008, 0x324d: 0x0008, 0x324e: 0x0008, 0x324f: 0x0008, 0x3250: 0x0008, 0x3251: 0x0008, + 0x3252: 0x0008, 0x3253: 0x0008, 0x3254: 0x0008, 0x3255: 0x0008, 0x3256: 0x0008, 0x3257: 0x0008, + 0x3258: 0x0008, 0x3259: 0x0008, 0x325a: 0x0008, 0x325b: 0x0008, 0x325c: 0x0008, 0x325d: 0x0008, + 0x325e: 0x0008, 0x325f: 0x0008, 0x3260: 0x0008, 0x3261: 0x0008, 0x3262: 0x0008, 0x3263: 0x0008, + 0x3264: 0x0008, 0x3265: 0x0008, 0x3266: 0x0008, 0x3267: 0x0008, 0x3268: 0x0008, 0x3269: 0x0008, + 0x326a: 0x0008, 0x326b: 0x0008, 0x326c: 0x0008, 0x326d: 0x0008, 0x326e: 0x0008, 0x326f: 0x0008, + 0x3270: 0x0008, 0x3271: 0x0008, 0x3272: 0x0008, 0x3273: 0x0008, 0x3274: 0x0008, 0x3275: 0x0008, + 0x3276: 0x0008, 0x3277: 0x0008, 0x3278: 0x0008, 0x3279: 0x0008, 0x327a: 0x0008, 0x327b: 0x0004, + 0x327c: 0x0004, 0x327d: 0x0004, 0x327e: 0x0004, 0x327f: 0x0004, + // Block 0xca, offset 0x3280 + 0x3280: 0x0008, 0x3281: 0x0008, 0x3282: 0x0008, 0x3283: 0x0008, 0x3284: 0x0008, 0x3285: 0x0008, + 0x3286: 0x0008, 0x3287: 0x0008, 0x3288: 0x0008, 0x3289: 0x0008, 0x328a: 0x0008, 0x328b: 0x0008, + 0x328c: 0x0008, 0x328d: 0x0008, 0x328e: 0x0008, 0x328f: 0x0008, 0x3290: 0x0008, 0x3291: 0x0008, + 0x3292: 0x0008, 0x3293: 0x0008, 0x3294: 0x0008, 0x3295: 0x0008, 0x3296: 0x0008, 0x3297: 0x0008, + 0x3298: 0x0008, 0x3299: 0x0008, 0x329a: 0x0008, 0x329b: 0x0008, 0x329c: 0x0008, 0x329d: 0x0008, + 0x329e: 0x0008, 0x329f: 0x0008, 0x32a0: 0x0008, 0x32a1: 0x0008, 0x32a2: 0x0008, 0x32a3: 0x0008, + 0x32a4: 0x0008, 0x32a5: 0x0008, 0x32a6: 0x0008, 0x32a7: 0x0008, 0x32a8: 0x0008, 0x32a9: 0x0008, + 0x32aa: 0x0008, 0x32ab: 0x0008, 0x32ac: 0x0008, 0x32ad: 0x0008, 0x32ae: 0x0008, 0x32af: 0x0008, + 0x32b0: 0x0008, 0x32b1: 0x0008, 0x32b2: 0x0008, 0x32b3: 0x0008, 0x32b4: 0x0008, 0x32b5: 0x0008, + 0x32b6: 0x0008, 0x32b7: 0x0008, 0x32b8: 
0x0008, 0x32b9: 0x0008, 0x32ba: 0x0008, 0x32bb: 0x0008, + 0x32bc: 0x0008, 0x32bd: 0x0008, + // Block 0xcb, offset 0x32c0 + 0x32c6: 0x0008, 0x32c7: 0x0008, 0x32c8: 0x0008, 0x32c9: 0x0008, 0x32ca: 0x0008, 0x32cb: 0x0008, + 0x32cc: 0x0008, 0x32cd: 0x0008, 0x32ce: 0x0008, 0x32cf: 0x0008, 0x32d0: 0x0008, 0x32d1: 0x0008, + 0x32d2: 0x0008, 0x32d3: 0x0008, 0x32d4: 0x0008, 0x32d5: 0x0008, 0x32d6: 0x0008, 0x32d7: 0x0008, + 0x32d8: 0x0008, 0x32d9: 0x0008, 0x32da: 0x0008, 0x32db: 0x0008, 0x32dc: 0x0008, 0x32dd: 0x0008, + 0x32de: 0x0008, 0x32df: 0x0008, 0x32e0: 0x0008, 0x32e1: 0x0008, 0x32e2: 0x0008, 0x32e3: 0x0008, + 0x32e4: 0x0008, 0x32e5: 0x0008, 0x32e6: 0x0008, 0x32e7: 0x0008, 0x32e8: 0x0008, 0x32e9: 0x0008, + 0x32ea: 0x0008, 0x32eb: 0x0008, 0x32ec: 0x0008, 0x32ed: 0x0008, 0x32ee: 0x0008, 0x32ef: 0x0008, + 0x32f0: 0x0008, 0x32f1: 0x0008, 0x32f2: 0x0008, 0x32f3: 0x0008, 0x32f4: 0x0008, 0x32f5: 0x0008, + 0x32f6: 0x0008, 0x32f7: 0x0008, 0x32f8: 0x0008, 0x32f9: 0x0008, 0x32fa: 0x0008, 0x32fb: 0x0008, + 0x32fc: 0x0008, 0x32fd: 0x0008, 0x32fe: 0x0008, 0x32ff: 0x0008, + // Block 0xcc, offset 0x3300 + 0x3300: 0x0008, 0x3301: 0x0008, 0x3302: 0x0008, 0x3303: 0x0008, 0x3304: 0x0008, 0x3305: 0x0008, + 0x3306: 0x0008, 0x3307: 0x0008, 0x3308: 0x0008, 0x3309: 0x0008, 0x330a: 0x0008, 0x330b: 0x0008, + 0x330c: 0x0008, 0x330d: 0x0008, 0x330e: 0x0008, 0x330f: 0x0008, + // Block 0xcd, offset 0x3340 + 0x3374: 0x0008, 0x3375: 0x0008, + 0x3376: 0x0008, 0x3377: 0x0008, 0x3378: 0x0008, 0x3379: 0x0008, 0x337a: 0x0008, 0x337b: 0x0008, + 0x337c: 0x0008, 0x337d: 0x0008, 0x337e: 0x0008, 0x337f: 0x0008, + // Block 0xce, offset 0x3380 + 0x3395: 0x0008, 0x3396: 0x0008, 0x3397: 0x0008, + 0x3398: 0x0008, 0x3399: 0x0008, 0x339a: 0x0008, 0x339b: 0x0008, 0x339c: 0x0008, 0x339d: 0x0008, + 0x339e: 0x0008, 0x339f: 0x0008, 0x33a0: 0x0008, 0x33a1: 0x0008, 0x33a2: 0x0008, 0x33a3: 0x0008, + 0x33a4: 0x0008, 0x33a5: 0x0008, 0x33a6: 0x0008, 0x33a7: 0x0008, 0x33a8: 0x0008, 0x33a9: 0x0008, + 0x33aa: 0x0008, 0x33ab: 0x0008, 0x33ac: 0x0008, 0x33ad: 0x0008, 0x33ae: 0x0008, 0x33af: 0x0008, + 0x33b0: 0x0008, 0x33b1: 0x0008, 0x33b2: 0x0008, 0x33b3: 0x0008, 0x33b4: 0x0008, 0x33b5: 0x0008, + 0x33b6: 0x0008, 0x33b7: 0x0008, 0x33b8: 0x0008, 0x33b9: 0x0008, 0x33ba: 0x0008, 0x33bb: 0x0008, + 0x33bc: 0x0008, 0x33bd: 0x0008, 0x33be: 0x0008, 0x33bf: 0x0008, + // Block 0xcf, offset 0x33c0 + 0x33cc: 0x0008, 0x33cd: 0x0008, 0x33ce: 0x0008, 0x33cf: 0x0008, + // Block 0xd0, offset 0x3400 + 0x3408: 0x0008, 0x3409: 0x0008, 0x340a: 0x0008, 0x340b: 0x0008, + 0x340c: 0x0008, 0x340d: 0x0008, 0x340e: 0x0008, 0x340f: 0x0008, + 0x341a: 0x0008, 0x341b: 0x0008, 0x341c: 0x0008, 0x341d: 0x0008, + 0x341e: 0x0008, 0x341f: 0x0008, + // Block 0xd1, offset 0x3440 + 0x3448: 0x0008, 0x3449: 0x0008, 0x344a: 0x0008, 0x344b: 0x0008, + 0x344c: 0x0008, 0x344d: 0x0008, 0x344e: 0x0008, 0x344f: 0x0008, + 0x346e: 0x0008, 0x346f: 0x0008, + 0x3470: 0x0008, 0x3471: 0x0008, 0x3472: 0x0008, 0x3473: 0x0008, 0x3474: 0x0008, 0x3475: 0x0008, + 0x3476: 0x0008, 0x3477: 0x0008, 0x3478: 0x0008, 0x3479: 0x0008, 0x347a: 0x0008, 0x347b: 0x0008, + 0x347c: 0x0008, 0x347d: 0x0008, 0x347e: 0x0008, 0x347f: 0x0008, + // Block 0xd2, offset 0x3480 + 0x348c: 0x0008, 0x348d: 0x0008, 0x348e: 0x0008, 0x348f: 0x0008, 0x3490: 0x0008, 0x3491: 0x0008, + 0x3492: 0x0008, 0x3493: 0x0008, 0x3494: 0x0008, 0x3495: 0x0008, 0x3496: 0x0008, 0x3497: 0x0008, + 0x3498: 0x0008, 0x3499: 0x0008, 0x349a: 0x0008, 0x349b: 0x0008, 0x349c: 0x0008, 0x349d: 0x0008, + 0x349e: 0x0008, 0x349f: 0x0008, 0x34a0: 0x0008, 0x34a1: 0x0008, 0x34a2: 0x0008, 
0x34a3: 0x0008, + 0x34a4: 0x0008, 0x34a5: 0x0008, 0x34a6: 0x0008, 0x34a7: 0x0008, 0x34a8: 0x0008, 0x34a9: 0x0008, + 0x34aa: 0x0008, 0x34ab: 0x0008, 0x34ac: 0x0008, 0x34ad: 0x0008, 0x34ae: 0x0008, 0x34af: 0x0008, + 0x34b0: 0x0008, 0x34b1: 0x0008, 0x34b2: 0x0008, 0x34b3: 0x0008, 0x34b4: 0x0008, 0x34b5: 0x0008, + 0x34b6: 0x0008, 0x34b7: 0x0008, 0x34b8: 0x0008, 0x34b9: 0x0008, 0x34ba: 0x0008, + 0x34bc: 0x0008, 0x34bd: 0x0008, 0x34be: 0x0008, 0x34bf: 0x0008, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x0008, 0x34c1: 0x0008, 0x34c2: 0x0008, 0x34c3: 0x0008, 0x34c4: 0x0008, 0x34c5: 0x0008, + 0x34c7: 0x0008, 0x34c8: 0x0008, 0x34c9: 0x0008, 0x34ca: 0x0008, 0x34cb: 0x0008, + 0x34cc: 0x0008, 0x34cd: 0x0008, 0x34ce: 0x0008, 0x34cf: 0x0008, 0x34d0: 0x0008, 0x34d1: 0x0008, + 0x34d2: 0x0008, 0x34d3: 0x0008, 0x34d4: 0x0008, 0x34d5: 0x0008, 0x34d6: 0x0008, 0x34d7: 0x0008, + 0x34d8: 0x0008, 0x34d9: 0x0008, 0x34da: 0x0008, 0x34db: 0x0008, 0x34dc: 0x0008, 0x34dd: 0x0008, + 0x34de: 0x0008, 0x34df: 0x0008, 0x34e0: 0x0008, 0x34e1: 0x0008, 0x34e2: 0x0008, 0x34e3: 0x0008, + 0x34e4: 0x0008, 0x34e5: 0x0008, 0x34e6: 0x0008, 0x34e7: 0x0008, 0x34e8: 0x0008, 0x34e9: 0x0008, + 0x34ea: 0x0008, 0x34eb: 0x0008, 0x34ec: 0x0008, 0x34ed: 0x0008, 0x34ee: 0x0008, 0x34ef: 0x0008, + 0x34f0: 0x0008, 0x34f1: 0x0008, 0x34f2: 0x0008, 0x34f3: 0x0008, 0x34f4: 0x0008, 0x34f5: 0x0008, + 0x34f6: 0x0008, 0x34f7: 0x0008, 0x34f8: 0x0008, 0x34f9: 0x0008, 0x34fa: 0x0008, 0x34fb: 0x0008, + 0x34fc: 0x0008, 0x34fd: 0x0008, 0x34fe: 0x0008, 0x34ff: 0x0008, + // Block 0xd4, offset 0x3500 + 0x3500: 0x0002, 0x3501: 0x0002, 0x3502: 0x0002, 0x3503: 0x0002, 0x3504: 0x0002, 0x3505: 0x0002, + 0x3506: 0x0002, 0x3507: 0x0002, 0x3508: 0x0002, 0x3509: 0x0002, 0x350a: 0x0002, 0x350b: 0x0002, + 0x350c: 0x0002, 0x350d: 0x0002, 0x350e: 0x0002, 0x350f: 0x0002, 0x3510: 0x0002, 0x3511: 0x0002, + 0x3512: 0x0002, 0x3513: 0x0002, 0x3514: 0x0002, 0x3515: 0x0002, 0x3516: 0x0002, 0x3517: 0x0002, + 0x3518: 0x0002, 0x3519: 0x0002, 0x351a: 0x0002, 0x351b: 0x0002, 0x351c: 0x0002, 0x351d: 0x0002, + 0x351e: 0x0002, 0x351f: 0x0002, 0x3520: 0x0004, 0x3521: 0x0004, 0x3522: 0x0004, 0x3523: 0x0004, + 0x3524: 0x0004, 0x3525: 0x0004, 0x3526: 0x0004, 0x3527: 0x0004, 0x3528: 0x0004, 0x3529: 0x0004, + 0x352a: 0x0004, 0x352b: 0x0004, 0x352c: 0x0004, 0x352d: 0x0004, 0x352e: 0x0004, 0x352f: 0x0004, + 0x3530: 0x0004, 0x3531: 0x0004, 0x3532: 0x0004, 0x3533: 0x0004, 0x3534: 0x0004, 0x3535: 0x0004, + 0x3536: 0x0004, 0x3537: 0x0004, 0x3538: 0x0004, 0x3539: 0x0004, 0x353a: 0x0004, 0x353b: 0x0004, + 0x353c: 0x0004, 0x353d: 0x0004, 0x353e: 0x0004, 0x353f: 0x0004, + // Block 0xd5, offset 0x3540 + 0x3540: 0x0002, 0x3541: 0x0002, 0x3542: 0x0002, 0x3543: 0x0002, 0x3544: 0x0002, 0x3545: 0x0002, + 0x3546: 0x0002, 0x3547: 0x0002, 0x3548: 0x0002, 0x3549: 0x0002, 0x354a: 0x0002, 0x354b: 0x0002, + 0x354c: 0x0002, 0x354d: 0x0002, 0x354e: 0x0002, 0x354f: 0x0002, 0x3550: 0x0002, 0x3551: 0x0002, + 0x3552: 0x0002, 0x3553: 0x0002, 0x3554: 0x0002, 0x3555: 0x0002, 0x3556: 0x0002, 0x3557: 0x0002, + 0x3558: 0x0002, 0x3559: 0x0002, 0x355a: 0x0002, 0x355b: 0x0002, 0x355c: 0x0002, 0x355d: 0x0002, + 0x355e: 0x0002, 0x355f: 0x0002, 0x3560: 0x0002, 0x3561: 0x0002, 0x3562: 0x0002, 0x3563: 0x0002, + 0x3564: 0x0002, 0x3565: 0x0002, 0x3566: 0x0002, 0x3567: 0x0002, 0x3568: 0x0002, 0x3569: 0x0002, + 0x356a: 0x0002, 0x356b: 0x0002, 0x356c: 0x0002, 0x356d: 0x0002, 0x356e: 0x0002, 0x356f: 0x0002, + 0x3570: 0x0002, 0x3571: 0x0002, 0x3572: 0x0002, 0x3573: 0x0002, 0x3574: 0x0002, 0x3575: 0x0002, + 0x3576: 0x0002, 0x3577: 0x0002, 0x3578: 
0x0002, 0x3579: 0x0002, 0x357a: 0x0002, 0x357b: 0x0002, + 0x357c: 0x0002, 0x357d: 0x0002, 0x357e: 0x0002, 0x357f: 0x0002, + // Block 0xd6, offset 0x3580 + 0x3580: 0x0004, 0x3581: 0x0004, 0x3582: 0x0004, 0x3583: 0x0004, 0x3584: 0x0004, 0x3585: 0x0004, + 0x3586: 0x0004, 0x3587: 0x0004, 0x3588: 0x0004, 0x3589: 0x0004, 0x358a: 0x0004, 0x358b: 0x0004, + 0x358c: 0x0004, 0x358d: 0x0004, 0x358e: 0x0004, 0x358f: 0x0004, 0x3590: 0x0004, 0x3591: 0x0004, + 0x3592: 0x0004, 0x3593: 0x0004, 0x3594: 0x0004, 0x3595: 0x0004, 0x3596: 0x0004, 0x3597: 0x0004, + 0x3598: 0x0004, 0x3599: 0x0004, 0x359a: 0x0004, 0x359b: 0x0004, 0x359c: 0x0004, 0x359d: 0x0004, + 0x359e: 0x0004, 0x359f: 0x0004, 0x35a0: 0x0004, 0x35a1: 0x0004, 0x35a2: 0x0004, 0x35a3: 0x0004, + 0x35a4: 0x0004, 0x35a5: 0x0004, 0x35a6: 0x0004, 0x35a7: 0x0004, 0x35a8: 0x0004, 0x35a9: 0x0004, + 0x35aa: 0x0004, 0x35ab: 0x0004, 0x35ac: 0x0004, 0x35ad: 0x0004, 0x35ae: 0x0004, 0x35af: 0x0004, + 0x35b0: 0x0002, 0x35b1: 0x0002, 0x35b2: 0x0002, 0x35b3: 0x0002, 0x35b4: 0x0002, 0x35b5: 0x0002, + 0x35b6: 0x0002, 0x35b7: 0x0002, 0x35b8: 0x0002, 0x35b9: 0x0002, 0x35ba: 0x0002, 0x35bb: 0x0002, + 0x35bc: 0x0002, 0x35bd: 0x0002, 0x35be: 0x0002, 0x35bf: 0x0002, +} + +// graphemesIndex: 25 blocks, 1600 entries, 1600 bytes +// Block 0 is the zero block. +var graphemesIndex = [1600]property{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, + 0xcc: 0x02, 0xcd: 0x03, + 0xd2: 0x04, 0xd6: 0x05, 0xd7: 0x06, + 0xd8: 0x07, 0xd9: 0x08, 0xdb: 0x09, 0xdc: 0x0a, 0xdd: 0x0b, 0xde: 0x0c, 0xdf: 0x0d, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x14, 0xf3: 0x16, + // Block 0x4, offset 0x100 + 0x120: 0x0e, 0x121: 0x0f, 0x122: 0x10, 0x123: 0x11, 0x124: 0x12, 0x125: 0x13, 0x126: 0x14, 0x127: 0x15, + 0x128: 0x16, 0x129: 0x17, 0x12a: 0x16, 0x12b: 0x18, 0x12c: 0x19, 0x12d: 0x1a, 0x12e: 0x1b, 0x12f: 0x1c, + 0x130: 0x1d, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x21, 0x135: 0x22, 0x136: 0x23, 0x137: 0x24, + 0x138: 0x25, 0x139: 0x26, 0x13a: 0x27, 0x13b: 0x28, 0x13c: 0x29, 0x13d: 0x2a, 0x13e: 0x2b, 0x13f: 0x2c, + // Block 0x5, offset 0x140 + 0x140: 0x2d, 0x141: 0x2e, 0x142: 0x2f, 0x144: 0x30, 0x145: 0x31, 0x146: 0x32, 0x147: 0x33, + 0x14d: 0x34, + 0x15c: 0x35, 0x15d: 0x36, 0x15e: 0x37, 0x15f: 0x38, + 0x160: 0x39, 0x162: 0x3a, 0x164: 0x3b, + 0x168: 0x3c, 0x169: 0x3d, 0x16a: 0x3e, 0x16b: 0x3f, 0x16c: 0x40, 0x16d: 0x41, 0x16e: 0x42, 0x16f: 0x43, + 0x170: 0x44, 0x173: 0x45, 0x177: 0x02, + // Block 0x6, offset 0x180 + 0x180: 0x46, 0x181: 0x47, 0x183: 0x48, 0x184: 0x49, 0x186: 0x4a, + 0x18c: 0x4b, 0x18e: 0x4c, 0x18f: 0x4d, + 0x193: 0x4e, 0x196: 0x4f, 0x197: 0x50, + 0x198: 0x51, 0x199: 0x52, 0x19a: 0x53, 0x19b: 0x52, 0x19c: 0x54, 0x19d: 0x55, 0x19e: 0x56, + 0x1a4: 0x57, + 0x1ac: 0x58, 0x1ad: 0x59, + 0x1b3: 0x5a, 0x1b5: 0x5b, 0x1b7: 0x5c, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x5d, 0x1c2: 0x5e, + 0x1ca: 0x5f, + // Block 0x8, offset 0x200 + 0x219: 0x60, 0x21a: 0x61, 0x21b: 0x62, + 0x220: 0x63, 0x222: 0x64, 0x223: 0x65, 0x224: 0x66, 0x225: 0x67, 0x226: 0x68, 0x227: 0x69, + 0x228: 0x6a, 0x229: 0x6b, 0x22a: 0x6c, 0x22b: 0x6d, 0x22f: 0x6e, + 0x230: 0x6f, 0x231: 0x70, 0x232: 0x71, 0x233: 0x72, 0x234: 0x73, 0x235: 0x74, 0x236: 0x75, 0x237: 0x6f, + 0x238: 0x70, 0x239: 0x71, 0x23a: 0x72, 0x23b: 0x73, 0x23c: 0x74, 0x23d: 0x75, 0x23e: 0x6f, 0x23f: 0x70, + // Block 0x9, offset 0x240 + 0x240: 0x71, 0x241: 0x72, 0x242: 0x73, 0x243: 0x74, 0x244: 0x75, 0x245: 0x6f, 
0x246: 0x70, 0x247: 0x71, + 0x248: 0x72, 0x249: 0x73, 0x24a: 0x74, 0x24b: 0x75, 0x24c: 0x6f, 0x24d: 0x70, 0x24e: 0x71, 0x24f: 0x72, + 0x250: 0x73, 0x251: 0x74, 0x252: 0x75, 0x253: 0x6f, 0x254: 0x70, 0x255: 0x71, 0x256: 0x72, 0x257: 0x73, + 0x258: 0x74, 0x259: 0x75, 0x25a: 0x6f, 0x25b: 0x70, 0x25c: 0x71, 0x25d: 0x72, 0x25e: 0x73, 0x25f: 0x74, + 0x260: 0x75, 0x261: 0x6f, 0x262: 0x70, 0x263: 0x71, 0x264: 0x72, 0x265: 0x73, 0x266: 0x74, 0x267: 0x75, + 0x268: 0x6f, 0x269: 0x70, 0x26a: 0x71, 0x26b: 0x72, 0x26c: 0x73, 0x26d: 0x74, 0x26e: 0x75, 0x26f: 0x6f, + 0x270: 0x70, 0x271: 0x71, 0x272: 0x72, 0x273: 0x73, 0x274: 0x74, 0x275: 0x75, 0x276: 0x6f, 0x277: 0x70, + 0x278: 0x71, 0x279: 0x72, 0x27a: 0x73, 0x27b: 0x74, 0x27c: 0x75, 0x27d: 0x6f, 0x27e: 0x70, 0x27f: 0x71, + // Block 0xa, offset 0x280 + 0x280: 0x72, 0x281: 0x73, 0x282: 0x74, 0x283: 0x75, 0x284: 0x6f, 0x285: 0x70, 0x286: 0x71, 0x287: 0x72, + 0x288: 0x73, 0x289: 0x74, 0x28a: 0x75, 0x28b: 0x6f, 0x28c: 0x70, 0x28d: 0x71, 0x28e: 0x72, 0x28f: 0x73, + 0x290: 0x74, 0x291: 0x75, 0x292: 0x6f, 0x293: 0x70, 0x294: 0x71, 0x295: 0x72, 0x296: 0x73, 0x297: 0x74, + 0x298: 0x75, 0x299: 0x6f, 0x29a: 0x70, 0x29b: 0x71, 0x29c: 0x72, 0x29d: 0x73, 0x29e: 0x74, 0x29f: 0x75, + 0x2a0: 0x6f, 0x2a1: 0x70, 0x2a2: 0x71, 0x2a3: 0x72, 0x2a4: 0x73, 0x2a5: 0x74, 0x2a6: 0x75, 0x2a7: 0x6f, + 0x2a8: 0x70, 0x2a9: 0x71, 0x2aa: 0x72, 0x2ab: 0x73, 0x2ac: 0x74, 0x2ad: 0x75, 0x2ae: 0x6f, 0x2af: 0x70, + 0x2b0: 0x71, 0x2b1: 0x72, 0x2b2: 0x73, 0x2b3: 0x74, 0x2b4: 0x75, 0x2b5: 0x6f, 0x2b6: 0x70, 0x2b7: 0x71, + 0x2b8: 0x72, 0x2b9: 0x73, 0x2ba: 0x74, 0x2bb: 0x75, 0x2bc: 0x6f, 0x2bd: 0x70, 0x2be: 0x71, 0x2bf: 0x72, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x73, 0x2c1: 0x74, 0x2c2: 0x75, 0x2c3: 0x6f, 0x2c4: 0x70, 0x2c5: 0x71, 0x2c6: 0x72, 0x2c7: 0x73, + 0x2c8: 0x74, 0x2c9: 0x75, 0x2ca: 0x6f, 0x2cb: 0x70, 0x2cc: 0x71, 0x2cd: 0x72, 0x2ce: 0x73, 0x2cf: 0x74, + 0x2d0: 0x75, 0x2d1: 0x6f, 0x2d2: 0x70, 0x2d3: 0x71, 0x2d4: 0x72, 0x2d5: 0x73, 0x2d6: 0x74, 0x2d7: 0x75, + 0x2d8: 0x6f, 0x2d9: 0x70, 0x2da: 0x71, 0x2db: 0x72, 0x2dc: 0x73, 0x2dd: 0x74, 0x2de: 0x76, 0x2df: 0x77, + // Block 0xc, offset 0x300 + 0x32c: 0x78, + 0x338: 0x79, 0x33b: 0x7a, 0x33e: 0x61, 0x33f: 0x7b, + // Block 0xd, offset 0x340 + 0x347: 0x7c, + 0x34b: 0x7d, 0x34d: 0x7e, + 0x368: 0x7f, 0x36b: 0x80, + 0x374: 0x81, + 0x37a: 0x82, 0x37b: 0x83, 0x37d: 0x84, 0x37e: 0x85, + // Block 0xe, offset 0x380 + 0x380: 0x86, 0x381: 0x87, 0x382: 0x88, 0x383: 0x89, 0x384: 0x8a, 0x385: 0x8b, 0x386: 0x8c, 0x387: 0x8d, + 0x388: 0x8e, 0x389: 0x8f, 0x38b: 0x90, 0x38c: 0x21, 0x38d: 0x91, + 0x390: 0x92, 0x391: 0x93, 0x392: 0x94, 0x393: 0x95, 0x396: 0x96, 0x397: 0x97, + 0x398: 0x98, 0x399: 0x99, 0x39a: 0x9a, 0x39c: 0x9b, + 0x3a0: 0x9c, 0x3a4: 0x9d, 0x3a5: 0x9e, 0x3a7: 0x9f, + 0x3a8: 0xa0, 0x3a9: 0xa1, 0x3aa: 0xa2, + 0x3b0: 0xa3, 0x3b2: 0xa4, 0x3b4: 0xa5, 0x3b5: 0xa6, 0x3b6: 0xa7, + 0x3bb: 0xa8, 0x3bc: 0xa9, 0x3bd: 0xaa, + // Block 0xf, offset 0x3c0 + 0x3d0: 0xab, 0x3d1: 0xac, + // Block 0x10, offset 0x400 + 0x42b: 0xad, 0x42c: 0xae, + 0x43d: 0xaf, 0x43e: 0xb0, 0x43f: 0xb1, + // Block 0x11, offset 0x440 + 0x472: 0xb2, + // Block 0x12, offset 0x480 + 0x4bc: 0xb3, 0x4bd: 0xb4, + // Block 0x13, offset 0x4c0 + 0x4c5: 0xb5, 0x4c6: 0xb6, + 0x4c9: 0xb7, + 0x4e8: 0xb8, 0x4e9: 0xb9, 0x4ea: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xbb, 0x502: 0xbc, 0x504: 0xae, + 0x50a: 0xbd, 0x50b: 0xbe, + 0x513: 0xbe, + 0x523: 0xbf, 0x525: 0xc0, + // Block 0x15, offset 0x540 + 0x540: 0x52, 0x541: 0x52, 0x542: 0x52, 0x543: 0x52, 0x544: 0xc1, 0x545: 0xc2, 0x546: 0xc3, 0x547: 0xc4, 
+ 0x548: 0xc5, 0x549: 0xc6, 0x54a: 0x52, 0x54b: 0x52, 0x54c: 0x52, 0x54d: 0x52, 0x54e: 0x52, 0x54f: 0xc7, + 0x550: 0x52, 0x551: 0x52, 0x552: 0x52, 0x553: 0x52, 0x554: 0xc8, 0x555: 0xc9, 0x556: 0x52, 0x557: 0x52, + 0x558: 0x52, 0x559: 0xca, 0x55a: 0x52, 0x55b: 0x52, 0x55d: 0xcb, 0x55f: 0xcc, + 0x560: 0xcd, 0x561: 0xce, 0x562: 0xcf, 0x563: 0x52, 0x564: 0xd0, 0x565: 0xd1, 0x566: 0x52, 0x567: 0x52, + 0x568: 0x52, 0x569: 0x52, 0x56a: 0x52, 0x56b: 0x52, + 0x570: 0x52, 0x571: 0x52, 0x572: 0x52, 0x573: 0x52, 0x574: 0x52, 0x575: 0x52, 0x576: 0x52, 0x577: 0x52, + 0x578: 0x52, 0x579: 0x52, 0x57a: 0x52, 0x57b: 0x52, 0x57c: 0x52, 0x57d: 0x52, 0x57e: 0x52, 0x57f: 0xc8, + // Block 0x16, offset 0x580 + 0x590: 0x0b, 0x591: 0x0c, 0x593: 0x0d, 0x596: 0x0e, + 0x59b: 0x0f, 0x59c: 0x10, 0x59d: 0x11, 0x59e: 0x12, 0x59f: 0x13, + // Block 0x17, offset 0x5c0 + 0x5c0: 0xd2, 0x5c1: 0x02, 0x5c2: 0xd3, 0x5c3: 0xd3, 0x5c4: 0x02, 0x5c5: 0x02, 0x5c6: 0x02, 0x5c7: 0xd4, + 0x5c8: 0xd3, 0x5c9: 0xd3, 0x5ca: 0xd3, 0x5cb: 0xd3, 0x5cc: 0xd3, 0x5cd: 0xd3, 0x5ce: 0xd3, 0x5cf: 0xd3, + 0x5d0: 0xd3, 0x5d1: 0xd3, 0x5d2: 0xd3, 0x5d3: 0xd3, 0x5d4: 0xd3, 0x5d5: 0xd3, 0x5d6: 0xd3, 0x5d7: 0xd3, + 0x5d8: 0xd3, 0x5d9: 0xd3, 0x5da: 0xd3, 0x5db: 0xd3, 0x5dc: 0xd3, 0x5dd: 0xd3, 0x5de: 0xd3, 0x5df: 0xd3, + 0x5e0: 0xd3, 0x5e1: 0xd3, 0x5e2: 0xd3, 0x5e3: 0xd3, 0x5e4: 0xd3, 0x5e5: 0xd3, 0x5e6: 0xd3, 0x5e7: 0xd3, + 0x5e8: 0xd3, 0x5e9: 0xd3, 0x5ea: 0xd3, 0x5eb: 0xd3, 0x5ec: 0xd3, 0x5ed: 0xd3, 0x5ee: 0xd3, 0x5ef: 0xd3, + 0x5f0: 0xd3, 0x5f1: 0xd3, 0x5f2: 0xd3, 0x5f3: 0xd3, 0x5f4: 0xd3, 0x5f5: 0xd3, 0x5f6: 0xd3, 0x5f7: 0xd3, + 0x5f8: 0xd3, 0x5f9: 0xd3, 0x5fa: 0xd3, 0x5fb: 0xd3, 0x5fc: 0xd3, 0x5fd: 0xd3, 0x5fe: 0xd3, 0x5ff: 0xd3, + // Block 0x18, offset 0x600 + 0x620: 0x15, +} diff --git a/vendor/github.com/clipperhouse/uax29/v2/internal/iterators/iterator.go b/vendor/github.com/clipperhouse/uax29/v2/internal/iterators/iterator.go new file mode 100644 index 00000000..17e9b550 --- /dev/null +++ b/vendor/github.com/clipperhouse/uax29/v2/internal/iterators/iterator.go @@ -0,0 +1,85 @@ +package iterators + +type Stringish interface { + []byte | string +} + +type SplitFunc[T Stringish] func(T, bool) (int, T, error) + +// Iterator is a generic iterator for words that are either []byte or string. +// Iterate while Next() is true, and access the word via Value(). +type Iterator[T Stringish] struct { + split SplitFunc[T] + data T + start int + pos int +} + +// New creates a new Iterator for the given data and SplitFunc. +func New[T Stringish](split SplitFunc[T], data T) *Iterator[T] { + return &Iterator[T]{ + split: split, + data: data, + } +} + +// SetText sets the text for the iterator to operate on, and resets all state. +func (iter *Iterator[T]) SetText(data T) { + iter.data = data + iter.start = 0 + iter.pos = 0 +} + +// Split sets the SplitFunc for the Iterator. +func (iter *Iterator[T]) Split(split SplitFunc[T]) { + iter.split = split +} + +// Next advances the iterator to the next token. It returns false when there +// are no remaining tokens or an error occurred. 
+func (iter *Iterator[T]) Next() bool { + if iter.pos == len(iter.data) { + return false + } + if iter.pos > len(iter.data) { + panic("SplitFunc advanced beyond the end of the data") + } + + iter.start = iter.pos + + advance, _, err := iter.split(iter.data[iter.pos:], true) + if err != nil { + panic(err) + } + if advance <= 0 { + panic("SplitFunc returned a zero or negative advance") + } + + iter.pos += advance + if iter.pos > len(iter.data) { + panic("SplitFunc advanced beyond the end of the data") + } + + return true +} + +// Value returns the current token. +func (iter *Iterator[T]) Value() T { + return iter.data[iter.start:iter.pos] +} + +// Start returns the byte position of the current token in the original data. +func (iter *Iterator[T]) Start() int { + return iter.start +} + +// End returns the byte position after the current token in the original data. +func (iter *Iterator[T]) End() int { + return iter.pos +} + +// Reset resets the iterator to the beginning of the data. +func (iter *Iterator[T]) Reset() { + iter.start = 0 + iter.pos = 0 +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/.golangci.yml b/vendor/github.com/cyphar/filepath-securejoin/.golangci.yml new file mode 100644 index 00000000..e965034e --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/.golangci.yml @@ -0,0 +1,56 @@ +# SPDX-License-Identifier: MPL-2.0 + +# Copyright (C) 2025 Aleksa Sarai +# Copyright (C) 2025 SUSE LLC +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + +version: "2" + +linters: + enable: + - asasalint + - asciicheck + - containedctx + - contextcheck + - errcheck + - errorlint + - exhaustive + - forcetypeassert + - godot + - goprintffuncname + - govet + - importas + - ineffassign + - makezero + - misspell + - musttag + - nilerr + - nilnesserr + - nilnil + - noctx + - prealloc + - revive + - staticcheck + - testifylint + - unconvert + - unparam + - unused + - usetesting + settings: + govet: + enable: + - nilness + testifylint: + enable-all: true + +formatters: + enable: + - gofumpt + - goimports + settings: + goimports: + local-prefixes: + - github.com/cyphar/filepath-securejoin diff --git a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md index ca0e3c62..6862467c 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md +++ b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md @@ -6,6 +6,122 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] ## +## [0.5.0] - 2025-09-26 ## + +> Let the past die. Kill it if you have to. + +> **NOTE**: With this release, some parts of +> `github.com/cyphar/filepath-securejoin` are now licensed under the Mozilla +> Public License (version 2). Please see [COPYING.md][] as well as the +> license header in each file for more details. + +[COPYING.md]: ./COPYING.md + +### Breaking ### +- The new API introduced in the [0.3.0][] release has been moved to a new + subpackage called `pathrs-lite`. This was primarily done to better indicate + the split between the new and old APIs, as well as indicate to users the + purpose of this subpackage (it is a less complete version of [libpathrs][]). + + We have added some wrappers to the top-level package to ease the transition, + but those are deprecated and will be removed in the next minor release of + filepath-securejoin.
Users should update their import paths. + + This new subpackage has also been relicensed under the Mozilla Public License + (version 2), please see [COPYING.md][] for more details. + +### Added ### +- Most of the key bits of the safe `procfs` API have now been exported and are + available in `github.com/cyphar/filepath-securejoin/pathrs-lite/procfs`. At + the moment this primarily consists of a new `procfs.Handle` API: + + * `OpenProcRoot` returns a new handle to `/proc`, endeavouring to make it + safe if possible (`subset=pid` to protect against mistaken write attacks + and leaks, as well as using `fsopen(2)` to avoid racing mount attacks). + + `OpenUnsafeProcRoot` returns a handle without attempting to create one + with `subset=pid`, which makes it more dangerous to leak. Most users + should use `OpenProcRoot` (even if you need to use `ProcRoot` as the base + of an operation, as filepath-securejoin will internally open a handle when + necessary). + + * The `(*procfs.Handle).Open*` family of methods lets you get a safe + `O_PATH` handle to certain subpaths within `/proc`. + + For `OpenThreadSelf`, the returned `ProcThreadSelfCloser` needs to be + called after you completely finish using the handle (this is necessary + because Go is multi-threaded and `ProcThreadSelf` references + `/proc/thread-self` which may disappear if we do not + `runtime.LockOSThread` -- `ProcThreadSelfCloser` is currently equivalent + to `runtime.UnlockOSThread`). + + Note that you cannot open any `procfs` symlinks (most notably magic-links) + using this API. At the moment, filepath-securejoin does not support this + feature (but [libpathrs][] does). + + * `ProcSelfFdReadlink` lets you get the in-kernel path representation of a + file descriptor (think `readlink("/proc/self/fd/...")`), except that we + verify that there aren't any tricky overmounts that could fool the + process. + + Please be aware that the returned string is simply a snapshot at that + particular moment, and an attacker could move the file being pointed to. + In addition, complex namespace configurations could result in nonsensical + or confusing paths being returned. The value received from this function + should only be used as secondary verification of some security property, + not as proof that a particular handle has a particular path. + + The procfs handle used internally by the API is the same as the rest of + `filepath-securejoin` (for privileged programs this is usually a private + in-process `procfs` instance created with `fsopen(2)`). + + As before, this is intended as a stop-gap before users migrate to + [libpathrs][], which provides a far more extensive safe `procfs` API and is + generally more robust. + +- Previously, the hardened procfs implementation (used internally within + `Reopen` and `Open(at)InRoot`) only protected against overmount attacks on + systems with `openat2(2)` (Linux 5.6) or systems with `fsopen(2)` or + `open_tree(2)` (Linux 5.2) and programs with privileges to use them (with + some caveats about locked mounts that probably affect very few users). For + other users, an attacker with the ability to create malicious mounts (on most + systems, a sysadmin) could trick you into operating on files you didn't + expect. This attack only really makes sense in the context of container + runtime implementations.
+ + This was considered a reasonable trade-off, as the long-term intention was to + get all users to just switch to [libpathrs][] if they wanted to use the safe + `procfs` API (which had more extensive protections, and is what these new + protections in `filepath-securejoin` are based on). However, as the API + is now being exported it seems unwise to advertise the API as "safe" if we do + not protect against known attacks. + + The procfs API is now more protected against attackers on systems lacking the + aforementioned protections. However, the most comprehensive of these + protections effectively rely on [`statx(STATX_MNT_ID)`][statx.2] (Linux 5.8). + On older kernel versions, there is no effective protection (there is some + minimal protection against non-`procfs` filesystem components but a + sufficiently clever attacker can work around those). In addition, + `STATX_MNT_ID` is vulnerable to mount ID reuse attacks by sufficiently + motivated and privileged attackers -- this problem is mitigated with + `STATX_MNT_ID_UNIQUE` (Linux 6.8) but that raises the minimum kernel version + for more protection. + + The fact that these protections are quite limited despite needing a fair bit + of extra code to handle was one of the primary reasons we did not initially + implement this in `filepath-securejoin` ([libpathrs][] supports all of this, + of course). + +### Fixed ### +- RHEL 8 kernels have backports of `fsopen(2)` but in some testing we've found + that it has very bad (and very difficult to debug) performance issues, and so + we will explicitly refuse to use `fsopen(2)` if the running kernel version is + pre-5.2 and will instead fallback to `open("/proc")`. + +[CVE-2024-21626]: https://github.com/opencontainers/runc/security/advisories/GHSA-xr7r-f8xq-vfvv +[libpathrs]: https://github.com/cyphar/libpathrs +[statx.2]: https://www.man7.org/linux/man-pages/man2/statx.2.html + ## [0.4.1] - 2025-01-28 ## ### Fixed ### @@ -173,7 +289,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). safe to start migrating to as we have extensive tests ensuring they behave correctly and are safe against various races and other attacks. -[libpathrs]: https://github.com/openSUSE/libpathrs +[libpathrs]: https://github.com/cyphar/libpathrs [open.2]: https://www.man7.org/linux/man-pages/man2/open.2.html ## [0.2.5] - 2024-05-03 ## @@ -238,7 +354,8 @@ This is our first release of `github.com/cyphar/filepath-securejoin`, containing a full implementation with a coverage of 93.5% (the only missing cases are the error cases, which are hard to mocktest at the moment). -[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...HEAD +[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.0...HEAD +[0.5.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...v0.5.0 [0.4.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.0...v0.4.1 [0.4.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...v0.4.0 [0.3.6]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.5...v0.3.6 diff --git a/vendor/github.com/cyphar/filepath-securejoin/COPYING.md b/vendor/github.com/cyphar/filepath-securejoin/COPYING.md new file mode 100644 index 00000000..520e822b --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/COPYING.md @@ -0,0 +1,447 @@ +## COPYING ## + +`SPDX-License-Identifier: BSD-3-Clause AND MPL-2.0` + +This project is made up of code licensed under different licenses. 
Which code +you use will have an impact on whether only one or both licenses apply to your +usage of this library. + +Note that **each file** in this project individually has a code comment at the +start describing the license of that particular file -- this is the most +accurate license information of this project; in case there is any conflict +between this document and the comment at the start of a file, the comment shall +take precedence. The only purpose of this document is to work around [a known +technical limitation of pkg.go.dev's license checking tool when dealing with +non-trivial project licenses][go75067]. + +[go75067]: https://go.dev/issue/75067 + +### `BSD-3-Clause` ### + +At time of writing, the following files and directories are licensed under the +BSD-3-Clause license: + + * `doc.go` + * `join*.go` + * `vfs.go` + * `internal/consts/*.go` + * `pathrs-lite/internal/gocompat/*.go` + * `pathrs-lite/internal/kernelversion/*.go` + +The text of the BSD-3-Clause license used by this project is the following (the +text is also available from the [`LICENSE.BSD`](./LICENSE.BSD) file): + +``` +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017-2024 SUSE LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +``` + +### `MPL-2.0` ### + +All other files (unless otherwise marked) are licensed under the Mozilla Public +License (version 2.0). + +The text of the Mozilla Public License (version 2.0) is the following (the text +is also available from the [`LICENSE.MPL-2.0`](./LICENSE.MPL-2.0) file): + +``` +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. 
Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. 
If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at https://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. +``` diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE.BSD similarity index 100% rename from vendor/github.com/cyphar/filepath-securejoin/LICENSE rename to vendor/github.com/cyphar/filepath-securejoin/LICENSE.BSD diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE.MPL-2.0 b/vendor/github.com/cyphar/filepath-securejoin/LICENSE.MPL-2.0 new file mode 100644 index 00000000..d0a1fa14 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE.MPL-2.0 @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. 
Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. 
If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at https://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md index eaeb53fc..6673abfc 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/README.md +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -67,7 +67,8 @@ func SecureJoin(root, unsafePath string) (string, error) { [libpathrs]: https://github.com/openSUSE/libpathrs [go#20126]: https://github.com/golang/go/issues/20126 -### New API ### +### New API ### +[#new-api]: #new-api While we recommend users switch to [libpathrs][libpathrs] as soon as it has a stable release, some methods implemented by libpathrs have been ported to this @@ -165,5 +166,19 @@ after `MkdirAll`). ### License ### -The license of this project is the same as Go, which is a BSD 3-clause license -available in the `LICENSE` file. +`SPDX-License-Identifier: BSD-3-Clause AND MPL-2.0` + +Some of the code in this project is derived from Go, and is licensed under a +BSD 3-clause license (available in `LICENSE.BSD`). Other files (many of which +are derived from [libpathrs][libpathrs]) are licensed under the Mozilla Public +License version 2.0 (available in `LICENSE.MPL-2.0`). 
If you are using the +["New API" described above][#new-api], you are probably using code from files +released under this license. + +Every source file in this project has a copyright header describing its +license. Please check the license headers of each file to see what license +applies to it. + +See [COPYING.md](./COPYING.md) for some more details. + +[umoci]: https://github.com/opencontainers/umoci diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index 267577d4..8f0916f7 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.4.1 +0.5.0 diff --git a/vendor/github.com/cyphar/filepath-securejoin/codecov.yml b/vendor/github.com/cyphar/filepath-securejoin/codecov.yml new file mode 100644 index 00000000..ff284dbf --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/codecov.yml @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: MPL-2.0 + +# Copyright (C) 2025 Aleksa Sarai +# Copyright (C) 2025 SUSE LLC +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + +comment: + layout: "condensed_header, reach, diff, components, condensed_files, condensed_footer" + require_changes: true + branches: + - main + +coverage: + range: 60..100 + status: + project: + default: + target: 85% + threshold: 0% + patch: + default: + target: auto + informational: true + +github_checks: + annotations: false diff --git a/vendor/github.com/cyphar/filepath-securejoin/deprecated_linux.go b/vendor/github.com/cyphar/filepath-securejoin/deprecated_linux.go new file mode 100644 index 00000000..3e427b16 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/deprecated_linux.go @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +package securejoin + +import ( + "github.com/cyphar/filepath-securejoin/pathrs-lite" +) + +var ( + // MkdirAll is a wrapper around [pathrs.MkdirAll]. + // + // Deprecated: You should use [pathrs.MkdirAll] directly instead. This + // wrapper will be removed in filepath-securejoin v0.6. + MkdirAll = pathrs.MkdirAll + + // MkdirAllHandle is a wrapper around [pathrs.MkdirAllHandle]. + // + // Deprecated: You should use [pathrs.MkdirAllHandle] directly instead. + // This wrapper will be removed in filepath-securejoin v0.6. + MkdirAllHandle = pathrs.MkdirAllHandle + + // OpenInRoot is a wrapper around [pathrs.OpenInRoot]. + // + // Deprecated: You should use [pathrs.OpenInRoot] directly instead. This + // wrapper will be removed in filepath-securejoin v0.6. + OpenInRoot = pathrs.OpenInRoot + + // OpenatInRoot is a wrapper around [pathrs.OpenatInRoot]. + // + // Deprecated: You should use [pathrs.OpenatInRoot] directly instead. This + // wrapper will be removed in filepath-securejoin v0.6. + OpenatInRoot = pathrs.OpenatInRoot + + // Reopen is a wrapper around [pathrs.Reopen]. + // + // Deprecated: You should use [pathrs.Reopen] directly instead. This + // wrapper will be removed in filepath-securejoin v0.6. 
+ Reopen = pathrs.Reopen +) diff --git a/vendor/github.com/cyphar/filepath-securejoin/doc.go b/vendor/github.com/cyphar/filepath-securejoin/doc.go index 1ec7d065..1438fc9c 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/doc.go +++ b/vendor/github.com/cyphar/filepath-securejoin/doc.go @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause + // Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. // Copyright (C) 2017-2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style @@ -14,14 +16,13 @@ // **not** safe against race conditions where an attacker changes the // filesystem after (or during) the [SecureJoin] operation. // -// The new API is made up of [OpenInRoot] and [MkdirAll] (and derived -// functions). These are safe against racing attackers and have several other -// protections that are not provided by the legacy API. There are many more -// operations that most programs expect to be able to do safely, but we do not -// provide explicit support for them because we want to encourage users to -// switch to [libpathrs](https://github.com/openSUSE/libpathrs) which is a -// cross-language next-generation library that is entirely designed around -// operating on paths safely. +// The new API is available in the [pathrs-lite] subpackage, and provide +// protections against racing attackers as well as several other key +// protections against attacks often seen by container runtimes. As the name +// suggests, [pathrs-lite] is a stripped down (pure Go) reimplementation of +// [libpathrs]. The main APIs provided are [OpenInRoot], [MkdirAll], and +// [procfs.Handle] -- other APIs are not planned to be ported. The long-term +// goal is for users to migrate to [libpathrs] which is more fully-featured. // // securejoin has been used by several container runtimes (Docker, runc, // Kubernetes, etc) for quite a few years as a de-facto standard for operating @@ -31,9 +32,16 @@ // API as soon as possible (or even better, switch to libpathrs). // // This project was initially intended to be included in the Go standard -// library, but [it was rejected](https://go.dev/issue/20126). There is now a -// [new Go proposal](https://go.dev/issue/67002) for a safe path resolution API -// that shares some of the goals of filepath-securejoin. However, that design -// is intended to work like `openat2(RESOLVE_BENEATH)` which does not fit the -// usecase of container runtimes and most system tools. +// library, but it was rejected (see https://go.dev/issue/20126). Much later, +// [os.Root] was added to the Go stdlib that shares some of the goals of +// filepath-securejoin. However, its design is intended to work like +// openat2(RESOLVE_BENEATH) which does not fit the usecase of container +// runtimes and most system tools. 
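
To make the migration described in the rewritten `doc.go` comment above concrete, here is a minimal, illustrative sketch of the `pathrs-lite` flow. It is not taken from this diff: it assumes the functions keep the signatures implied by the deprecated top-level wrappers in `deprecated_linux.go` earlier in this change (`OpenInRoot(root, unsafePath string) (*os.File, error)` and `Reopen(handle *os.File, flags int) (*os.File, error)`), and `/srv/root` and `etc/passwd` are placeholder values.

```go
// Illustrative sketch only -- assumes pathrs-lite keeps the signatures
// implied by the deprecated wrappers in this diff:
//   OpenInRoot(root, unsafePath string) (*os.File, error)
//   Reopen(handle *os.File, flags int) (*os.File, error)
// The root path and file name are placeholders.
package main

import (
	"fmt"
	"io"

	pathrs "github.com/cyphar/filepath-securejoin/pathrs-lite"
	"golang.org/x/sys/unix"
)

func main() {
	// Resolve "etc/passwd" strictly inside /srv/root. Per the doc comment
	// above, this API (unlike the legacy lexical SecureJoin) is designed to
	// be safe against attackers racing us with symlink swaps.
	handle, err := pathrs.OpenInRoot("/srv/root", "etc/passwd")
	if err != nil {
		panic(err)
	}
	defer handle.Close()

	// The handle is only a reference to the resolved file; re-open it with
	// the access mode we actually want before using it.
	f, err := pathrs.Reopen(handle, unix.O_RDONLY)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	data, err := io.ReadAll(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(data))
}
```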
+// +// [pathrs-lite]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite +// [libpathrs]: https://github.com/openSUSE/libpathrs +// [OpenInRoot]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite#OpenInRoot +// [MkdirAll]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite#MkdirAll +// [procfs.Handle]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite/procfs#Handle +// [os.Root]: https:///pkg.go.dev/os#Root package securejoin diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go deleted file mode 100644 index ddd6fa9a..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build linux && go1.21 - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "slices" - "sync" -) - -func slices_DeleteFunc[S ~[]E, E any](slice S, delFn func(E) bool) S { - return slices.DeleteFunc(slice, delFn) -} - -func slices_Contains[S ~[]E, E comparable](slice S, val E) bool { - return slices.Contains(slice, val) -} - -func slices_Clone[S ~[]E, E any](slice S) S { - return slices.Clone(slice) -} - -func sync_OnceValue[T any](f func() T) func() T { - return sync.OnceValue(f) -} - -func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { - return sync.OnceValues(f) -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go deleted file mode 100644 index f1e6fe7e..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go +++ /dev/null @@ -1,124 +0,0 @@ -//go:build linux && !go1.21 - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "sync" -) - -// These are very minimal implementations of functions that appear in Go 1.21's -// stdlib, included so that we can build on older Go versions. Most are -// borrowed directly from the stdlib, and a few are modified to be "obviously -// correct" without needing to copy too many other helpers. - -// clearSlice is equivalent to the builtin clear from Go 1.21. -// Copied from the Go 1.24 stdlib implementation. -func clearSlice[S ~[]E, E any](slice S) { - var zero E - for i := range slice { - slice[i] = zero - } -} - -// Copied from the Go 1.24 stdlib implementation. -func slices_IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { - for i := range s { - if f(s[i]) { - return i - } - } - return -1 -} - -// Copied from the Go 1.24 stdlib implementation. -func slices_DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { - i := slices_IndexFunc(s, del) - if i == -1 { - return s - } - // Don't start copying elements until we find one to delete. - for j := i + 1; j < len(s); j++ { - if v := s[j]; !del(v) { - s[i] = v - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] -} - -// Similar to the stdlib slices.Contains, except that we don't have -// slices.Index so we need to use slices.IndexFunc for this non-Func helper. 
-func slices_Contains[S ~[]E, E comparable](s S, v E) bool { - return slices_IndexFunc(s, func(e E) bool { return e == v }) >= 0 -} - -// Copied from the Go 1.24 stdlib implementation. -func slices_Clone[S ~[]E, E any](s S) S { - // Preserve nil in case it matters. - if s == nil { - return nil - } - return append(S([]E{}), s...) -} - -// Copied from the Go 1.24 stdlib implementation. -func sync_OnceValue[T any](f func() T) func() T { - var ( - once sync.Once - valid bool - p any - result T - ) - g := func() { - defer func() { - p = recover() - if !valid { - panic(p) - } - }() - result = f() - f = nil - valid = true - } - return func() T { - once.Do(g) - if !valid { - panic(p) - } - return result - } -} - -// Copied from the Go 1.24 stdlib implementation. -func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { - var ( - once sync.Once - valid bool - p any - r1 T1 - r2 T2 - ) - g := func() { - defer func() { - p = recover() - if !valid { - panic(p) - } - }() - r1, r2 = f() - f = nil - valid = true - } - return func() (T1, T2) { - once.Do(g) - if !valid { - panic(p) - } - return r1, r2 - } -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/internal/consts/consts.go b/vendor/github.com/cyphar/filepath-securejoin/internal/consts/consts.go new file mode 100644 index 00000000..c69c4da9 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/internal/consts/consts.go @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: BSD-3-Clause + +// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +// Copyright (C) 2017-2025 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package consts contains the definitions of internal constants used +// throughout filepath-securejoin. +package consts + +// MaxSymlinkLimit is the maximum number of symlinks that can be encountered +// during a single lookup before returning -ELOOP. At time of writing, Linux +// has an internal limit of 40. +const MaxSymlinkLimit = 255 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go index e6634d47..199c1d83 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/join.go +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause + // Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. // Copyright (C) 2017-2025 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style @@ -11,9 +13,9 @@ import ( "path/filepath" "strings" "syscall" -) -const maxSymlinkLimit = 255 + "github.com/cyphar/filepath-securejoin/internal/consts" +) // IsNotExist tells you if err is an error that implies that either the path // accessed does not exist (or path components don't exist). This is @@ -49,12 +51,13 @@ func hasDotDot(path string) bool { return strings.Contains("/"+path+"/", "/../") } -// SecureJoinVFS joins the two given path components (similar to [filepath.Join]) except -// that the returned path is guaranteed to be scoped inside the provided root -// path (when evaluated). Any symbolic links in the path are evaluated with the -// given root treated as the root of the filesystem, similar to a chroot. The -// filesystem state is evaluated through the given [VFS] interface (if nil, the -// standard [os].* family of functions are used). 
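
For context on the `join.go` hunk that follows, here is a minimal sketch of the legacy lexical API it documents. The `SecureJoin` and `SecureJoinVFS` signatures are taken from the context lines of this diff; the root and joined path are placeholder values, and (as the doc comment notes) the result is not safe against concurrent modification of the filesystem.

```go
// Minimal sketch of the legacy (lexical) API documented in this hunk.
// /srv/root and the joined path are placeholders.
package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// Lexically resolve "a/b/../etc/passwd" as if /srv/root were "/".
	// Symlinks inside the root are followed, but the returned path can
	// never point outside /srv/root. This is NOT race-safe: the tree may
	// be changed by an attacker after SecureJoin returns.
	path, err := securejoin.SecureJoin("/srv/root", "a/b/../etc/passwd")
	if err != nil {
		panic(err)
	}
	fmt.Println(path)

	// SecureJoinVFS is the same operation with a pluggable VFS; passing
	// nil uses the standard os.* functions (per the comment in this hunk),
	// which is what SecureJoin does.
	same, err := securejoin.SecureJoinVFS("/srv/root", "a/b/../etc/passwd", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(same)
}
```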
+// SecureJoinVFS joins the two given path components (similar to +// [filepath.Join]) except that the returned path is guaranteed to be scoped +// inside the provided root path (when evaluated). Any symbolic links in the +// path are evaluated with the given root treated as the root of the +// filesystem, similar to a chroot. The filesystem state is evaluated through +// the given [VFS] interface (if nil, the standard [os].* family of functions +// are used). // // Note that the guarantees provided by this function only apply if the path // components in the returned string are not modified (in other words are not @@ -78,7 +81,7 @@ func hasDotDot(path string) bool { // fully resolved using [filepath.EvalSymlinks] or otherwise constructed to // avoid containing symlink components. Of course, the root also *must not* be // attacker-controlled. -func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { +func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { //nolint:revive // name is part of public API // The root path must not contain ".." components, otherwise when we join // the subpath we will end up with a weird path. We could work around this // in other ways but users shouldn't be giving us non-lexical root paths in @@ -138,7 +141,7 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { // It's a symlink, so get its contents and expand it by prepending it // to the yet-unparsed path. linksWalked++ - if linksWalked > maxSymlinkLimit { + if linksWalked > consts.MaxSymlinkLimit { return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP} } diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go deleted file mode 100644 index f7a13e69..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go +++ /dev/null @@ -1,127 +0,0 @@ -//go:build linux - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "golang.org/x/sys/unix" -) - -var hasOpenat2 = sync_OnceValue(func() bool { - fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT, - }) - if err != nil { - return false - } - _ = unix.Close(fd) - return true -}) - -func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool { - // RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve - // ".." while a mount or rename occurs anywhere on the system. This could - // happen spuriously, or as the result of an attacker trying to mess with - // us during lookup. - // - // In addition, scoped lookups have a "safety check" at the end of - // complete_walk which will return -EXDEV if the final path is not in the - // root. - return how.Resolve&(unix.RESOLVE_IN_ROOT|unix.RESOLVE_BENEATH) != 0 && - (errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EXDEV)) -} - -const scopedLookupMaxRetries = 10 - -func openat2File(dir *os.File, path string, how *unix.OpenHow) (*os.File, error) { - fullPath := dir.Name() + "/" + path - // Make sure we always set O_CLOEXEC. 
- how.Flags |= unix.O_CLOEXEC - var tries int - for tries < scopedLookupMaxRetries { - fd, err := unix.Openat2(int(dir.Fd()), path, how) - if err != nil { - if scopedLookupShouldRetry(how, err) { - // We retry a couple of times to avoid the spurious errors, and - // if we are being attacked then returning -EAGAIN is the best - // we can do. - tries++ - continue - } - return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: err} - } - // If we are using RESOLVE_IN_ROOT, the name we generated may be wrong. - // NOTE: The procRoot code MUST NOT use RESOLVE_IN_ROOT, otherwise - // you'll get infinite recursion here. - if how.Resolve&unix.RESOLVE_IN_ROOT == unix.RESOLVE_IN_ROOT { - if actualPath, err := rawProcSelfFdReadlink(fd); err == nil { - fullPath = actualPath - } - } - return os.NewFile(uintptr(fd), fullPath), nil - } - return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: errPossibleAttack} -} - -func lookupOpenat2(root *os.File, unsafePath string, partial bool) (*os.File, string, error) { - if !partial { - file, err := openat2File(root, unsafePath, &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, - }) - return file, "", err - } - return partialLookupOpenat2(root, unsafePath) -} - -// partialLookupOpenat2 is an alternative implementation of -// partialLookupInRoot, using openat2(RESOLVE_IN_ROOT) to more safely get a -// handle to the deepest existing child of the requested path within the root. -func partialLookupOpenat2(root *os.File, unsafePath string) (*os.File, string, error) { - // TODO: Implement this as a git-bisect-like binary search. - - unsafePath = filepath.ToSlash(unsafePath) // noop - endIdx := len(unsafePath) - var lastError error - for endIdx > 0 { - subpath := unsafePath[:endIdx] - - handle, err := openat2File(root, subpath, &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, - }) - if err == nil { - // Jump over the slash if we have a non-"" remainingPath. - if endIdx < len(unsafePath) { - endIdx += 1 - } - // We found a subpath! - return handle, unsafePath[endIdx:], lastError - } - if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENOTDIR) { - // That path doesn't exist, let's try the next directory up. - endIdx = strings.LastIndexByte(subpath, '/') - lastError = err - continue - } - return nil, "", fmt.Errorf("open subpath: %w", err) - } - // If we couldn't open anything, the whole subpath is missing. Return a - // copy of the root fd so that the caller doesn't close this one by - // accident. - rootClone, err := dupFile(root) - if err != nil { - return nil, "", err - } - return rootClone, unsafePath, lastError -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go deleted file mode 100644 index 949fb5f2..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go +++ /dev/null @@ -1,59 +0,0 @@ -//go:build linux - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package securejoin - -import ( - "os" - "path/filepath" - - "golang.org/x/sys/unix" -) - -func dupFile(f *os.File) (*os.File, error) { - fd, err := unix.FcntlInt(f.Fd(), unix.F_DUPFD_CLOEXEC, 0) - if err != nil { - return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err) - } - return os.NewFile(uintptr(fd), f.Name()), nil -} - -func openatFile(dir *os.File, path string, flags int, mode int) (*os.File, error) { - // Make sure we always set O_CLOEXEC. - flags |= unix.O_CLOEXEC - fd, err := unix.Openat(int(dir.Fd()), path, flags, uint32(mode)) - if err != nil { - return nil, &os.PathError{Op: "openat", Path: dir.Name() + "/" + path, Err: err} - } - // All of the paths we use with openatFile(2) are guaranteed to be - // lexically safe, so we can use path.Join here. - fullPath := filepath.Join(dir.Name(), path) - return os.NewFile(uintptr(fd), fullPath), nil -} - -func fstatatFile(dir *os.File, path string, flags int) (unix.Stat_t, error) { - var stat unix.Stat_t - if err := unix.Fstatat(int(dir.Fd()), path, &stat, flags); err != nil { - return stat, &os.PathError{Op: "fstatat", Path: dir.Name() + "/" + path, Err: err} - } - return stat, nil -} - -func readlinkatFile(dir *os.File, path string) (string, error) { - size := 4096 - for { - linkBuf := make([]byte, size) - n, err := unix.Readlinkat(int(dir.Fd()), path, linkBuf) - if err != nil { - return "", &os.PathError{Op: "readlinkat", Path: dir.Name() + "/" + path, Err: err} - } - if n != size { - return string(linkBuf[:n]), nil - } - // Possible truncation, resize the buffer. - size *= 2 - } -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/README.md b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/README.md new file mode 100644 index 00000000..1be727e7 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/README.md @@ -0,0 +1,33 @@ +## `pathrs-lite` ## + +`github.com/cyphar/filepath-securejoin/pathrs-lite` provides a minimal **pure +Go** implementation of the core bits of [libpathrs][]. This is not intended to +be a complete replacement for libpathrs, instead it is mainly intended to be +useful as a transition tool for existing Go projects. + +The long-term plan for `pathrs-lite` is to provide a build tag that will cause +all `pathrs-lite` operations to call into libpathrs directly, thus removing +code duplication for projects that wish to make use of libpathrs (and providing +the ability for software packagers to opt-in to libpathrs support without +needing to patch upstream). + +[libpathrs]: https://github.com/cyphar/libpathrs + +### License ### + +Most of this subpackage is licensed under the Mozilla Public License (version +2.0). For more information, see the top-level [COPYING.md][] and +[LICENSE.MPL-2.0][] files, as well as the individual license headers for each +file. + +``` +Copyright (C) 2024-2025 Aleksa Sarai +Copyright (C) 2024-2025 SUSE LLC + +This Source Code Form is subject to the terms of the Mozilla Public +License, v. 2.0. If a copy of the MPL was not distributed with this +file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+``` + +[COPYING.md]: ../COPYING.md +[LICENSE.MPL-2.0]: ../LICENSE.MPL-2.0 diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/doc.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/doc.go new file mode 100644 index 00000000..d3d74517 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/doc.go @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Package pathrs (pathrs-lite) is a less complete pure Go implementation of +// some of the APIs provided by [libpathrs]. +package pathrs diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert/assert.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert/assert.go new file mode 100644 index 00000000..595dfbf1 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert/assert.go @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: MPL-2.0 + +// Copyright (C) 2025 Aleksa Sarai +// Copyright (C) 2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Package assert provides some basic assertion helpers for Go. +package assert + +import ( + "fmt" +) + +// Assert panics if the predicate is false with the provided argument. +func Assert(predicate bool, msg any) { + if !predicate { + panic(msg) + } +} + +// Assertf panics if the predicate is false and formats the message using the +// same formatting as [fmt.Printf]. +// +// [fmt.Printf]: https://pkg.go.dev/fmt#Printf +func Assertf(predicate bool, fmtMsg string, args ...any) { + Assert(predicate, fmt.Sprintf(fmtMsg, args...)) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/errors.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/errors.go new file mode 100644 index 00000000..c26e440e --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/errors.go @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: MPL-2.0 + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Package internal contains unexported common code for filepath-securejoin. +package internal + +import ( + "errors" +) + +var ( + // ErrPossibleAttack indicates that some attack was detected. + ErrPossibleAttack = errors.New("possible attack detected") + + // ErrPossibleBreakout indicates that during an operation we ended up in a + // state that could be a breakout but we detected it. + ErrPossibleBreakout = errors.New("possible breakout detected") + + // ErrInvalidDirectory indicates an unlinked directory. + ErrInvalidDirectory = errors.New("wandered into deleted directory") + + // ErrDeletedInode indicates an unlinked file (non-directory). 
+ ErrDeletedInode = errors.New("cannot verify path of deleted inode") +) diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/at_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/at_linux.go new file mode 100644 index 00000000..09105491 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/at_linux.go @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +package fd + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + + "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" +) + +// prepareAtWith returns -EBADF (an invalid fd) if dir is nil, otherwise using +// the dir.Fd(). We use -EBADF because in filepath-securejoin we generally +// don't want to allow relative-to-cwd paths. The returned path is an +// *informational* string that describes a reasonable pathname for the given +// *at(2) arguments. You must not use the full path for any actual filesystem +// operations. +func prepareAt(dir Fd, path string) (dirFd int, unsafeUnmaskedPath string) { + dirFd, dirPath := -int(unix.EBADF), "." + if dir != nil { + dirFd, dirPath = int(dir.Fd()), dir.Name() + } + if !filepath.IsAbs(path) { + // only prepend the dirfd path for relative paths + path = dirPath + "/" + path + } + // NOTE: If path is "." or "", the returned path won't be filepath.Clean, + // but that's okay since this path is either used for errors (in which case + // a trailing "/" or "/." is important information) or will be + // filepath.Clean'd later (in the case of fd.Openat). + return dirFd, path +} + +// Openat is an [Fd]-based wrapper around unix.Openat. +func Openat(dir Fd, path string, flags int, mode int) (*os.File, error) { //nolint:unparam // wrapper func + dirFd, fullPath := prepareAt(dir, path) + // Make sure we always set O_CLOEXEC. + flags |= unix.O_CLOEXEC + fd, err := unix.Openat(dirFd, path, flags, uint32(mode)) + if err != nil { + return nil, &os.PathError{Op: "openat", Path: fullPath, Err: err} + } + runtime.KeepAlive(dir) + // openat is only used with lexically-safe paths so we can use + // filepath.Clean here, and also the path itself is not going to be used + // for actual path operations. + fullPath = filepath.Clean(fullPath) + return os.NewFile(uintptr(fd), fullPath), nil +} + +// Fstatat is an [Fd]-based wrapper around unix.Fstatat. +func Fstatat(dir Fd, path string, flags int) (unix.Stat_t, error) { + dirFd, fullPath := prepareAt(dir, path) + var stat unix.Stat_t + if err := unix.Fstatat(dirFd, path, &stat, flags); err != nil { + return stat, &os.PathError{Op: "fstatat", Path: fullPath, Err: err} + } + runtime.KeepAlive(dir) + return stat, nil +} + +// Faccessat is an [Fd]-based wrapper around unix.Faccessat. +func Faccessat(dir Fd, path string, mode uint32, flags int) error { + dirFd, fullPath := prepareAt(dir, path) + err := unix.Faccessat(dirFd, path, mode, flags) + if err != nil { + err = &os.PathError{Op: "faccessat", Path: fullPath, Err: err} + } + runtime.KeepAlive(dir) + return err +} + +// Readlinkat is an [Fd]-based wrapper around unix.Readlinkat. 
+func Readlinkat(dir Fd, path string) (string, error) { + dirFd, fullPath := prepareAt(dir, path) + size := 4096 + for { + linkBuf := make([]byte, size) + n, err := unix.Readlinkat(dirFd, path, linkBuf) + if err != nil { + return "", &os.PathError{Op: "readlinkat", Path: fullPath, Err: err} + } + runtime.KeepAlive(dir) + if n != size { + return string(linkBuf[:n]), nil + } + // Possible truncation, resize the buffer. + size *= 2 + } +} + +const ( + // STATX_MNT_ID_UNIQUE is provided in golang.org/x/sys@v0.20.0, but in order to + // avoid bumping the requirement for a single constant we can just define it + // ourselves. + _STATX_MNT_ID_UNIQUE = 0x4000 //nolint:revive // unix.* name + + // We don't care which mount ID we get. The kernel will give us the unique + // one if it is supported. If the kernel doesn't support + // STATX_MNT_ID_UNIQUE, the bit is ignored and the returned request mask + // will only contain STATX_MNT_ID (if supported). + wantStatxMntMask = _STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID +) + +var hasStatxMountID = gocompat.SyncOnceValue(func() bool { + var stx unix.Statx_t + err := unix.Statx(-int(unix.EBADF), "/", 0, wantStatxMntMask, &stx) + return err == nil && stx.Mask&wantStatxMntMask != 0 +}) + +// GetMountID gets the mount identifier associated with the fd and path +// combination. It is effectively a wrapper around fetching +// STATX_MNT_ID{,_UNIQUE} with unix.Statx, but with a fallback to 0 if the +// kernel doesn't support the feature. +func GetMountID(dir Fd, path string) (uint64, error) { + // If we don't have statx(STATX_MNT_ID*) support, we can't do anything. + if !hasStatxMountID() { + return 0, nil + } + + dirFd, fullPath := prepareAt(dir, path) + + var stx unix.Statx_t + err := unix.Statx(dirFd, path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, wantStatxMntMask, &stx) + if stx.Mask&wantStatxMntMask == 0 { + // It's not a kernel limitation, for some reason we couldn't get a + // mount ID. Assume it's some kind of attack. + err = fmt.Errorf("could not get mount id: %w", err) + } + if err != nil { + return 0, &os.PathError{Op: "statx(STATX_MNT_ID_...)", Path: fullPath, Err: err} + } + runtime.KeepAlive(dir) + return stx.Mnt_id, nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd.go new file mode 100644 index 00000000..d2206a38 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: MPL-2.0 + +// Copyright (C) 2025 Aleksa Sarai +// Copyright (C) 2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Package fd provides a drop-in interface-based replacement of [*os.File] that +// allows for things like noop-Close wrappers to be used. +// +// [*os.File]: https://pkg.go.dev/os#File +package fd + +import ( + "io" + "os" +) + +// Fd is an interface that mirrors most of the API of [*os.File], allowing you +// to create wrappers that can be used in place of [*os.File]. +// +// [*os.File]: https://pkg.go.dev/os#File +type Fd interface { + io.Closer + Name() string + Fd() uintptr +} + +// Compile-time interface checks. 
+var ( + _ Fd = (*os.File)(nil) + _ Fd = noClose{} +) + +type noClose struct{ inner Fd } + +func (f noClose) Name() string { return f.inner.Name() } +func (f noClose) Fd() uintptr { return f.inner.Fd() } + +func (f noClose) Close() error { return nil } + +// NopCloser returns an [*os.File]-like object where the [Close] method is now +// a no-op. +// +// Note that for [*os.File] and similar objects, the Go garbage collector will +// still call [Close] on the underlying file unless you use +// [runtime.SetFinalizer] to disable this behaviour. This is up to the caller +// to do (if necessary). +// +// [*os.File]: https://pkg.go.dev/os#File +// [Close]: https://pkg.go.dev/io#Closer +// [runtime.SetFinalizer]: https://pkg.go.dev/runtime#SetFinalizer +func NopCloser(f Fd) Fd { return noClose{inner: f} } diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd_linux.go new file mode 100644 index 00000000..e1ec3c0b --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd_linux.go @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +package fd + +import ( + "fmt" + "os" + "runtime" + + "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal" +) + +// DupWithName creates a new file descriptor referencing the same underlying +// file, but with the provided name instead of fd.Name(). +func DupWithName(fd Fd, name string) (*os.File, error) { + fd2, err := unix.FcntlInt(fd.Fd(), unix.F_DUPFD_CLOEXEC, 0) + if err != nil { + return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err) + } + runtime.KeepAlive(fd) + return os.NewFile(uintptr(fd2), name), nil +} + +// Dup creates a new file description referencing the same underlying file. +func Dup(fd Fd) (*os.File, error) { + return DupWithName(fd, fd.Name()) +} + +// Fstat is an [Fd]-based wrapper around unix.Fstat. +func Fstat(fd Fd) (unix.Stat_t, error) { + var stat unix.Stat_t + if err := unix.Fstat(int(fd.Fd()), &stat); err != nil { + return stat, &os.PathError{Op: "fstat", Path: fd.Name(), Err: err} + } + runtime.KeepAlive(fd) + return stat, nil +} + +// Fstatfs is an [Fd]-based wrapper around unix.Fstatfs. +func Fstatfs(fd Fd) (unix.Statfs_t, error) { + var statfs unix.Statfs_t + if err := unix.Fstatfs(int(fd.Fd()), &statfs); err != nil { + return statfs, &os.PathError{Op: "fstatfs", Path: fd.Name(), Err: err} + } + runtime.KeepAlive(fd) + return statfs, nil +} + +// IsDeadInode detects whether the file has been unlinked from a filesystem and +// is thus a "dead inode" from the kernel's perspective. +func IsDeadInode(file Fd) error { + // If the nlink of a file drops to 0, there is an attacker deleting + // directories during our walk, which could result in weird /proc values. + // It's better to error out in this case. 
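A small sketch of the `fd.Fd` interface and `NopCloser` in practice; `closeWhenDone` is a hypothetical helper invented for the example, and the snippet is written as if it lived inside the module since the package is internal.

```go
package main

import (
	"fmt"
	"os"

	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd"
)

// closeWhenDone is a hypothetical helper that takes ownership of the handle
// it is given and closes it when finished.
func closeWhenDone(f fd.Fd) {
	defer f.Close()
	fmt.Println("working on", f.Name())
}

func main() {
	root, err := os.Open("/")
	if err != nil {
		panic(err)
	}
	defer root.Close()

	// NopCloser makes the helper's Close() a no-op, so the caller keeps
	// control over when the real file descriptor is closed.
	closeWhenDone(fd.NopCloser(root))

	// The original handle is still usable afterwards.
	st, err := fd.Fstat(root)
	if err != nil {
		panic(err)
	}
	fmt.Printf("/ has inode %d\n", st.Ino)
}
```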
+ stat, err := Fstat(file) + if err != nil { + return fmt.Errorf("check for dead inode: %w", err) + } + if stat.Nlink == 0 { + err := internal.ErrDeletedInode + if stat.Mode&unix.S_IFMT == unix.S_IFDIR { + err = internal.ErrInvalidDirectory + } + return fmt.Errorf("%w %q", err, file.Name()) + } + return nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/mount_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/mount_linux.go new file mode 100644 index 00000000..77549c7a --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/mount_linux.go @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +package fd + +import ( + "os" + "runtime" + + "golang.org/x/sys/unix" +) + +// Fsopen is an [Fd]-based wrapper around unix.Fsopen. +func Fsopen(fsName string, flags int) (*os.File, error) { + // Make sure we always set O_CLOEXEC. + flags |= unix.FSOPEN_CLOEXEC + fd, err := unix.Fsopen(fsName, flags) + if err != nil { + return nil, os.NewSyscallError("fsopen "+fsName, err) + } + return os.NewFile(uintptr(fd), "fscontext:"+fsName), nil +} + +// Fsmount is an [Fd]-based wrapper around unix.Fsmount. +func Fsmount(ctx Fd, flags, mountAttrs int) (*os.File, error) { + // Make sure we always set O_CLOEXEC. + flags |= unix.FSMOUNT_CLOEXEC + fd, err := unix.Fsmount(int(ctx.Fd()), flags, mountAttrs) + if err != nil { + return nil, os.NewSyscallError("fsmount "+ctx.Name(), err) + } + return os.NewFile(uintptr(fd), "fsmount:"+ctx.Name()), nil +} + +// OpenTree is an [Fd]-based wrapper around unix.OpenTree. +func OpenTree(dir Fd, path string, flags uint) (*os.File, error) { + dirFd, fullPath := prepareAt(dir, path) + // Make sure we always set O_CLOEXEC. + flags |= unix.OPEN_TREE_CLOEXEC + fd, err := unix.OpenTree(dirFd, path, flags) + if err != nil { + return nil, &os.PathError{Op: "open_tree", Path: fullPath, Err: err} + } + runtime.KeepAlive(dir) + return os.NewFile(uintptr(fd), fullPath), nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/openat2_linux.go new file mode 100644 index 00000000..23053083 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/openat2_linux.go @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +package fd + +import ( + "errors" + "os" + "runtime" + + "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal" +) + +func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool { + // RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve + // ".." while a mount or rename occurs anywhere on the system. This could + // happen spuriously, or as the result of an attacker trying to mess with + // us during lookup. 
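The fsopen/fsmount wrappers above follow the same pattern the procfs code uses later in this diff. A hedged sketch of creating a detached mount with them (assumes a 5.2+ kernel and CAP_SYS_ADMIN; as an internal package this cannot actually be imported from outside the module):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"

	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd"
)

func main() {
	// Start a new filesystem context for procfs.
	ctx, err := fd.Fsopen("proc", unix.FSOPEN_CLOEXEC)
	if err != nil {
		fmt.Println("fsopen unavailable:", err)
		return
	}
	defer ctx.Close()

	// Instantiate the superblock, then turn it into a detached mount.
	if err := unix.FsconfigCreate(int(ctx.Fd())); err != nil {
		fmt.Println("fsconfig create failed:", err)
		return
	}
	mnt, err := fd.Fsmount(ctx, unix.FSMOUNT_CLOEXEC, unix.MS_NODEV|unix.MS_NOEXEC|unix.MS_NOSUID)
	if err != nil {
		fmt.Println("fsmount failed:", err)
		return
	}
	defer mnt.Close()

	fmt.Println("detached mount handle:", mnt.Name())
}
```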
+ // + // In addition, scoped lookups have a "safety check" at the end of + // complete_walk which will return -EXDEV if the final path is not in the + // root. + return how.Resolve&(unix.RESOLVE_IN_ROOT|unix.RESOLVE_BENEATH) != 0 && + (errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EXDEV)) +} + +const scopedLookupMaxRetries = 32 + +// Openat2 is an [Fd]-based wrapper around unix.Openat2, but with some retry +// logic in case of EAGAIN errors. +func Openat2(dir Fd, path string, how *unix.OpenHow) (*os.File, error) { + dirFd, fullPath := prepareAt(dir, path) + // Make sure we always set O_CLOEXEC. + how.Flags |= unix.O_CLOEXEC + var tries int + for tries < scopedLookupMaxRetries { + fd, err := unix.Openat2(dirFd, path, how) + if err != nil { + if scopedLookupShouldRetry(how, err) { + // We retry a couple of times to avoid the spurious errors, and + // if we are being attacked then returning -EAGAIN is the best + // we can do. + tries++ + continue + } + return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: err} + } + runtime.KeepAlive(dir) + return os.NewFile(uintptr(fd), fullPath), nil + } + return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: internal.ErrPossibleAttack} +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/README.md b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/README.md new file mode 100644 index 00000000..5dcb6ae0 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/README.md @@ -0,0 +1,10 @@ +## gocompat ## + +This directory contains backports of stdlib functions from later Go versions so +the filepath-securejoin can continue to be used by projects that are stuck with +Go 1.18 support. Note that often filepath-securejoin is added in security +patches for old releases, so avoiding the need to bump Go compiler requirements +is a huge plus to downstreams. + +The source code is licensed under the same license as the Go stdlib. See the +source files for the precise license information. diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/doc.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/doc.go new file mode 100644 index 00000000..4b1803f5 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/doc.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux && go1.20 + +// Copyright (C) 2025 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gocompat includes compatibility shims (backported from future Go +// stdlib versions) to permit filepath-securejoin to be used with older Go +// versions (often filepath-securejoin is added in security patches for old +// releases, so avoiding the need to bump Go compiler requirements is a huge +// plus to downstreams). 
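A minimal sketch of the `fd.Openat2` wrapper defined earlier, using RESOLVE_IN_ROOT-style scoping; `/tmp` and `foo/bar` are arbitrary example paths, and as with the other internal packages the standalone program is illustrative only.

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"

	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd"
)

func main() {
	root, err := os.Open("/tmp")
	if err != nil {
		panic(err)
	}
	defer root.Close()

	// Resolve "foo/bar" strictly inside /tmp: ".." cannot escape the root and
	// magic-links are refused. The wrapper transparently retries the handful
	// of spurious -EAGAIN/-EXDEV results that scoped lookups can produce.
	f, err := fd.Openat2(root, "foo/bar", &unix.OpenHow{
		Flags:   unix.O_PATH | unix.O_NOFOLLOW,
		Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS,
	})
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	defer f.Close()
	fmt.Println("opened", f.Name())
}
```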
+package gocompat diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_go120.go similarity index 69% rename from vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go rename to vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_go120.go index 42452bbf..4a114bd3 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_go120.go @@ -1,18 +1,19 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux && go1.20 // Copyright (C) 2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package securejoin +package gocompat import ( "fmt" ) -// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except +// WrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except // that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap) // is only guaranteed to give you baseErr. -func wrapBaseError(baseErr, extraErr error) error { +func WrapBaseError(baseErr, extraErr error) error { return fmt.Errorf("%w: %w", extraErr, baseErr) } diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_unsupported.go similarity index 80% rename from vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go rename to vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_unsupported.go index e7adca3f..3061016a 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_unsupported.go @@ -1,10 +1,12 @@ +// SPDX-License-Identifier: BSD-3-Clause + //go:build linux && !go1.20 // Copyright (C) 2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package securejoin +package gocompat import ( "fmt" @@ -27,10 +29,10 @@ func (err wrappedError) Error() string { return fmt.Sprintf("%v: %v", err.isError, err.inner) } -// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except +// WrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except // that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap) // is only guaranteed to give you baseErr. -func wrapBaseError(baseErr, extraErr error) error { +func WrapBaseError(baseErr, extraErr error) error { return wrappedError{ inner: baseErr, isError: extraErr, diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_go121.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_go121.go new file mode 100644 index 00000000..d4a93818 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_go121.go @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && go1.21 + +// Copyright (C) 2024-2025 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gocompat + +import ( + "cmp" + "slices" + "sync" +) + +// SlicesDeleteFunc is equivalent to Go 1.21's slices.DeleteFunc. +func SlicesDeleteFunc[S ~[]E, E any](slice S, delFn func(E) bool) S { + return slices.DeleteFunc(slice, delFn) +} + +// SlicesContains is equivalent to Go 1.21's slices.Contains. +func SlicesContains[S ~[]E, E comparable](slice S, val E) bool { + return slices.Contains(slice, val) +} + +// SlicesClone is equivalent to Go 1.21's slices.Clone. +func SlicesClone[S ~[]E, E any](slice S) S { + return slices.Clone(slice) +} + +// SyncOnceValue is equivalent to Go 1.21's sync.OnceValue. +func SyncOnceValue[T any](f func() T) func() T { + return sync.OnceValue(f) +} + +// SyncOnceValues is equivalent to Go 1.21's sync.OnceValues. +func SyncOnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { + return sync.OnceValues(f) +} + +// CmpOrdered is equivalent to Go 1.21's cmp.Ordered generic type definition. +type CmpOrdered = cmp.Ordered + +// CmpCompare is equivalent to Go 1.21's cmp.Compare. +func CmpCompare[T CmpOrdered](x, y T) int { + return cmp.Compare(x, y) +} + +// Max2 is equivalent to Go 1.21's max builtin (but only for two parameters). +func Max2[T CmpOrdered](x, y T) T { + return max(x, y) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_unsupported.go new file mode 100644 index 00000000..0ea6218a --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_unsupported.go @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !go1.21 + +// Copyright (C) 2021, 2022 The Go Authors. All rights reserved. +// Copyright (C) 2024-2025 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.BSD file. + +package gocompat + +import ( + "sync" +) + +// These are very minimal implementations of functions that appear in Go 1.21's +// stdlib, included so that we can build on older Go versions. Most are +// borrowed directly from the stdlib, and a few are modified to be "obviously +// correct" without needing to copy too many other helpers. + +// clearSlice is equivalent to Go 1.21's builtin clear. +// Copied from the Go 1.24 stdlib implementation. +func clearSlice[S ~[]E, E any](slice S) { + var zero E + for i := range slice { + slice[i] = zero + } +} + +// slicesIndexFunc is equivalent to Go 1.21's slices.IndexFunc. +// Copied from the Go 1.24 stdlib implementation. +func slicesIndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { + return i + } + } + return -1 +} + +// SlicesDeleteFunc is equivalent to Go 1.21's slices.DeleteFunc. +// Copied from the Go 1.24 stdlib implementation. +func SlicesDeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := slicesIndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// SlicesContains is equivalent to Go 1.21's slices.Contains. +// Similar to the stdlib slices.Contains, except that we don't have +// slices.Index so we need to use slices.IndexFunc for this non-Func helper. 
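A quick sketch of the gocompat shims in use: on Go 1.21+ these are thin aliases for the stdlib, while older toolchains compile the hand-rolled fallbacks instead; `hasFeature` is just a placeholder name for the example.

```go
package main

import (
	"fmt"

	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
)

// hasFeature stands in for an expensive probe that should only ever run once.
var hasFeature = gocompat.SyncOnceValue(func() bool {
	fmt.Println("probing...")
	return true
})

func main() {
	// The probe body runs exactly once, no matter how often it is called.
	fmt.Println(hasFeature(), hasFeature())

	// Slice helpers mirror their Go 1.21 stdlib counterparts.
	parts := []string{"a", "", ".", "b"}
	parts = gocompat.SlicesDeleteFunc(parts, func(s string) bool {
		return s == "" || s == "."
	})
	fmt.Println(parts, gocompat.SlicesContains(parts, "b"), gocompat.Max2(3, 7))
}
```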
+func SlicesContains[S ~[]E, E comparable](s S, v E) bool { + return slicesIndexFunc(s, func(e E) bool { return e == v }) >= 0 +} + +// SlicesClone is equivalent to Go 1.21's slices.Clone. +// Copied from the Go 1.24 stdlib implementation. +func SlicesClone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// SyncOnceValue is equivalent to Go 1.21's sync.OnceValue. +// Copied from the Go 1.25 stdlib implementation. +func SyncOnceValue[T any](f func() T) func() T { + // Use a struct so that there's a single heap allocation. + d := struct { + f func() T + once sync.Once + valid bool + p any + result T + }{ + f: f, + } + return func() T { + d.once.Do(func() { + defer func() { + d.f = nil + d.p = recover() + if !d.valid { + panic(d.p) + } + }() + d.result = d.f() + d.valid = true + }) + if !d.valid { + panic(d.p) + } + return d.result + } +} + +// SyncOnceValues is equivalent to Go 1.21's sync.OnceValues. +// Copied from the Go 1.25 stdlib implementation. +func SyncOnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { + // Use a struct so that there's a single heap allocation. + d := struct { + f func() (T1, T2) + once sync.Once + valid bool + p any + r1 T1 + r2 T2 + }{ + f: f, + } + return func() (T1, T2) { + d.once.Do(func() { + defer func() { + d.f = nil + d.p = recover() + if !d.valid { + panic(d.p) + } + }() + d.r1, d.r2 = d.f() + d.valid = true + }) + if !d.valid { + panic(d.p) + } + return d.r1, d.r2 + } +} + +// CmpOrdered is equivalent to Go 1.21's cmp.Ordered generic type definition. +// Copied from the Go 1.25 stdlib implementation. +type CmpOrdered interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | + ~float32 | ~float64 | + ~string +} + +// isNaN reports whether x is a NaN without requiring the math package. +// This will always return false if T is not floating-point. +// Copied from the Go 1.25 stdlib implementation. +func isNaN[T CmpOrdered](x T) bool { + return x != x +} + +// CmpCompare is equivalent to Go 1.21's cmp.Compare. +// Copied from the Go 1.25 stdlib implementation. +func CmpCompare[T CmpOrdered](x, y T) int { + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN { + if yNaN { + return 0 + } + return -1 + } + if yNaN { + return +1 + } + if x < y { + return -1 + } + if x > y { + return +1 + } + return 0 +} + +// Max2 is equivalent to Go 1.21's max builtin for two parameters. +func Max2[T CmpOrdered](x, y T) T { + m := x + if y > m { + m = y + } + return m +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion/kernel_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion/kernel_linux.go new file mode 100644 index 00000000..cb6de418 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion/kernel_linux.go @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: BSD-3-Clause + +// Copyright (C) 2022 The Go Authors. All rights reserved. +// Copyright (C) 2025 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.BSD file. + +// The parsing logic is very loosely based on the Go stdlib's +// src/internal/syscall/unix/kernel_version_linux.go but with an API that looks +// a bit like runc's libcontainer/system/kernelversion. 
+// +// TODO(cyphar): This API has been copied around to a lot of different projects +// (Docker, containerd, runc, and now filepath-securejoin) -- maybe we should +// put it in a separate project? + +// Package kernelversion provides a simple mechanism for checking whether the +// running kernel is at least as new as some baseline kernel version. This is +// often useful when checking for features that would be too complicated to +// test support for (or in cases where we know that some kernel features in +// backport-heavy kernels are broken and need to be avoided). +package kernelversion + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" + + "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" +) + +// KernelVersion is a numeric representation of the key numerical elements of a +// kernel version (for instance, "4.1.2-default-1" would be represented as +// KernelVersion{4, 1, 2}). +type KernelVersion []uint64 + +func (kver KernelVersion) String() string { + var str strings.Builder + for idx, elem := range kver { + if idx != 0 { + _, _ = str.WriteRune('.') + } + _, _ = str.WriteString(strconv.FormatUint(elem, 10)) + } + return str.String() +} + +var errInvalidKernelVersion = errors.New("invalid kernel version") + +// parseKernelVersion parses a string and creates a KernelVersion based on it. +func parseKernelVersion(kverStr string) (KernelVersion, error) { + kver := make(KernelVersion, 1, 3) + for idx, ch := range kverStr { + if '0' <= ch && ch <= '9' { + v := &kver[len(kver)-1] + *v = (*v * 10) + uint64(ch-'0') + } else { + if idx == 0 || kverStr[idx-1] < '0' || '9' < kverStr[idx-1] { + // "." must be preceded by a digit while in version section + return nil, fmt.Errorf("%w %q: kernel version has dot(s) followed by non-digit in version section", errInvalidKernelVersion, kverStr) + } + if ch != '.' { + break + } + kver = append(kver, 0) + } + } + if len(kver) < 2 { + return nil, fmt.Errorf("%w %q: kernel versions must contain at least two components", errInvalidKernelVersion, kverStr) + } + return kver, nil +} + +// getKernelVersion gets the current kernel version. +var getKernelVersion = gocompat.SyncOnceValues(func() (KernelVersion, error) { + var uts unix.Utsname + if err := unix.Uname(&uts); err != nil { + return nil, err + } + // Remove the \x00 from the release. + release := uts.Release[:] + return parseKernelVersion(string(release[:bytes.IndexByte(release, 0)])) +}) + +// GreaterEqualThan returns true if the the host kernel version is greater than +// or equal to the provided [KernelVersion]. When doing this comparison, any +// non-numerical suffixes of the host kernel version are ignored. +// +// If the number of components provided is not equal to the number of numerical +// components of the host kernel version, any missing components are treated as +// 0. This means that GreaterEqualThan(KernelVersion{4}) will be treated the +// same as GreaterEqualThan(KernelVersion{4, 0, 0, ..., 0, 0}), and that if the +// host kernel version is "4" then GreaterEqualThan(KernelVersion{4, 1}) will +// return false (because the host version will be treated as "4.0"). +func GreaterEqualThan(wantKver KernelVersion) (bool, error) { + hostKver, err := getKernelVersion() + if err != nil { + return false, err + } + + // Pad out the kernel version lengths to match one another. + cmpLen := gocompat.Max2(len(hostKver), len(wantKver)) + hostKver = append(hostKver, make(KernelVersion, cmpLen-len(hostKver))...) 
+ wantKver = append(wantKver, make(KernelVersion, cmpLen-len(wantKver))...) + + for i := 0; i < cmpLen; i++ { + switch gocompat.CmpCompare(hostKver[i], wantKver[i]) { + case -1: + // host < want + return false, nil + case +1: + // host > want + return true, nil + case 0: + continue + } + } + // equal version values + return true, nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/doc.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/doc.go new file mode 100644 index 00000000..4635714f --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/doc.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: MPL-2.0 + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Package linux returns information about what features are supported on the +// running kernel. +package linux diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/mount_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/mount_linux.go new file mode 100644 index 00000000..b29905bf --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/mount_linux.go @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +package linux + +import ( + "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion" +) + +// HasNewMountAPI returns whether the new fsopen(2) mount API is supported on +// the running kernel. +var HasNewMountAPI = gocompat.SyncOnceValue(func() bool { + // All of the pieces of the new mount API we use (fsopen, fsconfig, + // fsmount, open_tree) were added together in Linux 5.2[1,2], so we can + // just check for one of the syscalls and the others should also be + // available. + // + // Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE. + // This is equivalent to openat(2), but tells us if open_tree is + // available (and thus all of the other basic new mount API syscalls). + // open_tree(2) is most light-weight syscall to test here. + // + // [1]: merge commit 400913252d09 + // [2]: + fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC) + if err != nil { + return false + } + _ = unix.Close(fd) + + // RHEL 8 has a backport of fsopen(2) that appears to have some very + // difficult to debug performance pathology. As such, it seems prudent to + // simply reject pre-5.2 kernels. 
+ isNotBackport, _ := kernelversion.GreaterEqualThan(kernelversion.KernelVersion{5, 2}) + return isNotBackport +}) diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/openat2_linux.go new file mode 100644 index 00000000..399609dc --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/openat2_linux.go @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +package linux + +import ( + "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" +) + +// HasOpenat2 returns whether openat2(2) is supported on the running kernel. +var HasOpenat2 = gocompat.SyncOnceValue(func() bool { + fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT, + }) + if err != nil { + return false + } + _ = unix.Close(fd) + return true +}) diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_linux.go new file mode 100644 index 00000000..21e0a62e --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_linux.go @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Package procfs provides a safe API for operating on /proc on Linux. Note +// that this is the *internal* procfs API, mainy needed due to Go's +// restrictions on cyclic dependencies and its incredibly minimal visibility +// system without making a separate internal/ package. +package procfs + +import ( + "errors" + "fmt" + "io" + "os" + "runtime" + "strconv" + + "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux" +) + +// The kernel guarantees that the root inode of a procfs mount has an +// f_type of PROC_SUPER_MAGIC and st_ino of PROC_ROOT_INO. +const ( + procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC + procRootIno = 1 // PROC_ROOT_INO +) + +// verifyProcHandle checks that the handle is from a procfs filesystem. +// Contrast this to [verifyProcRoot], which also verifies that the handle is +// the root of a procfs mount. 
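The feature probes above combine a one-shot runtime check with a kernel-version baseline. A small sketch of querying the same building blocks directly (internal packages, so illustrative only; 5.6 is used because that is when openat2(2) landed):

```go
package main

import (
	"fmt"

	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion"
	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux"
)

func main() {
	// Missing components are treated as 0, so {5, 6} behaves like "5.6.0".
	ok, err := kernelversion.GreaterEqualThan(kernelversion.KernelVersion{5, 6})
	fmt.Println("kernel >= 5.6:", ok, err)

	// The probes are sync.OnceValue-style funcs: the syscall test runs once
	// and the result is cached for subsequent calls.
	fmt.Println("openat2 usable:", linux.HasOpenat2())
	fmt.Println("new mount API usable:", linux.HasNewMountAPI())
}
```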
+func verifyProcHandle(procHandle fd.Fd) error { + if statfs, err := fd.Fstatfs(procHandle); err != nil { + return err + } else if statfs.Type != procSuperMagic { + return fmt.Errorf("%w: incorrect procfs root filesystem type 0x%x", errUnsafeProcfs, statfs.Type) + } + return nil +} + +// verifyProcRoot verifies that the handle is the root of a procfs filesystem. +// Contrast this to [verifyProcHandle], which only verifies if the handle is +// some file on procfs (regardless of what file it is). +func verifyProcRoot(procRoot fd.Fd) error { + if err := verifyProcHandle(procRoot); err != nil { + return err + } + if stat, err := fd.Fstat(procRoot); err != nil { + return err + } else if stat.Ino != procRootIno { + return fmt.Errorf("%w: incorrect procfs root inode number %d", errUnsafeProcfs, stat.Ino) + } + return nil +} + +type procfsFeatures struct { + // hasSubsetPid was added in Linux 5.8, along with hidepid=ptraceable (and + // string-based hidepid= values). Before this patchset, it was not really + // safe to try to modify procfs superblock flags because the superblock was + // shared -- so if this feature is not available, **you should not set any + // superblock flags**. + // + // 6814ef2d992a ("proc: add option to mount only a pids subset") + // fa10fed30f25 ("proc: allow to mount many instances of proc in one pid namespace") + // 24a71ce5c47f ("proc: instantiate only pids that we can ptrace on 'hidepid=4' mount option") + // 1c6c4d112e81 ("proc: use human-readable values for hidepid") + // 9ff7258575d5 ("Merge branch 'proc-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace") + hasSubsetPid bool +} + +var getProcfsFeatures = gocompat.SyncOnceValue(func() procfsFeatures { + if !linux.HasNewMountAPI() { + return procfsFeatures{} + } + procfsCtx, err := fd.Fsopen("proc", unix.FSOPEN_CLOEXEC) + if err != nil { + return procfsFeatures{} + } + defer procfsCtx.Close() //nolint:errcheck // close failures aren't critical here + + return procfsFeatures{ + hasSubsetPid: unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid") == nil, + } +}) + +func newPrivateProcMount(subset bool) (_ *Handle, Err error) { + procfsCtx, err := fd.Fsopen("proc", unix.FSOPEN_CLOEXEC) + if err != nil { + return nil, err + } + defer procfsCtx.Close() //nolint:errcheck // close failures aren't critical here + + if subset && getProcfsFeatures().hasSubsetPid { + // Try to configure hidepid=ptraceable,subset=pid if possible, but + // ignore errors. + _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "hidepid", "ptraceable") + _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid") + } + + // Get an actual handle. + if err := unix.FsconfigCreate(int(procfsCtx.Fd())); err != nil { + return nil, os.NewSyscallError("fsconfig create procfs", err) + } + // TODO: Output any information from the fscontext log to debug logs. + procRoot, err := fd.Fsmount(procfsCtx, unix.FSMOUNT_CLOEXEC, unix.MS_NODEV|unix.MS_NOEXEC|unix.MS_NOSUID) + if err != nil { + return nil, err + } + defer func() { + if Err != nil { + _ = procRoot.Close() + } + }() + return newHandle(procRoot) +} + +func clonePrivateProcMount() (_ *Handle, Err error) { + // Try to make a clone without using AT_RECURSIVE if we can. If this works, + // we can be sure there are no over-mounts and so if the root is valid then + // we're golden. Otherwise, we have to deal with over-mounts. 
+ procRoot, err := fd.OpenTree(nil, "/proc", unix.OPEN_TREE_CLONE) + if err != nil || hookForcePrivateProcRootOpenTreeAtRecursive(procRoot) { + procRoot, err = fd.OpenTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE) + } + if err != nil { + return nil, fmt.Errorf("creating a detached procfs clone: %w", err) + } + defer func() { + if Err != nil { + _ = procRoot.Close() + } + }() + return newHandle(procRoot) +} + +func privateProcRoot(subset bool) (*Handle, error) { + if !linux.HasNewMountAPI() || hookForceGetProcRootUnsafe() { + return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP) + } + // Try to create a new procfs mount from scratch if we can. This ensures we + // can get a procfs mount even if /proc is fake (for whatever reason). + procRoot, err := newPrivateProcMount(subset) + if err != nil || hookForcePrivateProcRootOpenTree(procRoot) { + // Try to clone /proc then... + procRoot, err = clonePrivateProcMount() + } + return procRoot, err +} + +func unsafeHostProcRoot() (_ *Handle, Err error) { + procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, err + } + defer func() { + if Err != nil { + _ = procRoot.Close() + } + }() + return newHandle(procRoot) +} + +// Handle is a wrapper around an *os.File handle to "/proc", which can be used +// to do further procfs-related operations in a safe way. +type Handle struct { + Inner fd.Fd + // Does this handle have subset=pid set? + isSubset bool +} + +func newHandle(procRoot fd.Fd) (*Handle, error) { + if err := verifyProcRoot(procRoot); err != nil { + // This is only used in methods that + _ = procRoot.Close() + return nil, err + } + proc := &Handle{Inner: procRoot} + // With subset=pid we can be sure that /proc/uptime will not exist. + if err := fd.Faccessat(proc.Inner, "uptime", unix.F_OK, unix.AT_SYMLINK_NOFOLLOW); err != nil { + proc.isSubset = errors.Is(err, os.ErrNotExist) + } + return proc, nil +} + +// Close closes the underlying file for the Handle. +func (proc *Handle) Close() error { return proc.Inner.Close() } + +var getCachedProcRoot = gocompat.SyncOnceValue(func() *Handle { + procRoot, err := getProcRoot(true) + if err != nil { + return nil // just don't cache if we see an error + } + if !procRoot.isSubset { + return nil // we only cache verified subset=pid handles + } + + // Disarm (*Handle).Close() to stop someone from accidentally closing + // the global handle. + procRoot.Inner = fd.NopCloser(procRoot.Inner) + return procRoot +}) + +// OpenProcRoot tries to open a "safer" handle to "/proc". +func OpenProcRoot() (*Handle, error) { + if proc := getCachedProcRoot(); proc != nil { + return proc, nil + } + return getProcRoot(true) +} + +// OpenUnsafeProcRoot opens a handle to "/proc" without any overmounts or +// masked paths (but also without "subset=pid"). +func OpenUnsafeProcRoot() (*Handle, error) { return getProcRoot(false) } + +func getProcRoot(subset bool) (*Handle, error) { + proc, err := privateProcRoot(subset) + if err != nil { + // Fall back to using a /proc handle if making a private mount failed. + // If we have openat2, at least we can avoid some kinds of over-mount + // attacks, but without openat2 there's not much we can do. 
+ proc, err = unsafeHostProcRoot() + } + return proc, err +} + +var hasProcThreadSelf = gocompat.SyncOnceValue(func() bool { + return unix.Access("/proc/thread-self/", unix.F_OK) == nil +}) + +var errUnsafeProcfs = errors.New("unsafe procfs detected") + +// lookup is a very minimal wrapper around [procfsLookupInRoot] which is +// intended to be called from the external API. +func (proc *Handle) lookup(subpath string) (*os.File, error) { + handle, err := procfsLookupInRoot(proc.Inner, subpath) + if err != nil { + return nil, err + } + return handle, nil +} + +// procfsBase is an enum indicating the prefix of a subpath in operations +// involving [Handle]s. +type procfsBase string + +const ( + // ProcRoot refers to the root of the procfs (i.e., "/proc/"). + ProcRoot procfsBase = "/proc" + // ProcSelf refers to the current process' subdirectory (i.e., + // "/proc/self/"). + ProcSelf procfsBase = "/proc/self" + // ProcThreadSelf refers to the current thread's subdirectory (i.e., + // "/proc/thread-self/"). In multi-threaded programs (i.e., all Go + // programs) where one thread has a different CLONE_FS, it is possible for + // "/proc/self" to point the wrong thread and so "/proc/thread-self" may be + // necessary. Note that on pre-3.17 kernels, "/proc/thread-self" doesn't + // exist and so a fallback will be used in that case. + ProcThreadSelf procfsBase = "/proc/thread-self" + // TODO: Switch to an interface setup so we can have a more type-safe + // version of ProcPid and remove the need to worry about invalid string + // values. +) + +// prefix returns a prefix that can be used with the given [Handle]. +func (base procfsBase) prefix(proc *Handle) (string, error) { + switch base { + case ProcRoot: + return ".", nil + case ProcSelf: + return "self", nil + case ProcThreadSelf: + threadSelf := "thread-self" + if !hasProcThreadSelf() || hookForceProcSelfTask() { + // Pre-3.17 kernels don't have /proc/thread-self, so do it + // manually. + threadSelf = "self/task/" + strconv.Itoa(unix.Gettid()) + if err := fd.Faccessat(proc.Inner, threadSelf, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW); err != nil || hookForceProcSelf() { + // In this case, we running in a pid namespace that doesn't + // match the /proc mount we have. This can happen inside runc. + // + // Unfortunately, there is no nice way to get the correct TID + // to use here because of the age of the kernel, so we have to + // just use /proc/self and hope that it works. + threadSelf = "self" + } + } + return threadSelf, nil + } + return "", fmt.Errorf("invalid procfs base %q", base) +} + +// ProcThreadSelfCloser is a callback that needs to be called when you are done +// operating on an [os.File] fetched using [ProcThreadSelf]. +// +// [os.File]: https://pkg.go.dev/os#File +type ProcThreadSelfCloser func() + +// open is the core lookup operation for [Handle]. It returns a handle to +// "/proc//". If the returned [ProcThreadSelfCloser] is non-nil, +// you should call it after you are done interacting with the returned handle. +// +// In general you should use prefer to use the other helpers, as they remove +// the need to interact with [procfsBase] and do not return a nil +// [ProcThreadSelfCloser] for [procfsBase] values other than [ProcThreadSelf] +// where it is necessary. 
+func (proc *Handle) open(base procfsBase, subpath string) (_ *os.File, closer ProcThreadSelfCloser, Err error) { + prefix, err := base.prefix(proc) + if err != nil { + return nil, nil, err + } + subpath = prefix + "/" + subpath + + switch base { + case ProcRoot: + file, err := proc.lookup(subpath) + if errors.Is(err, os.ErrNotExist) { + // The Handle handle in use might be a subset=pid one, which will + // result in spurious errors. In this case, just open a temporary + // unmasked procfs handle for this operation. + proc, err2 := OpenUnsafeProcRoot() // !subset=pid + if err2 != nil { + return nil, nil, err + } + defer proc.Close() //nolint:errcheck // close failures aren't critical here + + file, err = proc.lookup(subpath) + } + return file, nil, err + + case ProcSelf: + file, err := proc.lookup(subpath) + return file, nil, err + + case ProcThreadSelf: + // We need to lock our thread until the caller is done with the handle + // because between getting the handle and using it we could get + // interrupted by the Go runtime and hit the case where the underlying + // thread is swapped out and the original thread is killed, resulting + // in pull-your-hair-out-hard-to-debug issues in the caller. + runtime.LockOSThread() + defer func() { + if Err != nil { + runtime.UnlockOSThread() + closer = nil + } + }() + + file, err := proc.lookup(subpath) + return file, runtime.UnlockOSThread, err + } + // should never be reached + return nil, nil, fmt.Errorf("[internal error] invalid procfs base %q", base) +} + +// OpenThreadSelf returns a handle to "/proc/thread-self/" (or an +// equivalent handle on older kernels where "/proc/thread-self" doesn't exist). +// Once finished with the handle, you must call the returned closer function +// (runtime.UnlockOSThread). You must not pass the returned *os.File to other +// Go threads or use the handle after calling the closer. +func (proc *Handle) OpenThreadSelf(subpath string) (_ *os.File, _ ProcThreadSelfCloser, Err error) { + return proc.open(ProcThreadSelf, subpath) +} + +// OpenSelf returns a handle to /proc/self/. +func (proc *Handle) OpenSelf(subpath string) (*os.File, error) { + file, closer, err := proc.open(ProcSelf, subpath) + assert.Assert(closer == nil, "closer for ProcSelf must be nil") + return file, err +} + +// OpenRoot returns a handle to /proc/. +func (proc *Handle) OpenRoot(subpath string) (*os.File, error) { + file, closer, err := proc.open(ProcRoot, subpath) + assert.Assert(closer == nil, "closer for ProcRoot must be nil") + return file, err +} + +// OpenPid returns a handle to /proc/$pid/ (pid can be a pid or tid). +// This is mainly intended for usage when operating on other processes. +func (proc *Handle) OpenPid(pid int, subpath string) (*os.File, error) { + return proc.OpenRoot(strconv.Itoa(pid) + "/" + subpath) +} + +// checkSubpathOvermount checks if the dirfd and path combination is on the +// same mount as the given root. +func checkSubpathOvermount(root, dir fd.Fd, path string) error { + // Get the mntID of our procfs handle. + expectedMountID, err := fd.GetMountID(root, "") + if err != nil { + return fmt.Errorf("get root mount id: %w", err) + } + // Get the mntID of the target magic-link. + gotMountID, err := fd.GetMountID(dir, path) + if err != nil { + return fmt.Errorf("get subpath mount id: %w", err) + } + // As long as the directory mount is alive, even with wrapping mount IDs, + // we would expect to see a different mount ID here. 
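A sketch of how the Handle helpers above fit together; "status" and "fd/" are arbitrary subpaths, and since the package is internal this standalone program is only illustrative.

```go
package main

import (
	"fmt"

	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs"
)

func main() {
	proc, err := procfs.OpenProcRoot() // may be a subset=pid handle
	if err != nil {
		panic(err)
	}
	defer proc.Close()

	// Process-wide paths go through OpenSelf (no closer is needed there).
	status, err := proc.OpenSelf("status")
	if err != nil {
		panic(err)
	}
	fmt.Println("opened", status.Name())
	_ = status.Close()

	// Thread-specific paths go through OpenThreadSelf; the returned closer
	// must be called once finished so the OS thread can be unlocked again.
	fdDir, closer, err := proc.OpenThreadSelf("fd/")
	if err != nil {
		panic(err)
	}
	defer closer()
	defer fdDir.Close()
	fmt.Println("opened", fdDir.Name())
}
```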
(Of course, if we're + // using unsafeHostProcRoot() then an attaker could change this after we + // did this check.) + if expectedMountID != gotMountID { + return fmt.Errorf("%w: subpath %s/%s has an overmount obscuring the real path (mount ids do not match %d != %d)", + errUnsafeProcfs, dir.Name(), path, expectedMountID, gotMountID) + } + return nil +} + +// Readlink performs a readlink operation on "/proc//" in a way +// that should be free from race attacks. This is most commonly used to get the +// real path of a file by looking at "/proc/self/fd/$n", with the same safety +// protections as [Open] (as well as some additional checks against +// overmounts). +func (proc *Handle) Readlink(base procfsBase, subpath string) (string, error) { + link, closer, err := proc.open(base, subpath) + if closer != nil { + defer closer() + } + if err != nil { + return "", fmt.Errorf("get safe %s/%s handle: %w", base, subpath, err) + } + defer link.Close() //nolint:errcheck // close failures aren't critical here + + // Try to detect if there is a mount on top of the magic-link. This should + // be safe in general (a mount on top of the path afterwards would not + // affect the handle itself) and will definitely be safe if we are using + // privateProcRoot() (at least since Linux 5.12[1], when anonymous mount + // namespaces were completely isolated from external mounts including mount + // propagation events). + // + // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts + // onto targets that reside on shared mounts"). + if err := checkSubpathOvermount(proc.Inner, link, ""); err != nil { + return "", fmt.Errorf("check safety of %s/%s magiclink: %w", base, subpath, err) + } + + // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See Linux commit + // 65cfc6722361 ("readlinkat(), fchownat() and fstatat() with empty + // relative pathnames"). + return fd.Readlinkat(link, "") +} + +// ProcSelfFdReadlink gets the real path of the given file by looking at +// readlink(/proc/thread-self/fd/$n). +// +// This is just a wrapper around [Handle.Readlink]. +func ProcSelfFdReadlink(fd fd.Fd) (string, error) { + procRoot, err := OpenProcRoot() // subset=pid + if err != nil { + return "", err + } + defer procRoot.Close() //nolint:errcheck // close failures aren't critical here + + fdPath := "fd/" + strconv.Itoa(int(fd.Fd())) + return procRoot.Readlink(ProcThreadSelf, fdPath) +} + +// CheckProcSelfFdPath returns whether the given file handle matches the +// expected path. (This is inherently racy.) +func CheckProcSelfFdPath(path string, file fd.Fd) error { + if err := fd.IsDeadInode(file); err != nil { + return err + } + actualPath, err := ProcSelfFdReadlink(file) + if err != nil { + return fmt.Errorf("get path of handle: %w", err) + } + if actualPath != path { + return fmt.Errorf("%w: handle path %q doesn't match expected path %q", internal.ErrPossibleBreakout, actualPath, path) + } + return nil +} + +// ReopenFd takes an existing file descriptor and "re-opens" it through +// /proc/thread-self/fd/. This allows for O_PATH file descriptors to be +// upgraded to regular file descriptors, as well as changing the open mode of a +// regular file descriptor. Some filesystems have unique handling of open(2) +// which make this incredibly useful (such as /dev/ptmx). 
+func ReopenFd(handle fd.Fd, flags int) (*os.File, error) { + procRoot, err := OpenProcRoot() // subset=pid + if err != nil { + return nil, err + } + defer procRoot.Close() //nolint:errcheck // close failures aren't critical here + + // We can't operate on /proc/thread-self/fd/$n directly when doing a + // re-open, so we need to open /proc/thread-self/fd and then open a single + // final component. + procFdDir, closer, err := procRoot.OpenThreadSelf("fd/") + if err != nil { + return nil, fmt.Errorf("get safe /proc/thread-self/fd handle: %w", err) + } + defer procFdDir.Close() //nolint:errcheck // close failures aren't critical here + defer closer() + + // Try to detect if there is a mount on top of the magic-link we are about + // to open. If we are using unsafeHostProcRoot(), this could change after + // we check it (and there's nothing we can do about that) but for + // privateProcRoot() this should be guaranteed to be safe (at least since + // Linux 5.12[1], when anonymous mount namespaces were completely isolated + // from external mounts including mount propagation events). + // + // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts + // onto targets that reside on shared mounts"). + fdStr := strconv.Itoa(int(handle.Fd())) + if err := checkSubpathOvermount(procRoot.Inner, procFdDir, fdStr); err != nil { + return nil, fmt.Errorf("check safety of /proc/thread-self/fd/%s magiclink: %w", fdStr, err) + } + + flags |= unix.O_CLOEXEC + // Rather than just wrapping fd.Openat, open-code it so we can copy + // handle.Name(). + reopenFd, err := unix.Openat(int(procFdDir.Fd()), fdStr, flags, 0) + if err != nil { + return nil, fmt.Errorf("reopen fd %d: %w", handle.Fd(), err) + } + return os.NewFile(uintptr(reopenFd), handle.Name()), nil +} + +// Test hooks used in the procfs tests to verify that the fallback logic works. +// See testing_mocks_linux_test.go and procfs_linux_test.go for more details. +var ( + hookForcePrivateProcRootOpenTree = hookDummyFile + hookForcePrivateProcRootOpenTreeAtRecursive = hookDummyFile + hookForceGetProcRootUnsafe = hookDummy + + hookForceProcSelfTask = hookDummy + hookForceProcSelf = hookDummy +) + +func hookDummy() bool { return false } +func hookDummyFile(_ io.Closer) bool { return false } diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_lookup_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_lookup_linux.go new file mode 100644 index 00000000..1ad1f18e --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_lookup_linux.go @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// This code is adapted to be a minimal version of the libpathrs proc resolver +// . +// As we only need O_PATH|O_NOFOLLOW support, this is not too much to port. 
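A hedged sketch of ReopenFd upgrading an O_PATH handle into a readable file descriptor; /etc/hostname is an arbitrary example file and the program assumes /proc is mounted.

```go
package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/sys/unix"

	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs"
)

func main() {
	// An O_PATH descriptor cannot be read from directly...
	pathFd, err := os.OpenFile("/etc/hostname", unix.O_PATH|unix.O_CLOEXEC, 0)
	if err != nil {
		panic(err)
	}
	defer pathFd.Close()

	// ...but it can be re-opened through /proc/thread-self/fd/$n.
	f, err := procfs.ReopenFd(pathFd, unix.O_RDONLY)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	data, err := io.ReadAll(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", data)
}
```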
+ +package procfs + +import ( + "fmt" + "os" + "path" + "path/filepath" + "strings" + + "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/internal/consts" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux" +) + +// procfsLookupInRoot is a stripped down version of completeLookupInRoot, +// entirely designed to support the very small set of features necessary to +// make procfs handling work. Unlike completeLookupInRoot, we always have +// O_PATH|O_NOFOLLOW behaviour for trailing symlinks. +// +// The main restrictions are: +// +// - ".." is not supported (as it requires either os.Root-style replays, +// which is more bug-prone; or procfs verification, which is not possible +// due to re-entrancy issues). +// - Absolute symlinks for the same reason (and all absolute symlinks in +// procfs are magic-links, which we want to skip anyway). +// - If statx is supported (checkSymlinkOvermount), any mount-point crossings +// (which is the main attack of concern against /proc). +// - Partial lookups are not supported, so the symlink stack is not needed. +// - Trailing slash special handling is not necessary in most cases (if we +// operating on procfs, it's usually with programmer-controlled strings +// that will then be re-opened), so we skip it since whatever re-opens it +// can deal with it. It's a creature comfort anyway. +// +// If the system supports openat2(), this is implemented using equivalent flags +// (RESOLVE_BENEATH | RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS). +func procfsLookupInRoot(procRoot fd.Fd, unsafePath string) (Handle *os.File, _ error) { + unsafePath = filepath.ToSlash(unsafePath) // noop + + // Make sure that an empty unsafe path still returns something sane, even + // with openat2 (which doesn't have AT_EMPTY_PATH semantics yet). + if unsafePath == "" { + unsafePath = "." + } + + // This is already checked by getProcRoot, but make sure here since the + // core security of this lookup is based on this assumption. + if err := verifyProcRoot(procRoot); err != nil { + return nil, err + } + + if linux.HasOpenat2() { + // We prefer being able to use RESOLVE_NO_XDEV if we can, to be + // absolutely sure we are operating on a clean /proc handle that + // doesn't have any cheeky overmounts that could trick us (including + // symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't + // strictly needed, but just use it since we have it. + // + // NOTE: /proc/self is technically a magic-link (the contents of the + // symlink are generated dynamically), but it doesn't use + // nd_jump_link() so RESOLVE_NO_MAGICLINKS allows it. + // + // TODO: It would be nice to have RESOLVE_NO_DOTDOT, purely for + // self-consistency with the backup O_PATH resolver. + handle, err := fd.Openat2(procRoot, unsafePath, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_NOFOLLOW | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS, + }) + if err != nil { + // TODO: Once we bump the minimum Go version to 1.20, we can use + // multiple %w verbs for this wrapping. For now we need to use a + // compatibility shim for older Go versions. 
+ // err = fmt.Errorf("%w: %w", errUnsafeProcfs, err) + return nil, gocompat.WrapBaseError(err, errUnsafeProcfs) + } + return handle, nil + } + + // To mirror openat2(RESOLVE_BENEATH), we need to return an error if the + // path is absolute. + if path.IsAbs(unsafePath) { + return nil, fmt.Errorf("%w: cannot resolve absolute paths in procfs resolver", internal.ErrPossibleBreakout) + } + + currentDir, err := fd.Dup(procRoot) + if err != nil { + return nil, fmt.Errorf("clone root fd: %w", err) + } + defer func() { + // If a handle is not returned, close the internal handle. + if Handle == nil { + _ = currentDir.Close() + } + }() + + var ( + linksWalked int + currentPath string + remainingPath = unsafePath + ) + for remainingPath != "" { + // Get the next path component. + var part string + if i := strings.IndexByte(remainingPath, '/'); i == -1 { + part, remainingPath = remainingPath, "" + } else { + part, remainingPath = remainingPath[:i], remainingPath[i+1:] + } + if part == "" { + // no-op component, but treat it the same as "." + part = "." + } + if part == ".." { + // not permitted + return nil, fmt.Errorf("%w: cannot walk into '..' in procfs resolver", internal.ErrPossibleBreakout) + } + + // Apply the component lexically to the path we are building. + // currentPath does not contain any symlinks, and we are lexically + // dealing with a single component, so it's okay to do a filepath.Clean + // here. (Not to mention that ".." isn't allowed.) + nextPath := path.Join("/", currentPath, part) + // If we logically hit the root, just clone the root rather than + // opening the part and doing all of the other checks. + if nextPath == "/" { + // Jump to root. + rootClone, err := fd.Dup(procRoot) + if err != nil { + return nil, fmt.Errorf("clone root fd: %w", err) + } + _ = currentDir.Close() + currentDir = rootClone + currentPath = nextPath + continue + } + + // Try to open the next component. + nextDir, err := fd.Openat(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) + if err != nil { + return nil, err + } + + // Make sure we are still on procfs and haven't crossed mounts. + if err := verifyProcHandle(nextDir); err != nil { + _ = nextDir.Close() + return nil, fmt.Errorf("check %q component is on procfs: %w", part, err) + } + if err := checkSubpathOvermount(procRoot, nextDir, ""); err != nil { + _ = nextDir.Close() + return nil, fmt.Errorf("check %q component is not overmounted: %w", part, err) + } + + // We are emulating O_PATH|O_NOFOLLOW, so we only need to traverse into + // trailing symlinks if we are not the final component. Otherwise we + // can just return the currentDir. + if remainingPath != "" { + st, err := nextDir.Stat() + if err != nil { + _ = nextDir.Close() + return nil, fmt.Errorf("stat component %q: %w", part, err) + } + + if st.Mode()&os.ModeType == os.ModeSymlink { + // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See + // Linux commit 65cfc6722361 ("readlinkat(), fchownat() and + // fstatat() with empty relative pathnames"). + linkDest, err := fd.Readlinkat(nextDir, "") + // We don't need the handle anymore. + _ = nextDir.Close() + if err != nil { + return nil, err + } + + linksWalked++ + if linksWalked > consts.MaxSymlinkLimit { + return nil, &os.PathError{Op: "securejoin.procfsLookupInRoot", Path: "/proc/" + unsafePath, Err: unix.ELOOP} + } + + // Update our logical remaining path. + remainingPath = linkDest + "/" + remainingPath + // Absolute symlinks are probably magiclinks, we reject them. 
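To make the shape of the O_PATH fallback walk easier to follow, here is a deliberately simplified, purely lexical version of its component loop (no file descriptors, no procfs or mount-ID checks): empty components collapse to ".", ".." is rejected outright, and the logical path is rebuilt one component at a time.

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// splitWalk mirrors only the component-splitting structure of the resolver
// above; it is not a substitute for the fd-based checks the real code does.
func splitWalk(unsafePath string) ([]string, error) {
	var (
		currentPath string
		steps       []string
		remaining   = unsafePath
	)
	for remaining != "" {
		var part string
		if i := strings.IndexByte(remaining, '/'); i == -1 {
			part, remaining = remaining, ""
		} else {
			part, remaining = remaining[:i], remaining[i+1:]
		}
		if part == "" {
			part = "." // no-op component
		}
		if part == ".." {
			return nil, fmt.Errorf("%q: '..' components are not permitted", unsafePath)
		}
		currentPath = path.Join("/", currentPath, part)
		steps = append(steps, currentPath)
	}
	return steps, nil
}

func main() {
	steps, err := splitWalk("self/fd//0")
	fmt.Println(steps, err) // [/self /self/fd /self/fd /self/fd/0] <nil>
}
```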
+ if path.IsAbs(linkDest) { + return nil, fmt.Errorf("%w: cannot jump to / in procfs resolver -- possible magiclink", internal.ErrPossibleBreakout) + } + continue + } + } + + // Walk into the next component. + _ = currentDir.Close() + currentDir = nextDir + currentPath = nextPath + } + + // One final sanity-check. + if err := verifyProcHandle(currentDir); err != nil { + return nil, fmt.Errorf("check final handle is on procfs: %w", err) + } + if err := checkSubpathOvermount(procRoot, currentDir, ""); err != nil { + return nil, fmt.Errorf("check final handle is not overmounted: %w", err) + } + return currentDir, nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/lookup_linux.go similarity index 86% rename from vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go rename to vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/lookup_linux.go index be81e498..f47504e6 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/lookup_linux.go @@ -1,10 +1,15 @@ +// SPDX-License-Identifier: MPL-2.0 + //go:build linux -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. -package securejoin +package pathrs import ( "errors" @@ -15,6 +20,12 @@ import ( "strings" "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/internal/consts" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs" ) type symlinkStackEntry struct { @@ -112,12 +123,12 @@ func (s *symlinkStack) push(dir *os.File, remainingPath, linkTarget string) erro return nil } // Split the link target and clean up any "" parts. - linkTargetParts := slices_DeleteFunc( + linkTargetParts := gocompat.SlicesDeleteFunc( strings.Split(linkTarget, "/"), func(part string) bool { return part == "" || part == "." }) // Copy the directory so the caller doesn't close our copy. - dirCopy, err := dupFile(dir) + dirCopy, err := fd.Dup(dir) if err != nil { return err } @@ -159,11 +170,11 @@ func (s *symlinkStack) PopTopSymlink() (*os.File, string, bool) { // within the provided root (a-la RESOLVE_IN_ROOT) and opens the final existing // component of the requested path, returning a file handle to the final // existing component and a string containing the remaining path components. 
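The O_PATH fallback above walks procfs one component at a time and re-verifies after every step that the handle is still on procfs and not overmounted. A minimal standalone sketch of that per-component check, not part of the vendored code: the "sys" component and the panic-style error handling are purely illustrative.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

const procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC, as used by verifyProcRoot above

func main() {
	procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
	if err != nil {
		panic(err)
	}
	defer procRoot.Close()

	// Open a single component without following symlinks, mirroring the
	// fd.Openat(currentDir, part, O_PATH|O_NOFOLLOW|O_CLOEXEC, 0) step.
	fdNum, err := unix.Openat(int(procRoot.Fd()), "sys", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
	if err != nil {
		panic(err)
	}
	next := os.NewFile(uintptr(fdNum), "/proc/sys")
	defer next.Close()

	// Rough equivalent of the "still on procfs" verification: an overmount
	// of a different filesystem would change the f_type reported here.
	var stfs unix.Statfs_t
	if err := unix.Fstatfs(int(next.Fd()), &stfs); err != nil {
		panic(err)
	}
	if stfs.Type != procSuperMagic {
		fmt.Println("unexpected filesystem type: possible overmount")
		return
	}
	fmt.Println("component is still on procfs")
}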
-func partialLookupInRoot(root *os.File, unsafePath string) (*os.File, string, error) { +func partialLookupInRoot(root fd.Fd, unsafePath string) (*os.File, string, error) { return lookupInRoot(root, unsafePath, true) } -func completeLookupInRoot(root *os.File, unsafePath string) (*os.File, error) { +func completeLookupInRoot(root fd.Fd, unsafePath string) (*os.File, error) { handle, remainingPath, err := lookupInRoot(root, unsafePath, false) if remainingPath != "" && err == nil { // should never happen @@ -174,7 +185,7 @@ func completeLookupInRoot(root *os.File, unsafePath string) (*os.File, error) { return handle, err } -func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.File, _ string, _ error) { +func lookupInRoot(root fd.Fd, unsafePath string, partial bool) (Handle *os.File, _ string, _ error) { unsafePath = filepath.ToSlash(unsafePath) // noop // This is very similar to SecureJoin, except that we operate on the @@ -182,20 +193,20 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi // managed open, along with the remaining path components not opened. // Try to use openat2 if possible. - if hasOpenat2() { + if linux.HasOpenat2() { return lookupOpenat2(root, unsafePath, partial) } // Get the "actual" root path from /proc/self/fd. This is necessary if the // root is some magic-link like /proc/$pid/root, in which case we want to - // make sure when we do checkProcSelfFdPath that we are using the correct - // root path. - logicalRootPath, err := procSelfFdReadlink(root) + // make sure when we do procfs.CheckProcSelfFdPath that we are using the + // correct root path. + logicalRootPath, err := procfs.ProcSelfFdReadlink(root) if err != nil { return nil, "", fmt.Errorf("get real root path: %w", err) } - currentDir, err := dupFile(root) + currentDir, err := fd.Dup(root) if err != nil { return nil, "", fmt.Errorf("clone root fd: %w", err) } @@ -260,7 +271,7 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi return nil, "", fmt.Errorf("walking into root with part %q failed: %w", part, err) } // Jump to root. - rootClone, err := dupFile(root) + rootClone, err := fd.Dup(root) if err != nil { return nil, "", fmt.Errorf("clone root fd: %w", err) } @@ -271,21 +282,21 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi } // Try to open the next component. - nextDir, err := openatFile(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) - switch { - case err == nil: + nextDir, err := fd.Openat(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) + switch err { + case nil: st, err := nextDir.Stat() if err != nil { _ = nextDir.Close() return nil, "", fmt.Errorf("stat component %q: %w", part, err) } - switch st.Mode() & os.ModeType { + switch st.Mode() & os.ModeType { //nolint:exhaustive // just a glorified if statement case os.ModeSymlink: // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See // Linux commit 65cfc6722361 ("readlinkat(), fchownat() and // fstatat() with empty relative pathnames"). - linkDest, err := readlinkatFile(nextDir, "") + linkDest, err := fd.Readlinkat(nextDir, "") // We don't need the handle anymore. 
_ = nextDir.Close() if err != nil { @@ -293,7 +304,7 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi } linksWalked++ - if linksWalked > maxSymlinkLimit { + if linksWalked > consts.MaxSymlinkLimit { return nil, "", &os.PathError{Op: "securejoin.lookupInRoot", Path: logicalRootPath + "/" + unsafePath, Err: unix.ELOOP} } @@ -307,7 +318,7 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi // Absolute symlinks reset any work we've already done. if path.IsAbs(linkDest) { // Jump to root. - rootClone, err := dupFile(root) + rootClone, err := fd.Dup(root) if err != nil { return nil, "", fmt.Errorf("clone root fd: %w", err) } @@ -335,12 +346,12 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi // rename or mount on the system. if part == ".." { // Make sure the root hasn't moved. - if err := checkProcSelfFdPath(logicalRootPath, root); err != nil { + if err := procfs.CheckProcSelfFdPath(logicalRootPath, root); err != nil { return nil, "", fmt.Errorf("root path moved during lookup: %w", err) } // Make sure the path is what we expect. fullPath := logicalRootPath + nextPath - if err := checkProcSelfFdPath(fullPath, currentDir); err != nil { + if err := procfs.CheckProcSelfFdPath(fullPath, currentDir); err != nil { return nil, "", fmt.Errorf("walking into %q had unexpected result: %w", part, err) } } @@ -371,7 +382,7 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi // context of openat2, a trailing slash and a trailing "/." are completely // equivalent. if strings.HasSuffix(unsafePath, "/") { - nextDir, err := openatFile(currentDir, ".", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) + nextDir, err := fd.Openat(currentDir, ".", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) if err != nil { if !partial { _ = currentDir.Close() diff --git a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/mkdir_linux.go similarity index 86% rename from vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go rename to vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/mkdir_linux.go index a17ae3b0..f3c62b0d 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/mkdir_linux.go @@ -1,10 +1,15 @@ +// SPDX-License-Identifier: MPL-2.0 + //go:build linux -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. -package securejoin +package pathrs import ( "errors" @@ -14,12 +19,13 @@ import ( "strings" "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux" ) -var ( - errInvalidMode = errors.New("invalid permission mode") - errPossibleAttack = errors.New("possible attack detected") -) +var errInvalidMode = errors.New("invalid permission mode") // modePermExt is like os.ModePerm except that it also includes the set[ug]id // and sticky bits. 
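The toUnixMode helper referenced in the following hunk exists because os.FileMode keeps the set[ug]id and sticky bits in Go-specific bit positions, while mkdirat(2) expects the classic S_ISUID/S_ISGID/S_ISVTX bits next to the permission bits. A hedged sketch of that conversion idea; toUnix here is a stand-in, not the vendored helper, whose exact behavior (e.g. rejecting other mode bits) may differ.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// toUnix maps an os.FileMode onto the mode bits mkdirat(2) understands.
func toUnix(mode os.FileMode) uint32 {
	m := uint32(mode.Perm())
	if mode&os.ModeSetuid != 0 {
		m |= unix.S_ISUID
	}
	if mode&os.ModeSetgid != 0 {
		m |= unix.S_ISGID
	}
	if mode&os.ModeSticky != 0 {
		m |= unix.S_ISVTX
	}
	return m
}

func main() {
	mode := os.FileMode(0o755) | os.ModeSetgid | os.ModeSticky
	fmt.Printf("os.FileMode %#o -> unix mode %#o\n", uint32(mode), toUnix(mode))
}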
@@ -66,6 +72,8 @@ func toUnixMode(mode os.FileMode) (uint32, error) { // a brand new lookup of unsafePath (such as with [SecureJoin] or openat2) after // doing [MkdirAll]. If you intend to open the directory after creating it, you // should use MkdirAllHandle. +// +// [SecureJoin]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin#SecureJoin func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.File, Err error) { unixMode, err := toUnixMode(mode) if err != nil { @@ -102,7 +110,7 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.F // // This is mostly a quality-of-life check, because mkdir will simply fail // later if the attacker deletes the tree after this check. - if err := isDeadInode(currentDir); err != nil { + if err := fd.IsDeadInode(currentDir); err != nil { return nil, fmt.Errorf("finding existing subpath of %q: %w", unsafePath, err) } @@ -113,13 +121,13 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.F return nil, fmt.Errorf("cannot create subdirectories in %q: %w", currentDir.Name(), unix.ENOTDIR) } else if err != nil { return nil, fmt.Errorf("re-opening handle to %q: %w", currentDir.Name(), err) - } else { + } else { //nolint:revive // indent-error-flow lint doesn't make sense here _ = currentDir.Close() currentDir = reopenDir } remainingParts := strings.Split(remainingPath, string(filepath.Separator)) - if slices_Contains(remainingParts, "..") { + if gocompat.SlicesContains(remainingParts, "..") { // The path contained ".." components after the end of the "real" // components. We could try to safely resolve ".." here but that would // add a bunch of extra logic for something that it's not clear even @@ -150,12 +158,12 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.F if err := unix.Mkdirat(int(currentDir.Fd()), part, unixMode); err != nil && !errors.Is(err, unix.EEXIST) { err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err} // Make the error a bit nicer if the directory is dead. - if deadErr := isDeadInode(currentDir); deadErr != nil { + if deadErr := fd.IsDeadInode(currentDir); deadErr != nil { // TODO: Once we bump the minimum Go version to 1.20, we can use // multiple %w verbs for this wrapping. For now we need to use a // compatibility shim for older Go versions. - //err = fmt.Errorf("%w (%w)", err, deadErr) - err = wrapBaseError(err, deadErr) + // err = fmt.Errorf("%w (%w)", err, deadErr) + err = gocompat.WrapBaseError(err, deadErr) } return nil, err } @@ -163,13 +171,13 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.F // Get a handle to the next component. O_DIRECTORY means we don't need // to use O_PATH. 
var nextDir *os.File - if hasOpenat2() { - nextDir, err = openat2File(currentDir, part, &unix.OpenHow{ + if linux.HasOpenat2() { + nextDir, err = openat2(currentDir, part, &unix.OpenHow{ Flags: unix.O_NOFOLLOW | unix.O_DIRECTORY | unix.O_CLOEXEC, Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_NO_XDEV, }) } else { - nextDir, err = openatFile(currentDir, part, unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) + nextDir, err = fd.Openat(currentDir, part, unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) } if err != nil { return nil, err @@ -220,12 +228,14 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.F // If you plan to open the directory after you have created it or want to use // an open directory handle as the root, you should use [MkdirAllHandle] instead. // This function is a wrapper around [MkdirAllHandle]. +// +// [SecureJoin]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin#SecureJoin func MkdirAll(root, unsafePath string, mode os.FileMode) error { rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) if err != nil { return err } - defer rootDir.Close() + defer rootDir.Close() //nolint:errcheck // close failures aren't critical here f, err := MkdirAllHandle(rootDir, unsafePath, mode) if err != nil { diff --git a/vendor/github.com/cyphar/filepath-securejoin/open_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/open_linux.go similarity index 56% rename from vendor/github.com/cyphar/filepath-securejoin/open_linux.go rename to vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/open_linux.go index 230be73f..7492d8cf 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/open_linux.go +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/open_linux.go @@ -1,17 +1,22 @@ +// SPDX-License-Identifier: MPL-2.0 + //go:build linux -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. -package securejoin +package pathrs import ( - "fmt" "os" - "strconv" "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs" ) // OpenatInRoot is equivalent to [OpenInRoot], except that the root is provided @@ -40,12 +45,14 @@ func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) { // disconnected TTY that could cause a DoS, or some other issue). In order to // use the returned handle, you can "upgrade" it to a proper handle using // [Reopen]. 
+// +// [SecureJoin]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin#SecureJoin func OpenInRoot(root, unsafePath string) (*os.File, error) { rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) if err != nil { return nil, err } - defer rootDir.Close() + defer rootDir.Close() //nolint:errcheck // close failures aren't critical here return OpenatInRoot(rootDir, unsafePath) } @@ -63,41 +70,5 @@ func OpenInRoot(root, unsafePath string) (*os.File, error) { // // [CVE-2019-19921]: https://github.com/advisories/GHSA-fh74-hm69-rqjw func Reopen(handle *os.File, flags int) (*os.File, error) { - procRoot, err := getProcRoot() - if err != nil { - return nil, err - } - - // We can't operate on /proc/thread-self/fd/$n directly when doing a - // re-open, so we need to open /proc/thread-self/fd and then open a single - // final component. - procFdDir, closer, err := procThreadSelf(procRoot, "fd/") - if err != nil { - return nil, fmt.Errorf("get safe /proc/thread-self/fd handle: %w", err) - } - defer procFdDir.Close() - defer closer() - - // Try to detect if there is a mount on top of the magic-link we are about - // to open. If we are using unsafeHostProcRoot(), this could change after - // we check it (and there's nothing we can do about that) but for - // privateProcRoot() this should be guaranteed to be safe (at least since - // Linux 5.12[1], when anonymous mount namespaces were completely isolated - // from external mounts including mount propagation events). - // - // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts - // onto targets that reside on shared mounts"). - fdStr := strconv.Itoa(int(handle.Fd())) - if err := checkSymlinkOvermount(procRoot, procFdDir, fdStr); err != nil { - return nil, fmt.Errorf("check safety of /proc/thread-self/fd/%s magiclink: %w", fdStr, err) - } - - flags |= unix.O_CLOEXEC - // Rather than just wrapping openatFile, open-code it so we can copy - // handle.Name(). - reopenFd, err := unix.Openat(int(procFdDir.Fd()), fdStr, flags, 0) - if err != nil { - return nil, fmt.Errorf("reopen fd %d: %w", handle.Fd(), err) - } - return os.NewFile(uintptr(reopenFd), handle.Name()), nil + return procfs.ReopenFd(handle, flags) } diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/openat2_linux.go new file mode 100644 index 00000000..937bc435 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/openat2_linux.go @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +package pathrs + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd" + "github.com/cyphar/filepath-securejoin/pathrs-lite/procfs" +) + +func openat2(dir fd.Fd, path string, how *unix.OpenHow) (*os.File, error) { + file, err := fd.Openat2(dir, path, how) + if err != nil { + return nil, err + } + // If we are using RESOLVE_IN_ROOT, the name we generated may be wrong. 
+ if how.Resolve&unix.RESOLVE_IN_ROOT == unix.RESOLVE_IN_ROOT { + if actualPath, err := procfs.ProcSelfFdReadlink(file); err == nil { + // TODO: Ideally we would not need to dup the fd, but you cannot + // easily just swap an *os.File with one from the same fd + // (the GC will close the old one, and you cannot clear the + // finaliser easily because it is associated with an internal + // field of *os.File not *os.File itself). + newFile, err := fd.DupWithName(file, actualPath) + if err != nil { + return nil, err + } + file = newFile + } + } + return file, nil +} + +func lookupOpenat2(root fd.Fd, unsafePath string, partial bool) (*os.File, string, error) { + if !partial { + file, err := openat2(root, unsafePath, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, + }) + return file, "", err + } + return partialLookupOpenat2(root, unsafePath) +} + +// partialLookupOpenat2 is an alternative implementation of +// partialLookupInRoot, using openat2(RESOLVE_IN_ROOT) to more safely get a +// handle to the deepest existing child of the requested path within the root. +func partialLookupOpenat2(root fd.Fd, unsafePath string) (*os.File, string, error) { + // TODO: Implement this as a git-bisect-like binary search. + + unsafePath = filepath.ToSlash(unsafePath) // noop + endIdx := len(unsafePath) + var lastError error + for endIdx > 0 { + subpath := unsafePath[:endIdx] + + handle, err := openat2(root, subpath, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, + }) + if err == nil { + // Jump over the slash if we have a non-"" remainingPath. + if endIdx < len(unsafePath) { + endIdx++ + } + // We found a subpath! + return handle, unsafePath[endIdx:], lastError + } + if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENOTDIR) { + // That path doesn't exist, let's try the next directory up. + endIdx = strings.LastIndexByte(subpath, '/') + lastError = err + continue + } + return nil, "", fmt.Errorf("open subpath: %w", err) + } + // If we couldn't open anything, the whole subpath is missing. Return a + // copy of the root fd so that the caller doesn't close this one by + // accident. + rootClone, err := fd.Dup(root) + if err != nil { + return nil, "", err + } + return rootClone, unsafePath, lastError +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/procfs/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/procfs/procfs_linux.go new file mode 100644 index 00000000..ec187a41 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/procfs/procfs_linux.go @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Package procfs provides a safe API for operating on /proc on Linux. +package procfs + +import ( + "os" + + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs" +) + +// This package mostly just wraps internal/procfs APIs. This is necessary +// because we are forced to export some things from internal/procfs in order to +// avoid some dependency cycle issues, but we don't want users to see or use +// them. 
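partialLookupOpenat2 above relies entirely on openat2(2) returning ENOENT/ENOTDIR for a missing component while RESOLVE_IN_ROOT keeps the lookup confined to the root handle. A standalone sketch of that primitive; the /tmp root and the "a/b/c" path are arbitrary examples, and openat2 needs Linux 5.6 or newer.

package main

import (
	"errors"
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	root, err := os.Open("/tmp") // any directory you control works as the root
	if err != nil {
		panic(err)
	}
	defer root.Close()

	how := &unix.OpenHow{
		Flags:   unix.O_PATH | unix.O_CLOEXEC,
		Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS,
	}
	// "a/b/c" probably does not exist under the root; ENOENT/ENOTDIR is the
	// signal partialLookupOpenat2 uses to retry with a shorter prefix.
	fd, err := unix.Openat2(int(root.Fd()), "a/b/c", how)
	switch {
	case err == nil:
		f := os.NewFile(uintptr(fd), "a/b/c")
		fmt.Println("opened:", f.Name())
		f.Close()
	case errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENOTDIR):
		fmt.Println("missing component, would retry with a shorter prefix:", err)
	default:
		fmt.Println("openat2 failed:", err)
	}
}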
+ +// ProcThreadSelfCloser is a callback that needs to be called when you are done +// operating on an [os.File] fetched using [Handle.OpenThreadSelf]. +// +// [os.File]: https://pkg.go.dev/os#File +type ProcThreadSelfCloser = procfs.ProcThreadSelfCloser + +// Handle is a wrapper around an *os.File handle to "/proc", which can be used +// to do further procfs-related operations in a safe way. +type Handle struct { + inner *procfs.Handle +} + +// Close close the resources associated with this [Handle]. Note that if this +// [Handle] was created with [OpenProcRoot], on some kernels the underlying +// procfs handle is cached and so this Close operation may be a no-op. However, +// you should always call Close on [Handle]s once you are done with them. +func (proc *Handle) Close() error { return proc.inner.Close() } + +// OpenProcRoot tries to open a "safer" handle to "/proc" (i.e., one with the +// "subset=pid" mount option applied, available from Linux 5.8). Unless you +// plan to do many [Handle.OpenRoot] operations, users should prefer to use +// this over [OpenUnsafeProcRoot] which is far more dangerous to keep open. +// +// If a safe handle cannot be opened, OpenProcRoot will fall back to opening a +// regular "/proc" handle. +// +// Note that using [Handle.OpenRoot] will still work with handles returned by +// this function. If a subpath cannot be operated on with a safe "/proc" +// handle, then [OpenUnsafeProcRoot] will be called internally and a temporary +// unsafe handle will be used. +func OpenProcRoot() (*Handle, error) { + proc, err := procfs.OpenProcRoot() + if err != nil { + return nil, err + } + return &Handle{inner: proc}, nil +} + +// OpenUnsafeProcRoot opens a handle to "/proc" without any overmounts or +// masked paths. You must be extremely careful to make sure this handle is +// never leaked to a container and that you program cannot be tricked into +// writing to arbitrary paths within it. +// +// This is not necessary if you just wish to use [Handle.OpenRoot], as handles +// returned by [OpenProcRoot] will fall back to using a *temporary* unsafe +// handle in that case. You should only really use this if you need to do many +// operations with [Handle.OpenRoot] and the performance overhead of making +// many procfs handles is an issue. If you do use OpenUnsafeProcRoot, you +// should make sure to close the handle as soon as possible to avoid +// known-fd-number attacks. +func OpenUnsafeProcRoot() (*Handle, error) { + proc, err := procfs.OpenUnsafeProcRoot() + if err != nil { + return nil, err + } + return &Handle{inner: proc}, nil +} + +// OpenThreadSelf returns a handle to "/proc/thread-self/" (or an +// equivalent handle on older kernels where "/proc/thread-self" doesn't exist). +// Once finished with the handle, you must call the returned closer function +// ([runtime.UnlockOSThread]). You must not pass the returned *os.File to other +// Go threads or use the handle after calling the closer. +// +// [runtime.UnlockOSThread]: https://pkg.go.dev/runtime#UnlockOSThread +func (proc *Handle) OpenThreadSelf(subpath string) (*os.File, ProcThreadSelfCloser, error) { + return proc.inner.OpenThreadSelf(subpath) +} + +// OpenSelf returns a handle to /proc/self/. +// +// Note that in Go programs with non-homogenous threads, this may result in +// spurious errors. 
If you are monkeying around with APIs that are +// thread-specific, you probably want to use [Handle.OpenThreadSelf] instead +// which will guarantee that the handle refers to the same thread as the caller +// is executing on. +func (proc *Handle) OpenSelf(subpath string) (*os.File, error) { + return proc.inner.OpenSelf(subpath) +} + +// OpenRoot returns a handle to /proc/. +// +// You should only use this when you need to operate on global procfs files +// (such as sysctls in /proc/sys). Unlike [Handle.OpenThreadSelf], +// [Handle.OpenSelf], and [Handle.OpenPid], the procfs handle used internally +// for this operation will never use "subset=pid", which makes it a more juicy +// target for [CVE-2024-21626]-style attacks (and doing something like opening +// a directory with OpenRoot effectively leaks [OpenUnsafeProcRoot] as long as +// the file descriptor is open). +// +// [CVE-2024-21626]: https://github.com/opencontainers/runc/security/advisories/GHSA-xr7r-f8xq-vfvv +func (proc *Handle) OpenRoot(subpath string) (*os.File, error) { + return proc.inner.OpenRoot(subpath) +} + +// OpenPid returns a handle to /proc/$pid/ (pid can be a pid or tid). +// This is mainly intended for usage when operating on other processes. +// +// You should not use this for the current thread, as special handling is +// needed for /proc/thread-self (or /proc/self/task/) when dealing with +// goroutine scheduling -- use [Handle.OpenThreadSelf] instead. +// +// To refer to the current thread-group, you should use prefer +// [Handle.OpenSelf] to passing os.Getpid as the pid argument. +func (proc *Handle) OpenPid(pid int, subpath string) (*os.File, error) { + return proc.inner.OpenPid(pid, subpath) +} + +// ProcSelfFdReadlink gets the real path of the given file by looking at +// /proc/self/fd/ with [readlink]. It is effectively just shorthand for +// something along the lines of: +// +// proc, err := procfs.OpenProcRoot() +// if err != nil { +// return err +// } +// link, err := proc.OpenThreadSelf(fmt.Sprintf("fd/%d", f.Fd())) +// if err != nil { +// return err +// } +// defer link.Close() +// var buf [4096]byte +// n, err := unix.Readlinkat(int(link.Fd()), "", buf[:]) +// if err != nil { +// return err +// } +// pathname := buf[:n] +// +// [readlink]: https://pkg.go.dev/golang.org/x/sys/unix#Readlinkat +func ProcSelfFdReadlink(f *os.File) (string, error) { + return procfs.ProcSelfFdReadlink(f) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go deleted file mode 100644 index 809a579c..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go +++ /dev/null @@ -1,452 +0,0 @@ -//go:build linux - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
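The wrapper package above re-exports only a small surface: OpenProcRoot/OpenUnsafeProcRoot, the Handle.Open* methods, and ProcSelfFdReadlink. A possible usage sketch, assuming the importing module vendors filepath-securejoin v0.5.0 or newer and only needs a safe read of a thread-self file; error handling is abbreviated.

package main

import (
	"fmt"

	"github.com/cyphar/filepath-securejoin/pathrs-lite/procfs"
)

func main() {
	proc, err := procfs.OpenProcRoot() // "safer" handle, subset=pid when available
	if err != nil {
		panic(err)
	}
	defer proc.Close()

	// OpenThreadSelf pins the goroutine to its OS thread; the returned
	// closer (runtime.UnlockOSThread) must be called once we are done.
	status, closer, err := proc.OpenThreadSelf("status")
	if err != nil {
		panic(err)
	}
	defer closer()
	defer status.Close()

	fmt.Println("opened", status.Name())
}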
- -package securejoin - -import ( - "errors" - "fmt" - "os" - "runtime" - "strconv" - - "golang.org/x/sys/unix" -) - -func fstat(f *os.File) (unix.Stat_t, error) { - var stat unix.Stat_t - if err := unix.Fstat(int(f.Fd()), &stat); err != nil { - return stat, &os.PathError{Op: "fstat", Path: f.Name(), Err: err} - } - return stat, nil -} - -func fstatfs(f *os.File) (unix.Statfs_t, error) { - var statfs unix.Statfs_t - if err := unix.Fstatfs(int(f.Fd()), &statfs); err != nil { - return statfs, &os.PathError{Op: "fstatfs", Path: f.Name(), Err: err} - } - return statfs, nil -} - -// The kernel guarantees that the root inode of a procfs mount has an -// f_type of PROC_SUPER_MAGIC and st_ino of PROC_ROOT_INO. -const ( - procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC - procRootIno = 1 // PROC_ROOT_INO -) - -func verifyProcRoot(procRoot *os.File) error { - if statfs, err := fstatfs(procRoot); err != nil { - return err - } else if statfs.Type != procSuperMagic { - return fmt.Errorf("%w: incorrect procfs root filesystem type 0x%x", errUnsafeProcfs, statfs.Type) - } - if stat, err := fstat(procRoot); err != nil { - return err - } else if stat.Ino != procRootIno { - return fmt.Errorf("%w: incorrect procfs root inode number %d", errUnsafeProcfs, stat.Ino) - } - return nil -} - -var hasNewMountApi = sync_OnceValue(func() bool { - // All of the pieces of the new mount API we use (fsopen, fsconfig, - // fsmount, open_tree) were added together in Linux 5.1[1,2], so we can - // just check for one of the syscalls and the others should also be - // available. - // - // Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE. - // This is equivalent to openat(2), but tells us if open_tree is - // available (and thus all of the other basic new mount API syscalls). - // open_tree(2) is most light-weight syscall to test here. - // - // [1]: merge commit 400913252d09 - // [2]: - fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC) - if err != nil { - return false - } - _ = unix.Close(fd) - return true -}) - -func fsopen(fsName string, flags int) (*os.File, error) { - // Make sure we always set O_CLOEXEC. - flags |= unix.FSOPEN_CLOEXEC - fd, err := unix.Fsopen(fsName, flags) - if err != nil { - return nil, os.NewSyscallError("fsopen "+fsName, err) - } - return os.NewFile(uintptr(fd), "fscontext:"+fsName), nil -} - -func fsmount(ctx *os.File, flags, mountAttrs int) (*os.File, error) { - // Make sure we always set O_CLOEXEC. - flags |= unix.FSMOUNT_CLOEXEC - fd, err := unix.Fsmount(int(ctx.Fd()), flags, mountAttrs) - if err != nil { - return nil, os.NewSyscallError("fsmount "+ctx.Name(), err) - } - return os.NewFile(uintptr(fd), "fsmount:"+ctx.Name()), nil -} - -func newPrivateProcMount() (*os.File, error) { - procfsCtx, err := fsopen("proc", unix.FSOPEN_CLOEXEC) - if err != nil { - return nil, err - } - defer procfsCtx.Close() - - // Try to configure hidepid=ptraceable,subset=pid if possible, but ignore errors. - _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "hidepid", "ptraceable") - _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid") - - // Get an actual handle. - if err := unix.FsconfigCreate(int(procfsCtx.Fd())); err != nil { - return nil, os.NewSyscallError("fsconfig create procfs", err) - } - return fsmount(procfsCtx, unix.FSMOUNT_CLOEXEC, unix.MS_RDONLY|unix.MS_NODEV|unix.MS_NOEXEC|unix.MS_NOSUID) -} - -func openTree(dir *os.File, path string, flags uint) (*os.File, error) { - dirFd := -int(unix.EBADF) - dirName := "." 
- if dir != nil { - dirFd = int(dir.Fd()) - dirName = dir.Name() - } - // Make sure we always set O_CLOEXEC. - flags |= unix.OPEN_TREE_CLOEXEC - fd, err := unix.OpenTree(dirFd, path, flags) - if err != nil { - return nil, &os.PathError{Op: "open_tree", Path: path, Err: err} - } - return os.NewFile(uintptr(fd), dirName+"/"+path), nil -} - -func clonePrivateProcMount() (_ *os.File, Err error) { - // Try to make a clone without using AT_RECURSIVE if we can. If this works, - // we can be sure there are no over-mounts and so if the root is valid then - // we're golden. Otherwise, we have to deal with over-mounts. - procfsHandle, err := openTree(nil, "/proc", unix.OPEN_TREE_CLONE) - if err != nil || hookForcePrivateProcRootOpenTreeAtRecursive(procfsHandle) { - procfsHandle, err = openTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE) - } - if err != nil { - return nil, fmt.Errorf("creating a detached procfs clone: %w", err) - } - defer func() { - if Err != nil { - _ = procfsHandle.Close() - } - }() - if err := verifyProcRoot(procfsHandle); err != nil { - return nil, err - } - return procfsHandle, nil -} - -func privateProcRoot() (*os.File, error) { - if !hasNewMountApi() || hookForceGetProcRootUnsafe() { - return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP) - } - // Try to create a new procfs mount from scratch if we can. This ensures we - // can get a procfs mount even if /proc is fake (for whatever reason). - procRoot, err := newPrivateProcMount() - if err != nil || hookForcePrivateProcRootOpenTree(procRoot) { - // Try to clone /proc then... - procRoot, err = clonePrivateProcMount() - } - return procRoot, err -} - -func unsafeHostProcRoot() (_ *os.File, Err error) { - procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) - if err != nil { - return nil, err - } - defer func() { - if Err != nil { - _ = procRoot.Close() - } - }() - if err := verifyProcRoot(procRoot); err != nil { - return nil, err - } - return procRoot, nil -} - -func doGetProcRoot() (*os.File, error) { - procRoot, err := privateProcRoot() - if err != nil { - // Fall back to using a /proc handle if making a private mount failed. - // If we have openat2, at least we can avoid some kinds of over-mount - // attacks, but without openat2 there's not much we can do. - procRoot, err = unsafeHostProcRoot() - } - return procRoot, err -} - -var getProcRoot = sync_OnceValues(func() (*os.File, error) { - return doGetProcRoot() -}) - -var hasProcThreadSelf = sync_OnceValue(func() bool { - return unix.Access("/proc/thread-self/", unix.F_OK) == nil -}) - -var errUnsafeProcfs = errors.New("unsafe procfs detected") - -type procThreadSelfCloser func() - -// procThreadSelf returns a handle to /proc/thread-self/ (or an -// equivalent handle on older kernels where /proc/thread-self doesn't exist). -// Once finished with the handle, you must call the returned closer function -// (runtime.UnlockOSThread). You must not pass the returned *os.File to other -// Go threads or use the handle after calling the closer. -// -// This is similar to ProcThreadSelf from runc, but with extra hardening -// applied and using *os.File. 
-func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThreadSelfCloser, Err error) { - // We need to lock our thread until the caller is done with the handle - // because between getting the handle and using it we could get interrupted - // by the Go runtime and hit the case where the underlying thread is - // swapped out and the original thread is killed, resulting in - // pull-your-hair-out-hard-to-debug issues in the caller. - runtime.LockOSThread() - defer func() { - if Err != nil { - runtime.UnlockOSThread() - } - }() - - // Figure out what prefix we want to use. - threadSelf := "thread-self/" - if !hasProcThreadSelf() || hookForceProcSelfTask() { - /// Pre-3.17 kernels don't have /proc/thread-self, so do it manually. - threadSelf = "self/task/" + strconv.Itoa(unix.Gettid()) + "/" - if _, err := fstatatFile(procRoot, threadSelf, unix.AT_SYMLINK_NOFOLLOW); err != nil || hookForceProcSelf() { - // In this case, we running in a pid namespace that doesn't match - // the /proc mount we have. This can happen inside runc. - // - // Unfortunately, there is no nice way to get the correct TID to - // use here because of the age of the kernel, so we have to just - // use /proc/self and hope that it works. - threadSelf = "self/" - } - } - - // Grab the handle. - var ( - handle *os.File - err error - ) - if hasOpenat2() { - // We prefer being able to use RESOLVE_NO_XDEV if we can, to be - // absolutely sure we are operating on a clean /proc handle that - // doesn't have any cheeky overmounts that could trick us (including - // symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't - // strictly needed, but just use it since we have it. - // - // NOTE: /proc/self is technically a magic-link (the contents of the - // symlink are generated dynamically), but it doesn't use - // nd_jump_link() so RESOLVE_NO_MAGICLINKS allows it. - // - // NOTE: We MUST NOT use RESOLVE_IN_ROOT here, as openat2File uses - // procSelfFdReadlink to clean up the returned f.Name() if we use - // RESOLVE_IN_ROOT (which would lead to an infinite recursion). - handle, err = openat2File(procRoot, threadSelf+subpath, &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_NOFOLLOW | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS, - }) - if err != nil { - // TODO: Once we bump the minimum Go version to 1.20, we can use - // multiple %w verbs for this wrapping. For now we need to use a - // compatibility shim for older Go versions. - //err = fmt.Errorf("%w: %w", errUnsafeProcfs, err) - return nil, nil, wrapBaseError(err, errUnsafeProcfs) - } - } else { - handle, err = openatFile(procRoot, threadSelf+subpath, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) - if err != nil { - // TODO: Once we bump the minimum Go version to 1.20, we can use - // multiple %w verbs for this wrapping. For now we need to use a - // compatibility shim for older Go versions. - //err = fmt.Errorf("%w: %w", errUnsafeProcfs, err) - return nil, nil, wrapBaseError(err, errUnsafeProcfs) - } - defer func() { - if Err != nil { - _ = handle.Close() - } - }() - // We can't detect bind-mounts of different parts of procfs on top of - // /proc (a-la RESOLVE_NO_XDEV), but we can at least be sure that we - // aren't on the wrong filesystem here. 
- if statfs, err := fstatfs(handle); err != nil { - return nil, nil, err - } else if statfs.Type != procSuperMagic { - return nil, nil, fmt.Errorf("%w: incorrect /proc/self/fd filesystem type 0x%x", errUnsafeProcfs, statfs.Type) - } - } - return handle, runtime.UnlockOSThread, nil -} - -// STATX_MNT_ID_UNIQUE is provided in golang.org/x/sys@v0.20.0, but in order to -// avoid bumping the requirement for a single constant we can just define it -// ourselves. -const STATX_MNT_ID_UNIQUE = 0x4000 - -var hasStatxMountId = sync_OnceValue(func() bool { - var ( - stx unix.Statx_t - // We don't care which mount ID we get. The kernel will give us the - // unique one if it is supported. - wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID - ) - err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx) - return err == nil && stx.Mask&wantStxMask != 0 -}) - -func getMountId(dir *os.File, path string) (uint64, error) { - // If we don't have statx(STATX_MNT_ID*) support, we can't do anything. - if !hasStatxMountId() { - return 0, nil - } - - var ( - stx unix.Statx_t - // We don't care which mount ID we get. The kernel will give us the - // unique one if it is supported. - wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID - ) - - err := unix.Statx(int(dir.Fd()), path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, int(wantStxMask), &stx) - if stx.Mask&wantStxMask == 0 { - // It's not a kernel limitation, for some reason we couldn't get a - // mount ID. Assume it's some kind of attack. - err = fmt.Errorf("%w: could not get mount id", errUnsafeProcfs) - } - if err != nil { - return 0, &os.PathError{Op: "statx(STATX_MNT_ID_...)", Path: dir.Name() + "/" + path, Err: err} - } - return stx.Mnt_id, nil -} - -func checkSymlinkOvermount(procRoot *os.File, dir *os.File, path string) error { - // Get the mntId of our procfs handle. - expectedMountId, err := getMountId(procRoot, "") - if err != nil { - return err - } - // Get the mntId of the target magic-link. - gotMountId, err := getMountId(dir, path) - if err != nil { - return err - } - // As long as the directory mount is alive, even with wrapping mount IDs, - // we would expect to see a different mount ID here. (Of course, if we're - // using unsafeHostProcRoot() then an attaker could change this after we - // did this check.) - if expectedMountId != gotMountId { - return fmt.Errorf("%w: symlink %s/%s has an overmount obscuring the real link (mount ids do not match %d != %d)", errUnsafeProcfs, dir.Name(), path, expectedMountId, gotMountId) - } - return nil -} - -func doRawProcSelfFdReadlink(procRoot *os.File, fd int) (string, error) { - fdPath := fmt.Sprintf("fd/%d", fd) - procFdLink, closer, err := procThreadSelf(procRoot, fdPath) - if err != nil { - return "", fmt.Errorf("get safe /proc/thread-self/%s handle: %w", fdPath, err) - } - defer procFdLink.Close() - defer closer() - - // Try to detect if there is a mount on top of the magic-link. Since we use the handle directly - // provide to the closure. If the closure uses the handle directly, this - // should be safe in general (a mount on top of the path afterwards would - // not affect the handle itself) and will definitely be safe if we are - // using privateProcRoot() (at least since Linux 5.12[1], when anonymous - // mount namespaces were completely isolated from external mounts including - // mount propagation events). - // - // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts - // onto targets that reside on shared mounts"). 
- if err := checkSymlinkOvermount(procRoot, procFdLink, ""); err != nil { - return "", fmt.Errorf("check safety of /proc/thread-self/fd/%d magiclink: %w", fd, err) - } - - // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See Linux commit - // 65cfc6722361 ("readlinkat(), fchownat() and fstatat() with empty - // relative pathnames"). - return readlinkatFile(procFdLink, "") -} - -func rawProcSelfFdReadlink(fd int) (string, error) { - procRoot, err := getProcRoot() - if err != nil { - return "", err - } - return doRawProcSelfFdReadlink(procRoot, fd) -} - -func procSelfFdReadlink(f *os.File) (string, error) { - return rawProcSelfFdReadlink(int(f.Fd())) -} - -var ( - errPossibleBreakout = errors.New("possible breakout detected") - errInvalidDirectory = errors.New("wandered into deleted directory") - errDeletedInode = errors.New("cannot verify path of deleted inode") -) - -func isDeadInode(file *os.File) error { - // If the nlink of a file drops to 0, there is an attacker deleting - // directories during our walk, which could result in weird /proc values. - // It's better to error out in this case. - stat, err := fstat(file) - if err != nil { - return fmt.Errorf("check for dead inode: %w", err) - } - if stat.Nlink == 0 { - err := errDeletedInode - if stat.Mode&unix.S_IFMT == unix.S_IFDIR { - err = errInvalidDirectory - } - return fmt.Errorf("%w %q", err, file.Name()) - } - return nil -} - -func checkProcSelfFdPath(path string, file *os.File) error { - if err := isDeadInode(file); err != nil { - return err - } - actualPath, err := procSelfFdReadlink(file) - if err != nil { - return fmt.Errorf("get path of handle: %w", err) - } - if actualPath != path { - return fmt.Errorf("%w: handle path %q doesn't match expected path %q", errPossibleBreakout, actualPath, path) - } - return nil -} - -// Test hooks used in the procfs tests to verify that the fallback logic works. -// See testing_mocks_linux_test.go and procfs_linux_test.go for more details. -var ( - hookForcePrivateProcRootOpenTree = hookDummyFile - hookForcePrivateProcRootOpenTreeAtRecursive = hookDummyFile - hookForceGetProcRootUnsafe = hookDummy - - hookForceProcSelfTask = hookDummy - hookForceProcSelf = hookDummy -) - -func hookDummy() bool { return false } -func hookDummyFile(_ *os.File) bool { return false } diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go index 36373f8c..4d89a481 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/vfs.go +++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause + // Copyright (C) 2017-2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/docker/cli/cli/cobra.go b/vendor/github.com/docker/cli/cli/cobra.go index 7a14b6f4..a75a66e6 100644 --- a/vendor/github.com/docker/cli/cli/cobra.go +++ b/vendor/github.com/docker/cli/cli/cobra.go @@ -168,34 +168,30 @@ func (tcmd *TopLevelCommand) Initialize(ops ...command.CLIOption) error { } // VisitAll will traverse all commands from the root. -// This is different from the VisitAll of cobra.Command where only parents -// are checked. +// +// Deprecated: this utility was only used internally and will be removed in the next release. 
func VisitAll(root *cobra.Command, fn func(*cobra.Command)) { + visitAll(root, fn) +} + +func visitAll(root *cobra.Command, fn func(*cobra.Command)) { for _, cmd := range root.Commands() { - VisitAll(cmd, fn) + visitAll(cmd, fn) } fn(root) } // DisableFlagsInUseLine sets the DisableFlagsInUseLine flag on all // commands within the tree rooted at cmd. +// +// Deprecated: this utility was only used internally and will be removed in the next release. func DisableFlagsInUseLine(cmd *cobra.Command) { - VisitAll(cmd, func(ccmd *cobra.Command) { + visitAll(cmd, func(ccmd *cobra.Command) { // do not add a `[flags]` to the end of the usage line. ccmd.DisableFlagsInUseLine = true }) } -// HasCompletionArg returns true if a cobra completion arg request is found. -func HasCompletionArg(args []string) bool { - for _, arg := range args { - if arg == cobra.ShellCompRequestCmd || arg == cobra.ShellCompNoDescRequestCmd { - return true - } - } - return false -} - var helpCommand = &cobra.Command{ Use: "help [command]", Short: "Help about the command", diff --git a/vendor/github.com/docker/cli/cli/command/cli.go b/vendor/github.com/docker/cli/cli/command/cli.go index 1e042ec0..85fac565 100644 --- a/vendor/github.com/docker/cli/cli/command/cli.go +++ b/vendor/github.com/docker/cli/cli/command/cli.go @@ -282,6 +282,17 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...CLIOption) } filterResourceAttributesEnvvar() + // early return if GODEBUG is already set or the docker context is + // the default context, i.e. is a virtual context where we won't override + // any GODEBUG values. + if v := os.Getenv("GODEBUG"); cli.currentContext == DefaultContextName || v != "" { + return nil + } + meta, err := cli.contextStore.GetMetadata(cli.currentContext) + if err == nil { + setGoDebug(meta) + } + return nil } @@ -475,6 +486,57 @@ func (cli *DockerCli) getDockerEndPoint() (ep docker.Endpoint, err error) { return resolveDockerEndpoint(cli.contextStore, cn) } +// setGoDebug is an escape hatch that sets the GODEBUG environment +// variable value using docker context metadata. +// +// { +// "Name": "my-context", +// "Metadata": { "GODEBUG": "x509negativeserial=1" } +// } +// +// WARNING: Setting x509negativeserial=1 allows Go's x509 library to accept +// X.509 certificates with negative serial numbers. +// This behavior is deprecated and non-compliant with current security +// standards (RFC 5280). Accepting negative serial numbers can introduce +// serious security vulnerabilities, including the risk of certificate +// collision or bypass attacks. +// This option should only be used for legacy compatibility and never in +// production environments. +// Use at your own risk. +func setGoDebug(meta store.Metadata) { + fieldName := "GODEBUG" + godebugEnv := os.Getenv(fieldName) + // early return if GODEBUG is already set. We don't want to override what + // the user already sets. 
+ if godebugEnv != "" { + return + } + + var cfg any + var ok bool + switch m := meta.Metadata.(type) { + case DockerContext: + cfg, ok = m.AdditionalFields[fieldName] + if !ok { + return + } + case map[string]any: + cfg, ok = m[fieldName] + if !ok { + return + } + default: + return + } + + v, ok := cfg.(string) + if !ok { + return + } + // set the GODEBUG environment variable with whatever was in the context + _ = os.Setenv(fieldName, v) +} + func (cli *DockerCli) initialize() error { cli.init.Do(func() { cli.dockerEndpoint, cli.initErr = cli.getDockerEndPoint() diff --git a/vendor/github.com/docker/cli/cli/command/registry.go b/vendor/github.com/docker/cli/cli/command/registry.go index be49d85b..dd6e74d4 100644 --- a/vendor/github.com/docker/cli/cli/command/registry.go +++ b/vendor/github.com/docker/cli/cli/command/registry.go @@ -31,11 +31,13 @@ const ( // authConfigKey is the key used to store credentials for Docker Hub. It is // a copy of [registry.IndexServer]. // -// [registry.IndexServer]: https://pkg.go.dev/github.com/docker/docker/registry#IndexServer +// [registry.IndexServer]: https://pkg.go.dev/github.com/docker/docker@v28.3.3+incompatible/registry#IndexServer const authConfigKey = "https://index.docker.io/v1/" // RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info // for the given command to prompt the user for username and password. +// +// Deprecated: this function is no longer used and will be removed in the next release. func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) registrytypes.RequestAuthConfig { configKey := getAuthConfigKey(index.Name) isDefaultRegistry := configKey == authConfigKey || index.Official @@ -66,6 +68,8 @@ func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInf // // It is similar to [registry.ResolveAuthConfig], but uses the credentials- // store, instead of looking up credentials from a map. +// +// [registry.ResolveAuthConfig]: https://pkg.go.dev/github.com/docker/docker@v28.3.3+incompatible/registry#ResolveAuthConfig func ResolveAuthConfig(cfg *configfile.ConfigFile, index *registrytypes.IndexInfo) registrytypes.AuthConfig { configKey := index.Name if index.Official { @@ -97,23 +101,6 @@ func GetDefaultAuthConfig(cfg *configfile.ConfigFile, checkCredStore bool, serve return registrytypes.AuthConfig(authconfig), nil } -// ConfigureAuth handles prompting of user's username and password if needed. -// -// Deprecated: use [PromptUserForCredentials] instead. -func ConfigureAuth(ctx context.Context, cli Cli, flUser, flPassword string, authConfig *registrytypes.AuthConfig, _ bool) error { - defaultUsername := authConfig.Username - serverAddress := authConfig.ServerAddress - - newAuthConfig, err := PromptUserForCredentials(ctx, cli, flUser, flPassword, defaultUsername, serverAddress) - if err != nil { - return err - } - - authConfig.Username = newAuthConfig.Username - authConfig.Password = newAuthConfig.Password - return nil -} - // PromptUserForCredentials handles the CLI prompt for the user to input // credentials. // If argUser is not empty, then the user is only prompted for their password. @@ -209,47 +196,38 @@ func PromptUserForCredentials(ctx context.Context, cli Cli, argUser, argPassword }, nil } -// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete -// image. 
The auth configuration is serialized as a base64url encoded RFC4648, -// section 5) JSON string for sending through the X-Registry-Auth header. +// RetrieveAuthTokenFromImage retrieves an encoded auth token given a +// complete image reference. The auth configuration is serialized as a +// base64url encoded ([RFC 4648, Section 5]) JSON string for sending through +// the "X-Registry-Auth" header. // -// For details on base64url encoding, see: -// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5 +// [RFC 4648, Section 5]: https://tools.ietf.org/html/rfc4648#section-5 func RetrieveAuthTokenFromImage(cfg *configfile.ConfigFile, image string) (string, error) { - // Retrieve encoded auth token from the image reference - authConfig, err := resolveAuthConfigFromImage(cfg, image) + registryRef, err := reference.ParseNormalizedNamed(image) if err != nil { return "", err } - encodedAuth, err := registrytypes.EncodeAuthConfig(authConfig) + configKey := getAuthConfigKey(reference.Domain(registryRef)) + authConfig, err := cfg.GetAuthConfig(configKey) + if err != nil { + return "", err + } + + encodedAuth, err := registrytypes.EncodeAuthConfig(registrytypes.AuthConfig(authConfig)) if err != nil { return "", err } return encodedAuth, nil } -// resolveAuthConfigFromImage retrieves that AuthConfig using the image string -func resolveAuthConfigFromImage(cfg *configfile.ConfigFile, image string) (registrytypes.AuthConfig, error) { - registryRef, err := reference.ParseNormalizedNamed(image) - if err != nil { - return registrytypes.AuthConfig{}, err - } - configKey := getAuthConfigKey(reference.Domain(registryRef)) - a, err := cfg.GetAuthConfig(configKey) - if err != nil { - return registrytypes.AuthConfig{}, err - } - return registrytypes.AuthConfig(a), nil -} - // getAuthConfigKey special-cases using the full index address of the official // index as the AuthConfig key, and uses the (host)name[:port] for private indexes. // // It is similar to [registry.GetAuthConfigKey], but does not require on // [registrytypes.IndexInfo] as intermediate. // -// [registry.GetAuthConfigKey]: https://pkg.go.dev/github.com/docker/docker/registry#GetAuthConfigKey -// [registrytypes.IndexInfo]:https://pkg.go.dev/github.com/docker/docker/api/types/registry#IndexInfo +// [registry.GetAuthConfigKey]: https://pkg.go.dev/github.com/docker/docker@v28.3.3+incompatible/registry#GetAuthConfigKey +// [registrytypes.IndexInfo]: https://pkg.go.dev/github.com/docker/docker@v28.3.3+incompatible/api/types/registry#IndexInfo func getAuthConfigKey(domainName string) string { if domainName == "docker.io" || domainName == "index.docker.io" { return authConfigKey diff --git a/vendor/github.com/docker/cli/cli/command/stack/formatter/formatter.go b/vendor/github.com/docker/cli/cli/command/stack/formatter/formatter.go index 07b322f0..7cfe8a89 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/formatter/formatter.go +++ b/vendor/github.com/docker/cli/cli/command/stack/formatter/formatter.go @@ -8,21 +8,31 @@ import ( const ( // SwarmStackTableFormat is the default Swarm stack format + // + // Deprecated: this type was for internal use and will be removed in the next release. SwarmStackTableFormat formatter.Format = "table {{.Name}}\t{{.Services}}" stackServicesHeader = "SERVICES" // TableFormatKey is an alias for formatter.TableFormatKey + // + // Deprecated: this type was for internal use and will be removed in the next release. 
TableFormatKey = formatter.TableFormatKey ) // Context is an alias for formatter.Context +// +// Deprecated: this type was for internal use and will be removed in the next release. type Context = formatter.Context // Format is an alias for formatter.Format +// +// Deprecated: this type was for internal use and will be removed in the next release. type Format = formatter.Format // Stack contains deployed stack information. +// +// Deprecated: this type was for internal use and will be removed in the next release. type Stack struct { // Name is the name of the stack Name string @@ -31,6 +41,8 @@ type Stack struct { } // StackWrite writes formatted stacks using the Context +// +// Deprecated: this function was for internal use and will be removed in the next release. func StackWrite(ctx formatter.Context, stacks []*Stack) error { render := func(format func(subContext formatter.SubContext) error) error { for _, stack := range stacks { diff --git a/vendor/github.com/docker/cli/cli/command/trust.go b/vendor/github.com/docker/cli/cli/command/trust.go deleted file mode 100644 index 65f24085..00000000 --- a/vendor/github.com/docker/cli/cli/command/trust.go +++ /dev/null @@ -1,15 +0,0 @@ -package command - -import ( - "github.com/spf13/pflag" -) - -// AddTrustVerificationFlags adds content trust flags to the provided flagset -func AddTrustVerificationFlags(fs *pflag.FlagSet, v *bool, trusted bool) { - fs.BoolVar(v, "disable-content-trust", !trusted, "Skip image verification") -} - -// AddTrustSigningFlags adds "signing" flags to the provided flagset -func AddTrustSigningFlags(fs *pflag.FlagSet, v *bool, trusted bool) { - fs.BoolVar(v, "disable-content-trust", !trusted, "Skip image signing") -} diff --git a/vendor/github.com/docker/cli/cli/command/utils.go b/vendor/github.com/docker/cli/cli/command/utils.go index ab64ef8f..fdedc2a1 100644 --- a/vendor/github.com/docker/cli/cli/command/utils.go +++ b/vendor/github.com/docker/cli/cli/command/utils.go @@ -14,30 +14,20 @@ import ( "github.com/docker/cli/cli/streams" "github.com/docker/cli/internal/prompt" "github.com/docker/docker/api/types/filters" - "github.com/moby/sys/atomicwriter" "github.com/pkg/errors" - "github.com/spf13/pflag" ) -// CopyToFile writes the content of the reader to the specified file +// ErrPromptTerminated is returned if the user terminated the prompt. // -// Deprecated: use [atomicwriter.New]. -func CopyToFile(outfile string, r io.Reader) error { - writer, err := atomicwriter.New(outfile, 0o600) - if err != nil { - return err - } - defer writer.Close() - _, err = io.Copy(writer, r) - return err -} - +// Deprecated: this error is for internal use and will be removed in the next release. const ErrPromptTerminated = prompt.ErrTerminated // DisableInputEcho disables input echo on the provided streams.In. // This is useful when the user provides sensitive information like passwords. // The function returns a restore function that should be called to restore the // terminal state. +// +// Deprecated: this function is for internal use and will be removed in the next release. func DisableInputEcho(ins *streams.In) (restore func() error, err error) { return prompt.DisableInputEcho(ins) } @@ -49,6 +39,8 @@ func DisableInputEcho(ins *streams.In) (restore func() error, err error) { // When the prompt returns an error, the caller should propagate the error up // the stack and close the io.Reader used for the prompt which will prevent the // background goroutine from blocking indefinitely. 
+// +// Deprecated: this function is for internal use and will be removed in the next release. func PromptForInput(ctx context.Context, in io.Reader, out io.Writer, message string) (string, error) { return prompt.ReadInput(ctx, in, out, message) } @@ -63,6 +55,8 @@ func PromptForInput(ctx context.Context, in io.Reader, out io.Writer, message st // When the prompt returns an error, the caller should propagate the error up // the stack and close the io.Reader used for the prompt which will prevent the // background goroutine from blocking indefinitely. +// +// Deprecated: this function is for internal use and will be removed in the next release. func PromptForConfirmation(ctx context.Context, ins io.Reader, outs io.Writer, message string) (bool, error) { return prompt.Confirm(ctx, ins, outs, message) } @@ -108,12 +102,6 @@ func PruneFilters(dockerCLI config.Provider, pruneFilters filters.Args) filters. return pruneFilters } -// AddPlatformFlag adds `platform` to a set of flags for API version 1.32 and later. -func AddPlatformFlag(flags *pflag.FlagSet, target *string) { - flags.StringVar(target, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Set platform if server is multi-platform capable") - _ = flags.SetAnnotation("platform", "version", []string{"1.32"}) -} - // ValidateOutputPath validates the output paths of the "docker cp" command. func ValidateOutputPath(path string) error { dir := filepath.Dir(filepath.Clean(path)) diff --git a/vendor/github.com/docker/cli/opts/envfile.go b/vendor/github.com/docker/cli/cli/compose/loader/envfile.go similarity index 54% rename from vendor/github.com/docker/cli/opts/envfile.go rename to vendor/github.com/docker/cli/cli/compose/loader/envfile.go index 3a16e6c1..6430f4b5 100644 --- a/vendor/github.com/docker/cli/opts/envfile.go +++ b/vendor/github.com/docker/cli/cli/compose/loader/envfile.go @@ -1,4 +1,4 @@ -package opts +package loader import ( "os" @@ -6,19 +6,21 @@ import ( "github.com/docker/cli/pkg/kvfile" ) -// ParseEnvFile reads a file with environment variables enumerated by lines +// parseEnvFile reads a file with environment variables enumerated by lines // // “Environment variable names used by the utilities in the Shell and -// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase +// Utilities volume of [IEEE Std 1003.1-2001] consist solely of uppercase // letters, digits, and the '_' (underscore) from the characters defined in // Portable Character Set and do not begin with a digit. *But*, other // characters may be permitted by an implementation; applications shall // tolerate the presence of such names.” -// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html // -// As of #16585, it's up to application inside docker to validate or not +// As of [moby-16585], it's up to application inside docker to validate or not // environment variables, that's why we just strip leading whitespace and // nothing more. 
-func ParseEnvFile(filename string) ([]string, error) { +// +// [IEEE Std 1003.1-2001]: http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html +// [moby-16585]: https://github.com/moby/moby/issues/16585 +func parseEnvFile(filename string) ([]string, error) { return kvfile.Parse(filename, os.LookupEnv) } diff --git a/vendor/github.com/docker/cli/cli/compose/loader/loader.go b/vendor/github.com/docker/cli/cli/compose/loader/loader.go index b2673394..3b788ca0 100644 --- a/vendor/github.com/docker/cli/cli/compose/loader/loader.go +++ b/vendor/github.com/docker/cli/cli/compose/loader/loader.go @@ -17,6 +17,7 @@ import ( "github.com/docker/cli/cli/compose/schema" "github.com/docker/cli/cli/compose/template" "github.com/docker/cli/cli/compose/types" + "github.com/docker/cli/internal/volumespec" "github.com/docker/cli/opts" "github.com/docker/cli/opts/swarmopts" "github.com/docker/docker/api/types/versions" @@ -41,6 +42,13 @@ type Options struct { discardEnvFiles bool } +// ParseVolume parses a volume spec without any knowledge of the target platform. +// +// This function is unused, but kept for backward-compatibility for external users. +func ParseVolume(spec string) (types.ServiceVolumeConfig, error) { + return volumespec.Parse(spec) +} + // WithDiscardEnvFiles sets the Options to discard the `env_file` section after resolving to // the `environment` section func WithDiscardEnvFiles(options *Options) { @@ -460,7 +468,7 @@ func resolveEnvironment(serviceConfig *types.ServiceConfig, workingDir string, l for _, file := range serviceConfig.EnvFile { filePath := absPath(workingDir, file) - fileVars, err := opts.ParseEnvFile(filePath) + fileVars, err := parseEnvFile(filePath) if err != nil { return err } @@ -756,7 +764,7 @@ var transformBuildConfig TransformerFunc = func(data any) (any, error) { var transformServiceVolumeConfig TransformerFunc = func(data any) (any, error) { switch value := data.(type) { case string: - return ParseVolume(value) + return volumespec.Parse(value) case map[string]any: return data, nil default: diff --git a/vendor/github.com/docker/cli/cli/compose/types/types.go b/vendor/github.com/docker/cli/cli/compose/types/types.go index 0804388a..9d0c11b4 100644 --- a/vendor/github.com/docker/cli/cli/compose/types/types.go +++ b/vendor/github.com/docker/cli/cli/compose/types/types.go @@ -8,6 +8,8 @@ import ( "fmt" "strconv" "time" + + "github.com/docker/cli/internal/volumespec" ) // UnsupportedProperties not yet supported by this implementation of the compose file @@ -390,43 +392,23 @@ type ServicePortConfig struct { } // ServiceVolumeConfig are references to a volume used by a service -type ServiceVolumeConfig struct { - Type string `yaml:",omitempty" json:"type,omitempty"` - Source string `yaml:",omitempty" json:"source,omitempty"` - Target string `yaml:",omitempty" json:"target,omitempty"` - ReadOnly bool `mapstructure:"read_only" yaml:"read_only,omitempty" json:"read_only,omitempty"` - Consistency string `yaml:",omitempty" json:"consistency,omitempty"` - Bind *ServiceVolumeBind `yaml:",omitempty" json:"bind,omitempty"` - Volume *ServiceVolumeVolume `yaml:",omitempty" json:"volume,omitempty"` - Image *ServiceVolumeImage `yaml:",omitempty" json:"image,omitempty"` - Tmpfs *ServiceVolumeTmpfs `yaml:",omitempty" json:"tmpfs,omitempty"` - Cluster *ServiceVolumeCluster `yaml:",omitempty" json:"cluster,omitempty"` -} +type ServiceVolumeConfig = volumespec.VolumeConfig // ServiceVolumeBind are options for a service volume of type bind -type ServiceVolumeBind struct { - 
Propagation string `yaml:",omitempty" json:"propagation,omitempty"` -} +type ServiceVolumeBind = volumespec.BindOpts // ServiceVolumeVolume are options for a service volume of type volume -type ServiceVolumeVolume struct { - NoCopy bool `mapstructure:"nocopy" yaml:"nocopy,omitempty" json:"nocopy,omitempty"` - Subpath string `mapstructure:"subpath" yaml:"subpath,omitempty" json:"subpath,omitempty"` -} +type ServiceVolumeVolume = volumespec.VolumeOpts // ServiceVolumeImage are options for a service volume of type image -type ServiceVolumeImage struct { - Subpath string `mapstructure:"subpath" yaml:"subpath,omitempty" json:"subpath,omitempty"` -} +type ServiceVolumeImage = volumespec.ImageOpts // ServiceVolumeTmpfs are options for a service volume of type tmpfs -type ServiceVolumeTmpfs struct { - Size int64 `yaml:",omitempty" json:"size,omitempty"` -} +type ServiceVolumeTmpfs = volumespec.TmpFsOpts // ServiceVolumeCluster are options for a service volume of type cluster. // Deliberately left blank for future options, but unused now. -type ServiceVolumeCluster struct{} +type ServiceVolumeCluster = volumespec.ClusterOpts // FileReferenceConfig for a reference to a swarm file object type FileReferenceConfig struct { diff --git a/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/vendor/github.com/docker/cli/cli/config/types/authconfig.go index 056af6b8..95eb27c8 100644 --- a/vendor/github.com/docker/cli/cli/config/types/authconfig.go +++ b/vendor/github.com/docker/cli/cli/config/types/authconfig.go @@ -7,8 +7,8 @@ type AuthConfig struct { Auth string `json:"auth,omitempty"` // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. + // + // Deprecated: This field is deprecated since docker 1.11 (API v1.23) and will be removed in the next release. Email string `json:"email,omitempty"` ServerAddress string `json:"serveraddress,omitempty"` diff --git a/vendor/github.com/docker/cli/cli/context/store/errors.go b/vendor/github.com/docker/cli/cli/context/store/errors.go index e85ce325..cabc3f7a 100644 --- a/vendor/github.com/docker/cli/cli/context/store/errors.go +++ b/vendor/github.com/docker/cli/cli/context/store/errors.go @@ -1,9 +1,9 @@ package store -import cerrdefs "github.com/containerd/errdefs" +import "github.com/containerd/errdefs" func invalidParameter(err error) error { - if err == nil || cerrdefs.IsInvalidArgument(err) { + if err == nil || errdefs.IsInvalidArgument(err) { return err } return invalidParameterErr{err} @@ -14,7 +14,7 @@ type invalidParameterErr struct{ error } func (invalidParameterErr) InvalidParameter() {} func notFound(err error) error { - if err == nil || cerrdefs.IsNotFound(err) { + if err == nil || errdefs.IsNotFound(err) { return err } return notFoundErr{err} diff --git a/vendor/github.com/docker/cli/cli/flags/options.go b/vendor/github.com/docker/cli/cli/flags/options.go index fc168984..309cb461 100644 --- a/vendor/github.com/docker/cli/cli/flags/options.go +++ b/vendor/github.com/docker/cli/cli/flags/options.go @@ -1,12 +1,12 @@ package flags import ( + "errors" "fmt" "os" "path/filepath" "github.com/docker/cli/cli/config" - "github.com/docker/cli/opts" "github.com/docker/docker/client" "github.com/docker/go-connections/tlsconfig" "github.com/sirupsen/logrus" @@ -54,6 +54,39 @@ var ( dockerTLS = os.Getenv(EnvEnableTLS) != "" ) +// hostVar is used for the '--host' / '-H' flag to set [ClientOptions.Hosts]. 
+// The [ClientOptions.Hosts] field is a slice because it was originally shared +// with the daemon config. However, the CLI only allows for a single host to +// be specified. +// +// hostVar presents itself as a "string", but stores the value in a string +// slice. It produces an error when trying to set multiple values, matching +// the check in [getServerHost]. +// +// [getServerHost]: https://github.com/docker/cli/blob/7eab668982645def1cd46fe1b60894cba6fd17a4/cli/command/cli.go#L542-L551 +type hostVar struct { + dst *[]string + set bool +} + +func (h *hostVar) String() string { + if h.dst == nil || len(*h.dst) == 0 { + return "" + } + return (*h.dst)[0] +} + +func (h *hostVar) Set(s string) error { + if h.set { + return errors.New("specify only one -H") + } + *h.dst = []string{s} + h.set = true + return nil +} + +func (*hostVar) Type() string { return "string" } + // ClientOptions are the options used to configure the client cli. type ClientOptions struct { Debug bool @@ -90,13 +123,13 @@ func (o *ClientOptions) InstallFlags(flags *pflag.FlagSet) { KeyFile: filepath.Join(dockerCertPath, DefaultKeyFile), } tlsOptions := o.TLSOptions - flags.Var(opts.NewQuotedString(&tlsOptions.CAFile), "tlscacert", "Trust certs signed only by this CA") - flags.Var(opts.NewQuotedString(&tlsOptions.CertFile), "tlscert", "Path to TLS certificate file") - flags.Var(opts.NewQuotedString(&tlsOptions.KeyFile), "tlskey", "Path to TLS key file") + flags.Var(&quotedString{&tlsOptions.CAFile}, "tlscacert", "Trust certs signed only by this CA") + flags.Var(&quotedString{&tlsOptions.CertFile}, "tlscert", "Path to TLS certificate file") + flags.Var(&quotedString{&tlsOptions.KeyFile}, "tlskey", "Path to TLS key file") - // opts.ValidateHost is not used here, so as to allow connection helpers - hostOpt := opts.NewNamedListOptsRef("hosts", &o.Hosts, nil) - flags.VarP(hostOpt, "host", "H", "Daemon socket to connect to") + // TODO(thaJeztah): show the default host. + // TODO(thaJeztah): this should be a string, not an "array" as we only allow a single host. 
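For illustration, a hedged sketch of the single-host behaviour introduced by hostVar above, exercised through the exported flags.ClientOptions API; the flag-set name and addresses are arbitrary, and the exact error wording depends on pflag's wrapping.

package main

import (
	"fmt"

	cliflags "github.com/docker/cli/cli/flags"
	"github.com/spf13/pflag"
)

func main() {
	opts := &cliflags.ClientOptions{}
	fs := pflag.NewFlagSet("docker", pflag.ContinueOnError)
	opts.InstallFlags(fs)

	// A single -H is accepted and becomes the only element of opts.Hosts.
	err := fs.Parse([]string{"-H", "tcp://127.0.0.1:2375"})
	fmt.Println(err, opts.Hosts)

	// A second -H on the same flag set is rejected by hostVar.Set
	// ("specify only one -H") instead of being appended to the slice.
	err = fs.Parse([]string{"-H", "unix:///var/run/docker.sock"})
	fmt.Println(err)
}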
+ flags.VarP(&hostVar{dst: &o.Hosts}, "host", "H", "Daemon socket to connect to") flags.StringVarP(&o.Context, "context", "c", "", `Name of the context to use to connect to the daemon (overrides `+client.EnvOverrideHost+` env var and default context set with "docker context use")`) } @@ -146,3 +179,33 @@ func SetLogLevel(logLevel string) { logrus.SetLevel(logrus.InfoLevel) } } + +type quotedString struct { + value *string +} + +func (s *quotedString) Set(val string) error { + *s.value = trimQuotes(val) + return nil +} + +func (*quotedString) Type() string { + return "string" +} + +func (s *quotedString) String() string { + return *s.value +} + +func trimQuotes(value string) string { + if len(value) < 2 { + return value + } + lastIndex := len(value) - 1 + for _, char := range []byte{'\'', '"'} { + if value[0] == char && value[lastIndex] == char { + return value[1:lastIndex] + } + } + return value +} diff --git a/vendor/github.com/docker/cli/internal/volumespec/types.go b/vendor/github.com/docker/cli/internal/volumespec/types.go new file mode 100644 index 00000000..7eb9a500 --- /dev/null +++ b/vendor/github.com/docker/cli/internal/volumespec/types.go @@ -0,0 +1,40 @@ +package volumespec + +// VolumeConfig are references to a volume used by a service +type VolumeConfig struct { + Type string `yaml:",omitempty" json:"type,omitempty"` + Source string `yaml:",omitempty" json:"source,omitempty"` + Target string `yaml:",omitempty" json:"target,omitempty"` + ReadOnly bool `mapstructure:"read_only" yaml:"read_only,omitempty" json:"read_only,omitempty"` + Consistency string `yaml:",omitempty" json:"consistency,omitempty"` + Bind *BindOpts `yaml:",omitempty" json:"bind,omitempty"` + Volume *VolumeOpts `yaml:",omitempty" json:"volume,omitempty"` + Image *ImageOpts `yaml:",omitempty" json:"image,omitempty"` + Tmpfs *TmpFsOpts `yaml:",omitempty" json:"tmpfs,omitempty"` + Cluster *ClusterOpts `yaml:",omitempty" json:"cluster,omitempty"` +} + +// BindOpts are options for a service volume of type bind +type BindOpts struct { + Propagation string `yaml:",omitempty" json:"propagation,omitempty"` +} + +// VolumeOpts are options for a service volume of type volume +type VolumeOpts struct { + NoCopy bool `mapstructure:"nocopy" yaml:"nocopy,omitempty" json:"nocopy,omitempty"` + Subpath string `mapstructure:"subpath" yaml:"subpath,omitempty" json:"subpath,omitempty"` +} + +// ImageOpts are options for a service volume of type image +type ImageOpts struct { + Subpath string `mapstructure:"subpath" yaml:"subpath,omitempty" json:"subpath,omitempty"` +} + +// TmpFsOpts are options for a service volume of type tmpfs +type TmpFsOpts struct { + Size int64 `yaml:",omitempty" json:"size,omitempty"` +} + +// ClusterOpts are options for a service volume of type cluster. +// Deliberately left blank for future options, but unused now. 
+type ClusterOpts struct{} diff --git a/vendor/github.com/docker/cli/cli/compose/loader/volume.go b/vendor/github.com/docker/cli/internal/volumespec/volumespec.go similarity index 82% rename from vendor/github.com/docker/cli/cli/compose/loader/volume.go rename to vendor/github.com/docker/cli/internal/volumespec/volumespec.go index f043f4aa..0ae4f31c 100644 --- a/vendor/github.com/docker/cli/cli/compose/loader/volume.go +++ b/vendor/github.com/docker/cli/internal/volumespec/volumespec.go @@ -1,20 +1,19 @@ -package loader +package volumespec import ( "strings" "unicode" "unicode/utf8" - "github.com/docker/cli/cli/compose/types" "github.com/docker/docker/api/types/mount" "github.com/pkg/errors" ) const endOfSpec = rune(0) -// ParseVolume parses a volume spec without any knowledge of the target platform -func ParseVolume(spec string) (types.ServiceVolumeConfig, error) { - volume := types.ServiceVolumeConfig{} +// Parse parses a volume spec without any knowledge of the target platform +func Parse(spec string) (VolumeConfig, error) { + volume := VolumeConfig{} switch len(spec) { case 0: @@ -49,7 +48,7 @@ func isWindowsDrive(buffer []rune, char rune) bool { return char == ':' && len(buffer) == 1 && unicode.IsLetter(buffer[0]) } -func populateFieldFromBuffer(char rune, buffer []rune, volume *types.ServiceVolumeConfig) error { +func populateFieldFromBuffer(char rune, buffer []rune, volume *VolumeConfig) error { strBuffer := string(buffer) switch { case len(buffer) == 0: @@ -74,10 +73,10 @@ func populateFieldFromBuffer(char rune, buffer []rune, volume *types.ServiceVolu case "rw": volume.ReadOnly = false case "nocopy": - volume.Volume = &types.ServiceVolumeVolume{NoCopy: true} + volume.Volume = &VolumeOpts{NoCopy: true} default: if isBindOption(option) { - volume.Bind = &types.ServiceVolumeBind{Propagation: option} + volume.Bind = &BindOpts{Propagation: option} } // ignore unknown options } @@ -94,7 +93,7 @@ func isBindOption(option string) bool { return false } -func populateType(volume *types.ServiceVolumeConfig) { +func populateType(volume *VolumeConfig) { switch { // Anonymous volume case volume.Source == "": diff --git a/vendor/github.com/docker/cli/opts/env.go b/vendor/github.com/docker/cli/opts/env.go index 675ddda9..2a21394b 100644 --- a/vendor/github.com/docker/cli/opts/env.go +++ b/vendor/github.com/docker/cli/opts/env.go @@ -7,13 +7,14 @@ import ( ) // ValidateEnv validates an environment variable and returns it. -// If no value is specified, it obtains its value from the current environment +// If no value is specified, it obtains its value from the current environment. // -// As on ParseEnvFile and related to #16585, environment variable names -// are not validated, and it's up to the application inside the container -// to validate them or not. +// Environment variable names are not validated, and it's up to the application +// inside the container to validate them (see [moby-16585]). The only validation +// here is to check if name is empty, per [moby-25099]. 
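For illustration, a small sketch of the volume-spec parser that now lives in internal/volumespec; external callers still reach it through the kept-for-compatibility loader.ParseVolume shown earlier in this diff. The spec string is an arbitrary example.

package main

import (
	"fmt"

	"github.com/docker/cli/cli/compose/loader"
)

func main() {
	// Short syntax: source, target, then comma-separated options.
	vol, err := loader.ParseVolume("./data:/var/lib/app:ro")
	if err != nil {
		panic(err)
	}
	// The type is inferred from the source ("bind" for path-like sources)
	// and the "ro" option sets ReadOnly.
	fmt.Println(vol.Type, vol.Source, vol.Target, vol.ReadOnly)
}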
// -// The only validation here is to check if name is empty, per #25099 +// [moby-16585]: https://github.com/moby/moby/issues/16585 +// [moby-25099]: https://github.com/moby/moby/issues/25099 func ValidateEnv(val string) (string, error) { k, _, hasValue := strings.Cut(val, "=") if k == "" { diff --git a/vendor/github.com/docker/cli/opts/envfile_deprecated.go b/vendor/github.com/docker/cli/opts/envfile_deprecated.go new file mode 100644 index 00000000..f2f10b18 --- /dev/null +++ b/vendor/github.com/docker/cli/opts/envfile_deprecated.go @@ -0,0 +1,14 @@ +package opts + +import ( + "os" + + "github.com/docker/cli/pkg/kvfile" +) + +// ParseEnvFile reads a file with environment variables enumerated by lines +// +// Deprecated: use [kvfile.Parse] and pass [os.LookupEnv] to lookup env-vars from the current environment. +func ParseEnvFile(filename string) ([]string, error) { + return kvfile.Parse(filename, os.LookupEnv) +} diff --git a/vendor/github.com/docker/cli/opts/hosts.go b/vendor/github.com/docker/cli/opts/hosts.go index 552ab6b4..87e1a1da 100644 --- a/vendor/github.com/docker/cli/opts/hosts.go +++ b/vendor/github.com/docker/cli/opts/hosts.go @@ -34,7 +34,7 @@ const ( // ValidateHost validates that the specified string is a valid host and returns it. // -// TODO(thaJeztah): ValidateHost appears to be unused; deprecate it. +// Deprecated: this function is no longer used, and will be removed in the next release. func ValidateHost(val string) (string, error) { host := strings.TrimSpace(val) // The empty string means default and is not handled by parseDockerDaemonHost diff --git a/vendor/github.com/docker/cli/opts/opts.go b/vendor/github.com/docker/cli/opts/opts.go index 1a885db3..94eda056 100644 --- a/vendor/github.com/docker/cli/opts/opts.go +++ b/vendor/github.com/docker/cli/opts/opts.go @@ -134,6 +134,8 @@ func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts { // NamedOption is an interface that list and map options // with names implement. +// +// Deprecated: NamedOption is no longer used and will be removed in the next release. type NamedOption interface { Name() string } @@ -141,6 +143,8 @@ type NamedOption interface { // NamedListOpts is a ListOpts with a configuration name. // This struct is useful to keep reference to the assigned // field name in the internal configuration struct. +// +// Deprecated: NamedListOpts is no longer used and will be removed in the next release. type NamedListOpts struct { name string ListOpts @@ -149,6 +153,8 @@ type NamedListOpts struct { var _ NamedOption = &NamedListOpts{} // NewNamedListOptsRef creates a reference to a new NamedListOpts struct. +// +// Deprecated: NewNamedListOptsRef is no longer used and will be removed in the next release. func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { return &NamedListOpts{ name: name, @@ -157,6 +163,8 @@ func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctTy } // Name returns the name of the NamedListOpts in the configuration. +// +// Deprecated: NamedListOpts is no longer used and will be removed in the next release. func (o *NamedListOpts) Name() string { return o.name } @@ -210,6 +218,8 @@ func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { // NamedMapOpts is a MapOpts struct with a configuration name. // This struct is useful to keep reference to the assigned // field name in the internal configuration struct. 
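For illustration, a sketch of the replacement suggested by the ParseEnvFile deprecation note in this diff: calling kvfile.Parse directly and passing os.LookupEnv. The ".env" path is a made-up example.

package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/pkg/kvfile"
)

func main() {
	// Parse KEY=VALUE lines; bare "KEY" lines are resolved from the
	// process environment via os.LookupEnv.
	vars, err := kvfile.Parse(".env", os.LookupEnv)
	if err != nil {
		panic(err)
	}
	for _, kv := range vars {
		fmt.Println(kv)
	}
}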
+// +// Deprecated: NamedMapOpts is no longer used and will be removed in the next release. type NamedMapOpts struct { name string MapOpts @@ -218,6 +228,8 @@ type NamedMapOpts struct { var _ NamedOption = &NamedMapOpts{} // NewNamedMapOpts creates a reference to a new NamedMapOpts struct. +// +// Deprecated: NamedMapOpts is no longer used and will be removed in the next release. func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { return &NamedMapOpts{ name: name, @@ -226,6 +238,8 @@ func NewNamedMapOpts(name string, values map[string]string, validator ValidatorF } // Name returns the name of the NamedMapOpts in the configuration. +// +// Deprecated: NamedMapOpts is no longer used and will be removed in the next release. func (o *NamedMapOpts) Name() string { return o.name } diff --git a/vendor/github.com/docker/cli/opts/quotedstring.go b/vendor/github.com/docker/cli/opts/quotedstring.go index eb2ac7fb..d1d8b09a 100644 --- a/vendor/github.com/docker/cli/opts/quotedstring.go +++ b/vendor/github.com/docker/cli/opts/quotedstring.go @@ -2,6 +2,8 @@ package opts // QuotedString is a string that may have extra quotes around the value. The // quotes are stripped from the value. +// +// Deprecated: This option type is no longer used and will be removed in the next release. type QuotedString struct { value *string } @@ -35,6 +37,8 @@ func trimQuotes(value string) string { } // NewQuotedString returns a new quoted string option +// +// Deprecated: This option type is no longer used and will be removed in the next release. func NewQuotedString(value *string) *QuotedString { return &QuotedString{value: value} } diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 3880635d..1401fa71 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -1608,6 +1608,8 @@ definitions: Bridge: description: | Name of the default bridge interface when dockerd's --bridge flag is set. + + Deprecated: This field is only set when the daemon is started with the --bridge flag specified. type: "string" example: "docker0" SandboxID: @@ -2234,6 +2236,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" @@ -4392,6 +4398,7 @@ definitions: A counter that triggers an update even if no relevant parameters have been changed. type: "integer" + format: "uint64" Runtime: description: | Runtime is the type of runtime specified for the task executor. diff --git a/vendor/github.com/docker/docker/api/types/build/disk_usage.go b/vendor/github.com/docker/docker/api/types/build/disk_usage.go index e969b6d6..cfd73332 100644 --- a/vendor/github.com/docker/docker/api/types/build/disk_usage.go +++ b/vendor/github.com/docker/docker/api/types/build/disk_usage.go @@ -1,6 +1,8 @@ package build // CacheDiskUsage contains disk usage for the build cache. +// +// Deprecated: this type is no longer used and will be removed in the next release. 
type CacheDiskUsage struct { TotalSize int64 Reclaimable int64 diff --git a/vendor/github.com/docker/docker/api/types/container/disk_usage.go b/vendor/github.com/docker/docker/api/types/container/disk_usage.go index 05b6cbe9..d77538c2 100644 --- a/vendor/github.com/docker/docker/api/types/container/disk_usage.go +++ b/vendor/github.com/docker/docker/api/types/container/disk_usage.go @@ -1,6 +1,8 @@ package container // DiskUsage contains disk usage for containers. +// +// Deprecated: this type is no longer used and will be removed in the next release. type DiskUsage struct { TotalSize int64 Reclaimable int64 diff --git a/vendor/github.com/docker/docker/api/types/container/network_settings.go b/vendor/github.com/docker/docker/api/types/container/network_settings.go index afec0e54..687145f2 100644 --- a/vendor/github.com/docker/docker/api/types/container/network_settings.go +++ b/vendor/github.com/docker/docker/api/types/container/network_settings.go @@ -13,8 +13,11 @@ type NetworkSettings struct { } // NetworkSettingsBase holds networking state for a container when inspecting it. +// +// Deprecated: Most fields in NetworkSettingsBase are deprecated. Fields which aren't deprecated will move to +// NetworkSettings in v29.0, and this struct will be removed. type NetworkSettingsBase struct { - Bridge string // Bridge contains the name of the default bridge interface iff it was set through the daemon --bridge flag. + Bridge string // Deprecated: This field is only set when the daemon is started with the --bridge flag specified. SandboxID string // SandboxID uniquely represents a container's network stack SandboxKey string // SandboxKey identifies the sandbox Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port @@ -35,18 +38,44 @@ type NetworkSettingsBase struct { SecondaryIPv6Addresses []network.Address // Deprecated: This field is never set and will be removed in a future release. } -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. +// DefaultNetworkSettings holds the networking state for the default bridge, if the container is connected to that +// network. +// +// Deprecated: this struct is deprecated since Docker v1.11 and will be removed in v29. You should look for the default +// network in NetworkSettings.Networks instead. type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network + // EndpointID uniquely represents a service endpoint in a Sandbox + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. + EndpointID string + // Gateway holds the gateway address for the network + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. 
+ Gateway string + // GlobalIPv6Address holds network's global IPv6 address + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. + GlobalIPv6Address string + // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. + GlobalIPv6PrefixLen int + // IPAddress holds the IPv4 address for the network + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. + IPAddress string + // IPPrefixLen represents mask length of network's IPv4 address + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. + IPPrefixLen int + // IPv6Gateway holds gateway address specific for IPv6 + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. + IPv6Gateway string + // MacAddress holds the MAC address for the network + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. + MacAddress string } // NetworkSettingsSummary provides a summary of container's networks diff --git a/vendor/github.com/docker/docker/api/types/filters/filters_deprecated.go b/vendor/github.com/docker/docker/api/types/filters/filters_deprecated.go new file mode 100644 index 00000000..4504cd7a --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/filters/filters_deprecated.go @@ -0,0 +1,61 @@ +package filters + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/versions" +) + +// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 +// then the encoded format will use an older legacy format where the values are a +// list of strings, instead of a set. +// +// Deprecated: do not use in any new code; use ToJSON instead +func ToParamWithVersion(version string, a Args) (string, error) { + out, err := ToJSON(a) + if out == "" || err != nil { + return "", nil + } + if version != "" && versions.LessThan(version, "1.22") { + return encodeLegacyFilters(out) + } + return out, nil +} + +// encodeLegacyFilters encodes Args in the legacy format as used in API v1.21 and older. +// where values are a list of strings, instead of a set. +// +// Don't use in any new code; use [filters.ToJSON]] instead. +func encodeLegacyFilters(currentFormat string) (string, error) { + // The Args.fields field is not exported, but used to marshal JSON, + // so we'll marshal to the new format, then unmarshal to get the + // fields, and marshal again. + // + // This is far from optimal, but this code is only used for deprecated + // API versions, so should not be hit commonly. 
+ var argsFields map[string]map[string]bool + err := json.Unmarshal([]byte(currentFormat), &argsFields) + if err != nil { + return "", err + } + + buf, err := json.Marshal(convertArgsToSlice(argsFields)) + if err != nil { + return "", err + } + return string(buf), nil +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index 86f4bdb2..396657bb 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -8,8 +8,6 @@ import ( "encoding/json" "regexp" "strings" - - "github.com/docker/docker/api/types/versions" ) // Args stores a mapping of keys to a set of multiple values. @@ -63,24 +61,6 @@ func ToJSON(a Args) (string, error) { return string(buf), err } -// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 -// then the encoded format will use an older legacy format where the values are a -// list of strings, instead of a set. -// -// Deprecated: do not use in any new code; use ToJSON instead -func ToParamWithVersion(version string, a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - - if version != "" && versions.LessThan(version, "1.22") { - buf, err := json.Marshal(convertArgsToSlice(a.fields)) - return string(buf), err - } - - return ToJSON(a) -} - // FromJSON decodes a JSON encoded string into Args func FromJSON(p string) (Args, error) { args := NewArgs() @@ -320,17 +300,3 @@ func deprecatedArgs(d map[string][]string) map[string]map[string]bool { } return m } - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/vendor/github.com/docker/docker/api/types/image/disk_usage.go b/vendor/github.com/docker/docker/api/types/image/disk_usage.go index b29d925c..e847386a 100644 --- a/vendor/github.com/docker/docker/api/types/image/disk_usage.go +++ b/vendor/github.com/docker/docker/api/types/image/disk_usage.go @@ -1,6 +1,8 @@ package image // DiskUsage contains disk usage for images. +// +// Deprecated: this type is no longer used and will be removed in the next release. type DiskUsage struct { TotalSize int64 Reclaimable int64 diff --git a/vendor/github.com/docker/docker/api/types/network/endpoint.go b/vendor/github.com/docker/docker/api/types/network/endpoint.go index 167ac70a..cdc06c6c 100644 --- a/vendor/github.com/docker/docker/api/types/network/endpoint.go +++ b/vendor/github.com/docker/docker/api/types/network/endpoint.go @@ -4,8 +4,6 @@ import ( "errors" "fmt" "net" - - "github.com/docker/docker/internal/multierror" ) // EndpointSettings stores the network endpoint details @@ -99,7 +97,7 @@ func (cfg *EndpointIPAMConfig) IsInRange(v4Subnets []NetworkSubnet, v6Subnets [] errs = append(errs, err) } - return multierror.Join(errs...) + return errJoin(errs...) } func validateEndpointIPAddress(epAddr string, ipamSubnets []NetworkSubnet) error { @@ -149,5 +147,5 @@ func (cfg *EndpointIPAMConfig) Validate() error { } } - return multierror.Join(errs...) + return errJoin(errs...) 
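For illustration, a sketch contrasting the current filter wire format with the legacy one still produced by the deprecated ToParamWithVersion for API versions below 1.22; the filter keys and values are arbitrary.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(
		filters.Arg("label", "com.example.role=db"),
		filters.Arg("dangling", "true"),
	)

	// Current wire format: each key maps to a set of values.
	current, _ := filters.ToJSON(args)
	fmt.Println(current)

	// Legacy format for API versions below 1.22: each key maps to a list.
	legacy, _ := filters.ToParamWithVersion("1.21", args)
	fmt.Println(legacy)
}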
} diff --git a/vendor/github.com/docker/docker/api/types/network/ipam.go b/vendor/github.com/docker/docker/api/types/network/ipam.go index f319e140..f9a9ff9b 100644 --- a/vendor/github.com/docker/docker/api/types/network/ipam.go +++ b/vendor/github.com/docker/docker/api/types/network/ipam.go @@ -4,8 +4,7 @@ import ( "errors" "fmt" "net/netip" - - "github.com/docker/docker/internal/multierror" + "strings" ) // IPAM represents IP Address Management @@ -72,7 +71,7 @@ func ValidateIPAM(ipam *IPAM, enableIPv6 bool) error { } } - if err := multierror.Join(errs...); err != nil { + if err := errJoin(errs...); err != nil { return fmt.Errorf("invalid network config:\n%w", err) } @@ -132,3 +131,43 @@ func validateAddress(address string, subnet netip.Prefix, subnetFamily ipFamily) return nil } + +func errJoin(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + if len(e.errs) == 1 { + return strings.TrimSpace(e.errs[0].Error()) + } + stringErrs := make([]string, 0, len(e.errs)) + for _, subErr := range e.errs { + stringErrs = append(stringErrs, strings.ReplaceAll(subErr.Error(), "\n", "\n\t")) + } + return "* " + strings.Join(stringErrs, "\n* ") +} + +func (e *joinError) Unwrap() []error { + return e.errs +} diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go index fa9037bd..4c6d7ab2 100644 --- a/vendor/github.com/docker/docker/api/types/registry/authconfig.go +++ b/vendor/github.com/docker/docker/api/types/registry/authconfig.go @@ -32,8 +32,8 @@ type AuthConfig struct { Auth string `json:"auth,omitempty"` // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. + // + // Deprecated: This field is deprecated since docker 1.11 (API v1.23) and will be removed in the next release. Email string `json:"email,omitempty"` ServerAddress string `json:"serveraddress,omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go index 8a28320f..3fda4ca6 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -1,5 +1,7 @@ package swarm +import "github.com/docker/docker/api/types/swarm/runtime" + // RuntimeType is the type of runtime used for the TaskSpec type RuntimeType string @@ -25,3 +27,11 @@ const ( type NetworkAttachmentSpec struct { ContainerID string } + +// RuntimeSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +type RuntimeSpec = runtime.PluginSpec + +// RuntimePrivilege describes a permission the user has to accept +// upon installing a plugin. +type RuntimePrivilege = runtime.PluginPrivilege diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go deleted file mode 100644 index 90e572cf..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc --gogofaster_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. 
plugin.proto - -package runtime diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go deleted file mode 100644 index 32aaf0d5..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ /dev/null @@ -1,808 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: plugin.proto - -package runtime - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -type PluginSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges,proto3" json:"privileges,omitempty"` - Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env,omitempty"` -} - -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_22a625af4bc1cc87, []int{0} -} -func (m *PluginSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PluginSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PluginSpec.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PluginSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginSpec.Merge(m, src) -} -func (m *PluginSpec) XXX_Size() int { - return m.Size() -} -func (m *PluginSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PluginSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginSpec proto.InternalMessageInfo - -func (m *PluginSpec) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginSpec) GetRemote() string { - if m != nil { - return m.Remote - } - return "" -} - -func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { - if m != nil { - return m.Privileges - } - return nil -} - -func (m *PluginSpec) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -func (m *PluginSpec) GetEnv() []string { - if m != nil { - return m.Env - } - return nil -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. 
-type PluginPrivilege struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value,proto3" json:"value,omitempty"` -} - -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { - return fileDescriptor_22a625af4bc1cc87, []int{1} -} -func (m *PluginPrivilege) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PluginPrivilege) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PluginPrivilege.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PluginPrivilege) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginPrivilege.Merge(m, src) -} -func (m *PluginPrivilege) XXX_Size() int { - return m.Size() -} -func (m *PluginPrivilege) XXX_DiscardUnknown() { - xxx_messageInfo_PluginPrivilege.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginPrivilege proto.InternalMessageInfo - -func (m *PluginPrivilege) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginPrivilege) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PluginPrivilege) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*PluginSpec)(nil), "PluginSpec") - proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") -} - -func init() { proto.RegisterFile("plugin.proto", fileDescriptor_22a625af4bc1cc87) } - -var fileDescriptor_22a625af4bc1cc87 = []byte{ - // 225 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, - 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x9a, 0xc1, 0xc8, 0xc5, 0x15, 0x00, 0x16, - 0x08, 0x2e, 0x48, 0x4d, 0x16, 0x12, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, - 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x85, 0xc4, 0xb8, 0xd8, 0x8a, 0x52, 0x73, 0xf3, 0x4b, 0x52, 0x25, - 0x98, 0xc0, 0xa2, 0x50, 0x9e, 0x90, 0x01, 0x17, 0x57, 0x41, 0x51, 0x66, 0x59, 0x66, 0x4e, 0x6a, - 0x7a, 0x6a, 0xb1, 0x04, 0xb3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x80, 0x1e, 0xc4, 0xb0, 0x00, 0x98, - 0x44, 0x10, 0x92, 0x1a, 0x21, 0x29, 0x2e, 0x8e, 0x94, 0xcc, 0xe2, 0xc4, 0xa4, 0x9c, 0xd4, 0x14, - 0x09, 0x16, 0x05, 0x46, 0x0d, 0x8e, 0x20, 0x38, 0x5f, 0x48, 0x80, 0x8b, 0x39, 0x35, 0xaf, 0x4c, - 0x82, 0x55, 0x81, 0x59, 0x83, 0x33, 0x08, 0xc4, 0x54, 0x8a, 0xe5, 0xe2, 0x47, 0x33, 0x0c, 0xab, - 0xf3, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, - 0x6e, 0x44, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x91, 0x33, - 0x08, 0xc2, 0x71, 0x92, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36, - 0x70, 0xd0, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x37, 0xea, 0xe2, 0xca, 0x2a, 0x01, 0x00, - 0x00, -} - -func (m *PluginSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - 
return dAtA[:n], nil -} - -func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PluginSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Env) > 0 { - for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Env[iNdEx]) - copy(dAtA[i:], m.Env[iNdEx]) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Env[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if m.Disabled { - i-- - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Privileges) > 0 { - for iNdEx := len(m.Privileges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Privileges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPlugin(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Remote) > 0 { - i -= len(m.Remote) - copy(dAtA[i:], m.Remote) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PluginPrivilege) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - for iNdEx := len(m.Value) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Value[iNdEx]) - copy(dAtA[i:], m.Value[iNdEx]) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Value[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { - offset -= sovPlugin(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PluginSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Remote) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Privileges) > 0 { - for _, e := range m.Privileges { - l = e.Size() - n += 1 + l + sovPlugin(uint64(l)) - } - } - if m.Disabled { - n += 2 - } - if len(m.Env) > 0 { - for _, s := range m.Env { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func (m *PluginPrivilege) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func sovPlugin(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPlugin(x uint64) (n int) { - return 
sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PluginSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Remote = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Privileges = append(m.Privileges, &PluginPrivilege{}) - if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - 
return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlugin(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPlugin - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPlugin - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPlugin - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPlugin = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto deleted file mode 100644 index e311b36b..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -message PluginSpec { - string name = 1; - string remote = 2; - repeated PluginPrivilege privileges = 3; - bool disabled = 4; - repeated string env = 5; -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -message PluginPrivilege { - string name = 1; - string description = 2; - repeated string value = 3; -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/runtime.go new file mode 100644 index 00000000..95176b26 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/runtime.go @@ -0,0 +1,27 @@ +package runtime + +import "fmt" + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +type PluginSpec struct { + Name string `json:"name,omitempty"` + Remote string `json:"remote,omitempty"` + Privileges []*PluginPrivilege `json:"privileges,omitempty"` + Disabled bool `json:"disabled,omitempty"` + Env []string `json:"env,omitempty"` +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. 
+type PluginPrivilege struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Value []string `json:"value,omitempty"` +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") // Deprecated: this error was only used internally and is no longer used. + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") // Deprecated: this error was only used internally and is no longer used. + ErrUnexpectedEndOfGroupPlugin = fmt.Errorf("proto: unexpected end of group") // Deprecated: this error was only used internally and is no longer used. +) diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go index 4dc95e8b..e143f844 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -4,7 +4,6 @@ import ( "time" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm/runtime" ) // TaskState represents the state of a task. @@ -77,7 +76,7 @@ type TaskSpec struct { // NetworkAttachmentSpec is used if the `Runtime` field is set to // `attachment`. ContainerSpec *ContainerSpec `json:",omitempty"` - PluginSpec *runtime.PluginSpec `json:",omitempty"` + PluginSpec *RuntimeSpec `json:",omitempty"` NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` Resources *ResourceRequirements `json:",omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/system/disk_usage.go b/vendor/github.com/docker/docker/api/types/system/disk_usage.go deleted file mode 100644 index 99078cf1..00000000 --- a/vendor/github.com/docker/docker/api/types/system/disk_usage.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -import ( - "github.com/docker/docker/api/types/build" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/volume" -) - -// DiskUsage contains response of Engine API for API 1.49 and greater: -// GET "/system/df" -type DiskUsage struct { - Images *image.DiskUsage - Containers *container.DiskUsage - Volumes *volume.DiskUsage - BuildCache *build.CacheDiskUsage -} diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go index 8456a456..c9c20b87 100644 --- a/vendor/github.com/docker/docker/api/types/types_deprecated.go +++ b/vendor/github.com/docker/docker/api/types/types_deprecated.go @@ -46,15 +46,16 @@ type NetworkSettings = container.NetworkSettings // NetworkSettingsBase holds networking state for a container when inspecting it. // -// Deprecated: use [container.NetworkSettingsBase]. -type NetworkSettingsBase = container.NetworkSettingsBase +// Deprecated: [container.NetworkSettingsBase] will be removed in v29. Prefer +// accessing the fields it contains through [container.NetworkSettings]. +type NetworkSettingsBase = container.NetworkSettingsBase //nolint:staticcheck // ignore SA1019: NetworkSettingsBase is deprecated in v28.4. // DefaultNetworkSettings holds network information // during the 2 release deprecation period. // It will be removed in Docker 1.11. // // Deprecated: use [container.DefaultNetworkSettings]. -type DefaultNetworkSettings = container.DefaultNetworkSettings +type DefaultNetworkSettings = container.DefaultNetworkSettings //nolint:staticcheck // ignore SA1019: DefaultNetworkSettings is deprecated in v28.4. 
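The swarm runtime change above replaces the generated protobuf (un)marshalling code with plain JSON-tagged structs, so PluginSpec and PluginPrivilege now round-trip through encoding/json like any other API type. A minimal sketch, assuming only the vendored import path shown above (the plugin name, remote and privilege values are placeholders):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/swarm/runtime"
)

func main() {
	spec := runtime.PluginSpec{
		Name:     "example/sshfs",           // placeholder plugin name
		Remote:   "docker.io/example/sshfs", // placeholder remote reference
		Disabled: false,
		Env:      []string{"DEBUG=1"},
		Privileges: []*runtime.PluginPrivilege{
			{
				Name:        "network",
				Description: "permissions to access a network",
				Value:       []string{"host"},
			},
		},
	}

	// With the generated proto code removed, serialization is plain JSON.
	raw, err := json.Marshal(spec)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw))

	var decoded runtime.PluginSpec
	if err := json.Unmarshal(raw, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Name, len(decoded.Privileges))
}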
// SummaryNetworkSettings provides a summary of container's networks // in /containers/json. diff --git a/vendor/github.com/docker/docker/api/types/volume/disk_usage.go b/vendor/github.com/docker/docker/api/types/volume/disk_usage.go index 3d716c6e..88974303 100644 --- a/vendor/github.com/docker/docker/api/types/volume/disk_usage.go +++ b/vendor/github.com/docker/docker/api/types/volume/disk_usage.go @@ -1,6 +1,8 @@ package volume // DiskUsage contains disk usage for volumes. +// +// Deprecated: this type is no longer used and will be removed in the next release. type DiskUsage struct { TotalSize int64 Reclaimable int64 diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index d6e014dd..8acfb7f4 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -463,7 +463,9 @@ func (cli *Client) dialer() func(context.Context) (net.Conn, error) { case "unix": return net.Dial(cli.proto, cli.addr) case "npipe": - return sockets.DialPipe(cli.addr, 32*time.Second) + ctx, cancel := context.WithTimeout(ctx, 32*time.Second) + defer cancel() + return dialPipeContext(ctx, cli.addr) default: if tlsConfig := cli.tlsConfig(); tlsConfig != nil { return tls.Dial(cli.proto, cli.addr, tlsConfig) diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go index e5b921b4..1fb9fbfb 100644 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -2,6 +2,17 @@ package client +import ( + "context" + "net" + "syscall" +) + // DefaultDockerHost defines OS-specific default host if the DOCKER_HOST // (EnvOverrideHost) environment variable is unset or empty. const DefaultDockerHost = "unix:///var/run/docker.sock" + +// dialPipeContext connects to a Windows named pipe. It is not supported on non-Windows. +func dialPipeContext(_ context.Context, _ string) (net.Conn, error) { + return nil, syscall.EAFNOSUPPORT +} diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go index 19b954b2..b471c061 100644 --- a/vendor/github.com/docker/docker/client/client_windows.go +++ b/vendor/github.com/docker/docker/client/client_windows.go @@ -1,5 +1,17 @@ package client +import ( + "context" + "net" + + "github.com/Microsoft/go-winio" +) + // DefaultDockerHost defines OS-specific default host if the DOCKER_HOST // (EnvOverrideHost) environment variable is unset or empty. const DefaultDockerHost = "npipe:////./pipe/docker_engine" + +// dialPipeContext connects to a Windows named pipe. It is not supported on non-Windows. 
+func dialPipeContext(ctx context.Context, addr string) (net.Conn, error) { + return winio.DialPipeContext(ctx, addr) +} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go index 2244e0f4..076954f4 100644 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -28,7 +28,7 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea return container.StatsResponseReader{ Body: resp.Body, - OSType: getDockerOS(resp.Header.Get("Server")), + OSType: resp.Header.Get("Ostype"), }, nil } @@ -51,6 +51,6 @@ func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string return container.StatsResponseReader{ Body: resp.Body, - OSType: getDockerOS(resp.Header.Get("Server")), + OSType: resp.Header.Get("Ostype"), }, nil } diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index 66ca75e4..1ed0878b 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -40,7 +40,7 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio return build.ImageBuildResponse{ Body: resp.Body, - OSType: getDockerOS(resp.Header.Get("Server")), + OSType: resp.Header.Get("Ostype"), }, nil } diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go index 67e1e693..7b82f185 100644 --- a/vendor/github.com/docker/docker/client/utils.go +++ b/vendor/github.com/docker/docker/client/utils.go @@ -8,12 +8,9 @@ import ( cerrdefs "github.com/containerd/errdefs" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/internal/lazyregexp" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) -var headerRegexp = lazyregexp.New(`\ADocker/.+\s\((.+)\)\z`) - type emptyIDError string func (e emptyIDError) InvalidParameter() {} @@ -31,16 +28,6 @@ func trimID(objType, id string) (string, error) { return id, nil } -// getDockerOS returns the operating system based on the server header from the daemon. -func getDockerOS(serverHeader string) string { - var osType string - matches := headerRegexp.FindStringSubmatch(serverHeader) - if len(matches) > 0 { - osType = matches[1] - } - return osType -} - // getFiltersQuery returns a url query with "filters" query term, based on the // filters provided. func getFiltersQuery(f filters.Args) (url.Values, error) { diff --git a/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go b/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go deleted file mode 100644 index 6334edb6..00000000 --- a/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code below was largely copied from golang.org/x/mod@v0.22; -// https://github.com/golang/mod/blob/v0.22.0/internal/lazyregexp/lazyre.go -// with some additional methods added. - -// Package lazyregexp is a thin wrapper over regexp, allowing the use of global -// regexp variables without forcing them to be compiled at init. 
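The net effect of the client changes above is that the daemon's operating system is no longer parsed out of the Server header via getDockerOS and lazyregexp; it is read directly from the Ostype response header and surfaced as the OSType field on the stats and build responses. A rough sketch of reading it with the one-shot stats call (the container name is a placeholder):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()

	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// One-shot stats; OSType is now populated from the daemon's "Ostype" header.
	stats, err := cli.ContainerStatsOneShot(ctx, "my-container")
	if err != nil {
		panic(err)
	}
	defer stats.Body.Close()

	fmt.Println("daemon OS:", stats.OSType) // e.g. "linux" or "windows"
}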
-package lazyregexp - -import ( - "os" - "regexp" - "strings" - "sync" -) - -// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be -// compiled the first time it is needed. -type Regexp struct { - str string - once sync.Once - rx *regexp.Regexp -} - -func (r *Regexp) re() *regexp.Regexp { - r.once.Do(r.build) - return r.rx -} - -func (r *Regexp) build() { - r.rx = regexp.MustCompile(r.str) - r.str = "" -} - -func (r *Regexp) FindSubmatch(s []byte) [][]byte { - return r.re().FindSubmatch(s) -} - -func (r *Regexp) FindAllStringSubmatch(s string, n int) [][]string { - return r.re().FindAllStringSubmatch(s, n) -} - -func (r *Regexp) FindStringSubmatch(s string) []string { - return r.re().FindStringSubmatch(s) -} - -func (r *Regexp) FindStringSubmatchIndex(s string) []int { - return r.re().FindStringSubmatchIndex(s) -} - -func (r *Regexp) ReplaceAllString(src, repl string) string { - return r.re().ReplaceAllString(src, repl) -} - -func (r *Regexp) FindString(s string) string { - return r.re().FindString(s) -} - -func (r *Regexp) FindAllString(s string, n int) []string { - return r.re().FindAllString(s, n) -} - -func (r *Regexp) MatchString(s string) bool { - return r.re().MatchString(s) -} - -func (r *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { - return r.re().ReplaceAllStringFunc(src, repl) -} - -func (r *Regexp) SubexpNames() []string { - return r.re().SubexpNames() -} - -var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") - -// New creates a new lazy regexp, delaying the compiling work until it is first -// needed. If the code is being run as part of tests, the regexp compiling will -// happen immediately. -func New(str string) *Regexp { - lr := &Regexp{str: str} - if inTest { - // In tests, always compile the regexps early. - lr.re() - } - return lr -} diff --git a/vendor/github.com/docker/docker/internal/multierror/multierror.go b/vendor/github.com/docker/docker/internal/multierror/multierror.go deleted file mode 100644 index e899f4de..00000000 --- a/vendor/github.com/docker/docker/internal/multierror/multierror.go +++ /dev/null @@ -1,46 +0,0 @@ -package multierror - -import ( - "strings" -) - -// Join is a drop-in replacement for errors.Join with better formatting. -func Join(errs ...error) error { - n := 0 - for _, err := range errs { - if err != nil { - n++ - } - } - if n == 0 { - return nil - } - e := &joinError{ - errs: make([]error, 0, n), - } - for _, err := range errs { - if err != nil { - e.errs = append(e.errs, err) - } - } - return e -} - -type joinError struct { - errs []error -} - -func (e *joinError) Error() string { - if len(e.errs) == 1 { - return strings.TrimSpace(e.errs[0].Error()) - } - stringErrs := make([]string, 0, len(e.errs)) - for _, subErr := range e.errs { - stringErrs = append(stringErrs, strings.ReplaceAll(subErr.Error(), "\n", "\n\t")) - } - return "* " + strings.Join(stringErrs, "\n* ") -} - -func (e *joinError) Unwrap() []error { - return e.errs -} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go index 3d072808..13df228c 100644 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -151,9 +151,9 @@ type JSONMessage struct { // Deprecated: this field is deprecated since docker v0.7.1 / API v1.8. Use the information in [Progress] instead. 
This field will be omitted in a future release. ProgressMessage string `json:"progress,omitempty"` ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` + From string `json:"from,omitempty"` // Deprecated: this field is no longer set in stream responses and should not be used. + Time int64 `json:"time,omitempty"` // Deprecated: this field is no longer set in stream responses and should not be used. + TimeNano int64 `json:"timeNano,omitempty"` // Deprecated: this field is no longer set in stream responses and should not be used. Error *JSONError `json:"errorDetail,omitempty"` // ErrorMessage contains errors encountered during the operation. diff --git a/vendor/github.com/evertras/bubble-table/LICENSE b/vendor/github.com/evertras/bubble-table/LICENSE new file mode 100644 index 00000000..0a30e64a --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Brandon Fulljames + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/evertras/bubble-table/table/border.go b/vendor/github.com/evertras/bubble-table/table/border.go new file mode 100644 index 00000000..756d4c81 --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/border.go @@ -0,0 +1,439 @@ +package table + +import "github.com/charmbracelet/lipgloss" + +// Border defines the borders in and around the table. 
+type Border struct { + Top string + Left string + Right string + Bottom string + TopRight string + TopLeft string + BottomRight string + BottomLeft string + + TopJunction string + LeftJunction string + RightJunction string + BottomJunction string + + InnerJunction string + + InnerDivider string + + // Styles for 2x2 tables and larger + styleMultiTopLeft lipgloss.Style + styleMultiTop lipgloss.Style + styleMultiTopRight lipgloss.Style + styleMultiRight lipgloss.Style + styleMultiBottomRight lipgloss.Style + styleMultiBottom lipgloss.Style + styleMultiBottomLeft lipgloss.Style + styleMultiLeft lipgloss.Style + styleMultiInner lipgloss.Style + + // Styles for a single column table + styleSingleColumnTop lipgloss.Style + styleSingleColumnInner lipgloss.Style + styleSingleColumnBottom lipgloss.Style + + // Styles for a single row table + styleSingleRowLeft lipgloss.Style + styleSingleRowInner lipgloss.Style + styleSingleRowRight lipgloss.Style + + // Style for a table with only one cell + styleSingleCell lipgloss.Style + + // Style for the footer + styleFooter lipgloss.Style +} + +var ( + // https://www.w3.org/TR/xml-entity-names/025.html + + borderDefault = Border{ + Top: "━", + Left: "┃", + Right: "┃", + Bottom: "━", + + TopRight: "┓", + TopLeft: "┏", + BottomRight: "┛", + BottomLeft: "┗", + + TopJunction: "┳", + LeftJunction: "┣", + RightJunction: "┫", + BottomJunction: "┻", + InnerJunction: "╋", + + InnerDivider: "┃", + } + + borderRounded = Border{ + Top: "─", + Left: "│", + Right: "│", + Bottom: "─", + + TopRight: "╮", + TopLeft: "╭", + BottomRight: "╯", + BottomLeft: "╰", + + TopJunction: "┬", + LeftJunction: "├", + RightJunction: "┤", + BottomJunction: "┴", + InnerJunction: "┼", + + InnerDivider: "│", + } +) + +func init() { + borderDefault.generateStyles() + borderRounded.generateStyles() +} + +func (b *Border) generateStyles() { + b.generateMultiStyles() + b.generateSingleColumnStyles() + b.generateSingleRowStyles() + b.generateSingleCellStyle() + + // The footer is a single cell with the top taken off... usually. We can + // re-enable the top if needed this way for certain format configurations. + b.styleFooter = b.styleSingleCell.Copy(). + Align(lipgloss.Right). + BorderBottom(true). + BorderRight(true). + BorderLeft(true) +} + +func (b *Border) styleLeftWithFooter(original lipgloss.Style) lipgloss.Style { + border := original.GetBorderStyle() + + border.BottomLeft = b.LeftJunction + + return original.Copy().BorderStyle(border) +} + +func (b *Border) styleRightWithFooter(original lipgloss.Style) lipgloss.Style { + border := original.GetBorderStyle() + + border.BottomRight = b.RightJunction + + return original.Copy().BorderStyle(border) +} + +func (b *Border) styleBothWithFooter(original lipgloss.Style) lipgloss.Style { + border := original.GetBorderStyle() + + border.BottomLeft = b.LeftJunction + border.BottomRight = b.RightJunction + + return original.Copy().BorderStyle(border) +} + +// This function is long, but it's just repetitive... 
+// +//nolint:funlen +func (b *Border) generateMultiStyles() { + b.styleMultiTopLeft = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + TopLeft: b.TopLeft, + Top: b.Top, + TopRight: b.TopJunction, + Right: b.InnerDivider, + BottomRight: b.InnerJunction, + Bottom: b.Bottom, + BottomLeft: b.LeftJunction, + Left: b.Left, + }, + ) + + b.styleMultiTop = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + Top: b.Top, + Right: b.InnerDivider, + Bottom: b.Bottom, + + TopRight: b.TopJunction, + BottomRight: b.InnerJunction, + }, + ).BorderTop(true).BorderBottom(true).BorderRight(true) + + b.styleMultiTopRight = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + Top: b.Top, + Right: b.Right, + Bottom: b.Bottom, + + TopRight: b.TopRight, + BottomRight: b.RightJunction, + }, + ).BorderTop(true).BorderBottom(true).BorderRight(true) + + b.styleMultiLeft = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + Left: b.Left, + Right: b.InnerDivider, + }, + ).BorderRight(true).BorderLeft(true) + + b.styleMultiRight = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + Right: b.Right, + }, + ).BorderRight(true) + + b.styleMultiInner = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + Right: b.InnerDivider, + }, + ).BorderRight(true) + + b.styleMultiBottomLeft = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + Left: b.Left, + Right: b.InnerDivider, + Bottom: b.Bottom, + + BottomLeft: b.BottomLeft, + BottomRight: b.BottomJunction, + }, + ).BorderLeft(true).BorderBottom(true).BorderRight(true) + + b.styleMultiBottom = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + Right: b.InnerDivider, + Bottom: b.Bottom, + + BottomRight: b.BottomJunction, + }, + ).BorderBottom(true).BorderRight(true) + + b.styleMultiBottomRight = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + Right: b.Right, + Bottom: b.Bottom, + + BottomRight: b.BottomRight, + }, + ).BorderBottom(true).BorderRight(true) +} + +func (b *Border) generateSingleColumnStyles() { + b.styleSingleColumnTop = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + Top: b.Top, + Left: b.Left, + Right: b.Right, + Bottom: b.Bottom, + + TopLeft: b.TopLeft, + TopRight: b.TopRight, + BottomLeft: b.LeftJunction, + BottomRight: b.RightJunction, + }, + ) + + b.styleSingleColumnInner = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + Left: b.Left, + Right: b.Right, + }, + ).BorderRight(true).BorderLeft(true) + + b.styleSingleColumnBottom = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + Left: b.Left, + Right: b.Right, + Bottom: b.Bottom, + + BottomLeft: b.BottomLeft, + BottomRight: b.BottomRight, + }, + ).BorderRight(true).BorderLeft(true).BorderBottom(true) +} + +func (b *Border) generateSingleRowStyles() { + b.styleSingleRowLeft = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + Top: b.Top, + Left: b.Left, + Right: b.InnerDivider, + Bottom: b.Bottom, + + BottomLeft: b.BottomLeft, + BottomRight: b.BottomJunction, + TopRight: b.TopJunction, + TopLeft: b.TopLeft, + }, + ) + + b.styleSingleRowInner = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + Top: b.Top, + Right: b.InnerDivider, + Bottom: b.Bottom, + + BottomRight: b.BottomJunction, + TopRight: b.TopJunction, + }, + ).BorderTop(true).BorderBottom(true).BorderRight(true) + + b.styleSingleRowRight = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + Top: b.Top, + Right: b.Right, + Bottom: b.Bottom, + + BottomRight: b.BottomRight, + TopRight: b.TopRight, + }, + ).BorderTop(true).BorderBottom(true).BorderRight(true) +} + +func (b *Border) generateSingleCellStyle() { + 
b.styleSingleCell = lipgloss.NewStyle().BorderStyle( + lipgloss.Border{ + Top: b.Top, + Left: b.Left, + Right: b.Right, + Bottom: b.Bottom, + + BottomLeft: b.BottomLeft, + BottomRight: b.BottomRight, + TopRight: b.TopRight, + TopLeft: b.TopLeft, + }, + ) +} + +// BorderDefault uses the basic square border, useful to reset the border if +// it was changed somehow. +func (m Model) BorderDefault() Model { + // Already generated styles + m.border = borderDefault + + return m +} + +// BorderRounded uses a thin, rounded border. +func (m Model) BorderRounded() Model { + // Already generated styles + m.border = borderRounded + + return m +} + +// Border uses the given border components to render the table. +func (m Model) Border(border Border) Model { + border.generateStyles() + + m.border = border + + return m +} + +type borderStyleRow struct { + left lipgloss.Style + inner lipgloss.Style + right lipgloss.Style +} + +func (b *borderStyleRow) inherit(s lipgloss.Style) { + b.left = b.left.Copy().Inherit(s) + b.inner = b.inner.Copy().Inherit(s) + b.right = b.right.Copy().Inherit(s) +} + +// There's a lot of branches here, but splitting it up further would make it +// harder to follow. So just be careful with comments and make sure it's tested! +// +//nolint:nestif +func (m Model) styleHeaders() borderStyleRow { + hasRows := len(m.GetVisibleRows()) > 0 || m.calculatePadding(0) > 0 + singleColumn := len(m.columns) == 1 + styles := borderStyleRow{} + + // Possible configurations: + // - Single cell + // - Single row + // - Single column + // - Multi + + if singleColumn { + if hasRows { + // Single column + styles.left = m.border.styleSingleColumnTop + styles.inner = styles.left + styles.right = styles.left + } else { + // Single cell + styles.left = m.border.styleSingleCell + styles.inner = styles.left + styles.right = styles.left + + if m.hasFooter() { + styles.left = m.border.styleBothWithFooter(styles.left) + } + } + } else if !hasRows { + // Single row + styles.left = m.border.styleSingleRowLeft + styles.inner = m.border.styleSingleRowInner + styles.right = m.border.styleSingleRowRight + + if m.hasFooter() { + styles.left = m.border.styleLeftWithFooter(styles.left) + styles.right = m.border.styleRightWithFooter(styles.right) + } + } else { + // Multi + styles.left = m.border.styleMultiTopLeft + styles.inner = m.border.styleMultiTop + styles.right = m.border.styleMultiTopRight + } + + styles.inherit(m.headerStyle) + + return styles +} + +func (m Model) styleRows() (inner borderStyleRow, last borderStyleRow) { + if len(m.columns) == 1 { + inner.left = m.border.styleSingleColumnInner + inner.inner = inner.left + inner.right = inner.left + + last.left = m.border.styleSingleColumnBottom + + if m.hasFooter() { + last.left = m.border.styleBothWithFooter(last.left) + } + + last.inner = last.left + last.right = last.left + } else { + inner.left = m.border.styleMultiLeft + inner.inner = m.border.styleMultiInner + inner.right = m.border.styleMultiRight + + last.left = m.border.styleMultiBottomLeft + last.inner = m.border.styleMultiBottom + last.right = m.border.styleMultiBottomRight + + if m.hasFooter() { + last.left = m.border.styleLeftWithFooter(last.left) + last.right = m.border.styleRightWithFooter(last.right) + } + } + + return inner, last +} diff --git a/vendor/github.com/evertras/bubble-table/table/calc.go b/vendor/github.com/evertras/bubble-table/table/calc.go new file mode 100644 index 00000000..21b7a7d7 --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/calc.go @@ -0,0 +1,36 @@ 
+package table + +// Keep compatibility with Go 1.21 by re-declaring min. +// +//nolint:predeclared +func min(x, y int) int { + if x < y { + return x + } + + return y +} + +// Keep compatibility with Go 1.21 by re-declaring max. +// +//nolint:predeclared +func max(x, y int) int { + if x > y { + return x + } + + return y +} + +// These var names are fine for this little function +// +//nolint:varnamelen +func gcd(x, y int) int { + if x == 0 { + return y + } else if y == 0 { + return x + } + + return gcd(y%x, x) +} diff --git a/vendor/github.com/evertras/bubble-table/table/cell.go b/vendor/github.com/evertras/bubble-table/table/cell.go new file mode 100644 index 00000000..ac9e5c40 --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/cell.go @@ -0,0 +1,60 @@ +package table + +import "github.com/charmbracelet/lipgloss" + +// StyledCell represents a cell in the table that has a particular style applied. +// The cell style takes highest precedence and will overwrite more general styles +// from the row, column, or table as a whole. This style should be generally +// limited to colors, font style, and alignments - spacing style such as margin +// will break the table format. +type StyledCell struct { + // Data is the content of the cell. + Data any + + // Style is the specific style to apply. This is ignored if StyleFunc is not nil. + Style lipgloss.Style + + // StyleFunc is a function that takes the row/column of the cell and + // returns a lipgloss.Style allowing for dynamic styling based on the cell's + // content or position. Overrides Style if set. + StyleFunc StyledCellFunc +} + +// StyledCellFuncInput is the input to the StyledCellFunc. Sent as a struct +// to allow for future additions without breaking changes. +type StyledCellFuncInput struct { + // Data is the data in the cell. + Data any + + // Column is the column that the cell belongs to. + Column Column + + // Row is the row that the cell belongs to. + Row Row + + // GlobalMetadata is the global table metadata that's been set by WithGlobalMetadata + GlobalMetadata map[string]any +} + +// StyledCellFunc is a function that takes various information about the cell and +// returns a lipgloss.Style allowing for easier dynamic styling based on the cell's +// content or position. +type StyledCellFunc = func(input StyledCellFuncInput) lipgloss.Style + +// NewStyledCell creates an entry that can be set in the row data and show as +// styled with the given style. +func NewStyledCell(data any, style lipgloss.Style) StyledCell { + return StyledCell{ + Data: data, + Style: style, + } +} + +// NewStyledCellWithStyleFunc creates an entry that can be set in the row data and show as +// styled with the given style function. +func NewStyledCellWithStyleFunc(data any, styleFunc StyledCellFunc) StyledCell { + return StyledCell{ + Data: data, + StyleFunc: styleFunc, + } +} diff --git a/vendor/github.com/evertras/bubble-table/table/column.go b/vendor/github.com/evertras/bubble-table/table/column.go new file mode 100644 index 00000000..44714755 --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/column.go @@ -0,0 +1,118 @@ +package table + +import ( + "github.com/charmbracelet/lipgloss" +) + +// Column is a column in the table. +type Column struct { + title string + key string + width int + + flexFactor int + + filterable bool + style lipgloss.Style + + fmtString string +} + +// NewColumn creates a new fixed-width column with the given information. 
+func NewColumn(key, title string, width int) Column { + return Column{ + key: key, + title: title, + width: width, + + filterable: false, + } +} + +// NewFlexColumn creates a new flexible width column that tries to fill in the +// total table width. If multiple flex columns exist, each will measure against +// each other depending on their flexFactor. For example, if both have a flexFactor +// of 1, they will have equal width. If one has a flexFactor of 1 and the other +// has a flexFactor of 3, the second will be 3 times larger than the first. You +// must use WithTargetWidth if you have any flex columns, so that the table knows +// how much width it should fill. +func NewFlexColumn(key, title string, flexFactor int) Column { + return Column{ + key: key, + title: title, + + flexFactor: max(flexFactor, 1), + } +} + +// WithStyle applies a style to the column as a whole. +func (c Column) WithStyle(style lipgloss.Style) Column { + c.style = style.Copy().Width(c.width) + + return c +} + +// WithFiltered sets whether the column should be considered for filtering (true) +// or not (false). +func (c Column) WithFiltered(filterable bool) Column { + c.filterable = filterable + + return c +} + +// WithFormatString sets the format string used by fmt.Sprintf to display the data. +// If not set, the default is "%v" for all data types. Intended mainly for +// numeric formatting. +// +// Since data is of the any type, make sure that all data in the column +// is of the expected type or the format may fail. For example, hardcoding '3' +// instead of '3.0' and using '%.2f' will fail because '3' is an integer. +func (c Column) WithFormatString(fmtString string) Column { + c.fmtString = fmtString + + return c +} + +func (c *Column) isFlex() bool { + return c.flexFactor != 0 +} + +// Title returns the title of the column. +func (c Column) Title() string { + return c.title +} + +// Key returns the key of the column. +func (c Column) Key() string { + return c.key +} + +// Width returns the width of the column. +func (c Column) Width() int { + return c.width +} + +// FlexFactor returns the flex factor of the column. +func (c Column) FlexFactor() int { + return c.flexFactor +} + +// IsFlex returns whether the column is a flex column. +func (c Column) IsFlex() bool { + return c.isFlex() +} + +// Filterable returns whether the column is filterable. +func (c Column) Filterable() bool { + return c.filterable +} + +// Style returns the style of the column. +func (c Column) Style() lipgloss.Style { + return c.style +} + +// FmtString returns the format string of the column. +func (c Column) FmtString() string { + return c.fmtString +} diff --git a/vendor/github.com/evertras/bubble-table/table/data.go b/vendor/github.com/evertras/bubble-table/table/data.go new file mode 100644 index 00000000..901a4f4b --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/data.go @@ -0,0 +1,67 @@ +package table + +import "time" + +// This is just a bunch of data type checks, so... 
no linting here +// +//nolint:cyclop +func asInt(data any) (int64, bool) { + switch val := data.(type) { + case int: + return int64(val), true + + case int8: + return int64(val), true + + case int16: + return int64(val), true + + case int32: + return int64(val), true + + case int64: + return val, true + + case uint: + // #nosec: G115 + return int64(val), true + + case uint8: + return int64(val), true + + case uint16: + return int64(val), true + + case uint32: + return int64(val), true + + case uint64: + // #nosec: G115 + return int64(val), true + + case time.Duration: + return int64(val), true + + case StyledCell: + return asInt(val.Data) + } + + return 0, false +} + +func asNumber(data any) (float64, bool) { + switch val := data.(type) { + case float32: + return float64(val), true + + case float64: + return val, true + + case StyledCell: + return asNumber(val.Data) + } + + intVal, isInt := asInt(data) + + return float64(intVal), isInt +} diff --git a/vendor/github.com/evertras/bubble-table/table/dimensions.go b/vendor/github.com/evertras/bubble-table/table/dimensions.go new file mode 100644 index 00000000..38074616 --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/dimensions.go @@ -0,0 +1,116 @@ +package table + +import ( + "github.com/charmbracelet/lipgloss" +) + +func (m *Model) recalculateWidth() { + if m.targetTotalWidth != 0 { + m.totalWidth = m.targetTotalWidth + } else { + total := 0 + + for _, column := range m.columns { + total += column.width + } + + m.totalWidth = total + len(m.columns) + 1 + } + + updateColumnWidths(m.columns, m.targetTotalWidth) + + m.recalculateLastHorizontalColumn() +} + +// Updates column width in-place. This could be optimized but should be called +// very rarely so we prioritize simplicity over performance here. 
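To make the flex-width calculation above concrete: fixed columns keep their configured width, and the remaining target width is divided between flex columns in proportion to their flex factors. A small sketch, assuming the package is imported from the vendored path github.com/evertras/bubble-table/table:

package main

import (
	"fmt"

	"github.com/evertras/bubble-table/table"
)

func main() {
	columns := []table.Column{
		table.NewColumn("id", "ID", 6),           // fixed width of 6
		table.NewFlexColumn("name", "Name", 1),   // gets 1 share of the leftover width
		table.NewFlexColumn("notes", "Notes", 3), // gets 3 shares, so roughly 3x wider than "name"
	}

	tbl := table.New(columns).
		WithTargetWidth(80). // required whenever flex columns are present
		WithRows([]table.Row{
			table.NewRow(table.RowData{"id": 1, "name": "alpha", "notes": "first row"}),
		})

	fmt.Println(tbl.View())
}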
+func updateColumnWidths(cols []Column, totalWidth int) { + totalFlexWidth := totalWidth - len(cols) - 1 + totalFlexFactor := 0 + flexGCD := 0 + + for index, col := range cols { + if !col.isFlex() { + totalFlexWidth -= col.width + cols[index].style = col.style.Width(col.width) + } else { + totalFlexFactor += col.flexFactor + flexGCD = gcd(flexGCD, col.flexFactor) + } + } + + if totalFlexFactor == 0 { + return + } + + // We use the GCD here because otherwise very large values won't divide + // nicely as ints + totalFlexFactor /= flexGCD + + flexUnit := totalFlexWidth / totalFlexFactor + leftoverWidth := totalFlexWidth % totalFlexFactor + + for index := range cols { + if !cols[index].isFlex() { + continue + } + + width := flexUnit * (cols[index].flexFactor / flexGCD) + + if leftoverWidth > 0 { + width++ + leftoverWidth-- + } + + if index == len(cols)-1 { + width += leftoverWidth + leftoverWidth = 0 + } + + width = max(width, 1) + + cols[index].width = width + + // Take borders into account for the actual style + cols[index].style = cols[index].style.Width(width) + } +} + +func (m *Model) recalculateHeight() { + header := m.renderHeaders() + headerHeight := 1 // Header always has the top border + if m.headerVisible { + headerHeight = lipgloss.Height(header) + } + + footer := m.renderFooter(lipgloss.Width(header), false) + var footerHeight int + if footer != "" { + footerHeight = lipgloss.Height(footer) + } + + m.metaHeight = headerHeight + footerHeight +} + +func (m *Model) calculatePadding(numRows int) int { + if m.minimumHeight == 0 { + return 0 + } + + padding := m.minimumHeight - m.metaHeight - numRows - 1 // additional 1 for bottom border + + if padding == 0 && numRows == 0 { + // This is an edge case where we want to add 1 additional line of height, i.e. + // add a border without an empty row. However, this is not possible, so we need + // to add an extra row which will result in the table being 1 row taller than + // the requested minimum height. + return 1 + } + + if padding < 0 { + // Table is already larger than minimum height, do nothing. + return 0 + } + + return padding +} diff --git a/vendor/github.com/evertras/bubble-table/table/doc.go b/vendor/github.com/evertras/bubble-table/table/doc.go new file mode 100644 index 00000000..554944af --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/doc.go @@ -0,0 +1,39 @@ +/* +Package table contains a Bubble Tea component for an interactive and customizable +table. + +The simplest useful table can be created with table.New(...).WithRows(...). Row +data should map to the column keys, as shown below. Note that extra data will +simply not be shown, while missing data will be safely blank in the row's cell. 
+ + const ( + // This is not necessary, but recommended to avoid typos + columnKeyName = "name" + columnKeyCount = "count" + ) + + // Define the columns and how they appear + columns := []table.Column{ + table.NewColumn(columnKeyName, "Name", 10), + table.NewColumn(columnKeyCount, "Count", 6), + } + + // Define the data that will be in the table, mapping to the column keys + rows := []table.Row{ + table.NewRow(table.RowData{ + columnKeyName: "Cheeseburger", + columnKeyCount: 3, + }), + table.NewRow(table.RowData{ + columnKeyName: "Fries", + columnKeyCount: 2, + }), + } + + // Create the table + tbl := table.New(columns).WithRows(rows) + + // Use it like any Bubble Tea component in your view + tbl.View() +*/ +package table diff --git a/vendor/github.com/evertras/bubble-table/table/events.go b/vendor/github.com/evertras/bubble-table/table/events.go new file mode 100644 index 00000000..b519623d --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/events.go @@ -0,0 +1,60 @@ +package table + +// UserEvent is some state change that has occurred due to user input. These will +// ONLY be generated when a user has interacted directly with the table. These +// will NOT be generated when code programmatically changes values in the table. +type UserEvent any + +func (m *Model) appendUserEvent(e UserEvent) { + m.lastUpdateUserEvents = append(m.lastUpdateUserEvents, e) +} + +func (m *Model) clearUserEvents() { + m.lastUpdateUserEvents = nil +} + +// GetLastUpdateUserEvents returns a list of events that happened due to user +// input in the last Update call. This is useful to look for triggers such as +// whether the user moved to a new highlighted row. +func (m *Model) GetLastUpdateUserEvents() []UserEvent { + // Most common case + if len(m.lastUpdateUserEvents) == 0 { + return nil + } + + returned := make([]UserEvent, len(m.lastUpdateUserEvents)) + + // Slightly wasteful but helps guarantee immutability, and this should only + // have data very rarely so this is fine + copy(returned, m.lastUpdateUserEvents) + + return returned +} + +// UserEventHighlightedIndexChanged indicates that the user has scrolled to a new +// row. +type UserEventHighlightedIndexChanged struct { + // PreviousRow is the row that was selected before the change. + PreviousRowIndex int + + // SelectedRow is the row index that is now selected + SelectedRowIndex int +} + +// UserEventRowSelectToggled indicates that the user has either selected or +// deselected a row by toggling the selection. The event contains information +// about which row index was selected and whether it was selected or deselected. +type UserEventRowSelectToggled struct { + RowIndex int + IsSelected bool +} + +// UserEventFilterInputFocused indicates that the user has focused the filter +// text input, so that any other typing will type into the filter field. Only +// activates for the built-in filter text box. +type UserEventFilterInputFocused struct{} + +// UserEventFilterInputUnfocused indicates that the user has unfocused the filter +// text input, which means the user is done typing into the filter field. Only +// activates for the built-in filter text box. 
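The user-event mechanism above only reports changes caused by user input during the last Update call, so a caller typically drains GetLastUpdateUserEvents right after forwarding a message to the table. A sketch, assuming the component's Update follows the usual Bubble Tea convention of returning (Model, tea.Cmd); the package and function names are arbitrary:

package tableui

import (
	"fmt"

	tea "github.com/charmbracelet/bubbletea"
	"github.com/evertras/bubble-table/table"
)

// handleTableMsg forwards a message to the table and reports any user events
// generated by that update. Update returning (table.Model, tea.Cmd) is an
// assumption based on the usual Bubble Tea convention.
func handleTableMsg(tbl table.Model, msg tea.Msg) (table.Model, tea.Cmd) {
	tbl, cmd := tbl.Update(msg)

	for _, event := range tbl.GetLastUpdateUserEvents() {
		switch ev := event.(type) {
		case table.UserEventHighlightedIndexChanged:
			fmt.Printf("cursor moved %d -> %d\n", ev.PreviousRowIndex, ev.SelectedRowIndex)
		case table.UserEventRowSelectToggled:
			fmt.Printf("row %d selected=%v\n", ev.RowIndex, ev.IsSelected)
		}
	}

	return tbl, cmd
}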
+type UserEventFilterInputUnfocused struct{} diff --git a/vendor/github.com/evertras/bubble-table/table/filter.go b/vendor/github.com/evertras/bubble-table/table/filter.go new file mode 100644 index 00000000..765e5b33 --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/filter.go @@ -0,0 +1,164 @@ +package table + +import ( + "fmt" + "strings" +) + +// FilterFuncInput is the input to a FilterFunc. It's a struct so we can add more things later +// without breaking compatibility. +type FilterFuncInput struct { + // Columns is a list of the columns of the table + Columns []Column + + // Row is the row that's being considered for filtering + Row Row + + // GlobalMetadata is an arbitrary set of metadata from the table set by WithGlobalMetadata + GlobalMetadata map[string]any + + // Filter is the filter string input to consider + Filter string +} + +// FilterFunc takes a FilterFuncInput and returns true if the row should be visible, +// or false if the row should be hidden. +type FilterFunc func(FilterFuncInput) bool + +func (m Model) getFilteredRows(rows []Row) []Row { + filterInputValue := m.filterTextInput.Value() + if !m.filtered || filterInputValue == "" { + return rows + } + + filteredRows := make([]Row, 0) + + for _, row := range rows { + var availableFilterFunc FilterFunc + + if m.filterFunc != nil { + availableFilterFunc = m.filterFunc + } else { + availableFilterFunc = filterFuncContains + } + + if availableFilterFunc(FilterFuncInput{ + Columns: m.columns, + Row: row, + Filter: filterInputValue, + GlobalMetadata: m.metadata, + }) { + filteredRows = append(filteredRows, row) + } + } + + return filteredRows +} + +// filterFuncContains returns a filterFunc that performs case-insensitive +// "contains" matching over all filterable columns in a row. +func filterFuncContains(input FilterFuncInput) bool { + if input.Filter == "" { + return true + } + + checkedAny := false + + filterLower := strings.ToLower(input.Filter) + + for _, column := range input.Columns { + if !column.filterable { + continue + } + + checkedAny = true + + data, ok := input.Row.Data[column.key] + + if !ok { + continue + } + + // Extract internal StyledCell data + switch dataV := data.(type) { + case StyledCell: + data = dataV.Data + } + + var target string + switch dataV := data.(type) { + case string: + target = dataV + + case fmt.Stringer: + target = dataV.String() + + default: + target = fmt.Sprintf("%v", data) + } + + if strings.Contains(strings.ToLower(target), filterLower) { + return true + } + } + + return !checkedAny +} + +// filterFuncFuzzy returns a filterFunc that performs case-insensitive fuzzy +// matching (subsequence) over the concatenation of all filterable column values. 
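A custom FilterFunc receives the column definitions, the full row and the typed filter string, and returns whether the row stays visible; it is wired in with WithFilterFunc from options.go further down in this diff. An illustrative prefix-matching filter (the package and function names are made up):

package tableui

import (
	"fmt"
	"strings"

	"github.com/evertras/bubble-table/table"
)

// prefixFilter keeps a row only when one of the filterable columns starts
// with the typed filter string, case-insensitively.
func prefixFilter(input table.FilterFuncInput) bool {
	filter := strings.ToLower(strings.TrimSpace(input.Filter))
	if filter == "" {
		return true
	}

	for _, col := range input.Columns {
		if !col.Filterable() {
			continue
		}
		value, ok := input.Row.Data[col.Key()]
		if !ok {
			continue
		}
		if strings.HasPrefix(strings.ToLower(fmt.Sprint(value)), filter) {
			return true
		}
	}

	return false
}

// Wiring it up: table.New(columns).Filtered(true).WithFilterFunc(prefixFilter)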
+func filterFuncFuzzy(input FilterFuncInput) bool { + filter := strings.TrimSpace(input.Filter) + if filter == "" { + return true + } + + var builder strings.Builder + for _, col := range input.Columns { + if !col.filterable { + continue + } + value, ok := input.Row.Data[col.key] + if !ok { + continue + } + if sc, ok := value.(StyledCell); ok { + value = sc.Data + } + builder.WriteString(fmt.Sprint(value)) // uses Stringer if implemented + builder.WriteByte(' ') + } + + haystack := strings.ToLower(builder.String()) + if haystack == "" { + return false + } + + for _, token := range strings.Fields(strings.ToLower(filter)) { + if !fuzzySubsequenceMatch(haystack, token) { + return false + } + } + + return true +} + +// fuzzySubsequenceMatch returns true if all runes in needle appear in order +// within haystack (not necessarily contiguously). Case must be normalized by caller. +func fuzzySubsequenceMatch(haystack, needle string) bool { + if needle == "" { + return true + } + haystackIndex, needleIndex := 0, 0 + haystackRunes := []rune(haystack) + needleRunes := []rune(needle) + + for haystackIndex < len(haystackRunes) && needleIndex < len(needleRunes) { + if haystackRunes[haystackIndex] == needleRunes[needleIndex] { + needleIndex++ + } + haystackIndex++ + } + + return needleIndex == len(needleRunes) +} diff --git a/vendor/github.com/evertras/bubble-table/table/footer.go b/vendor/github.com/evertras/bubble-table/table/footer.go new file mode 100644 index 00000000..c68709ce --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/footer.go @@ -0,0 +1,51 @@ +package table + +import ( + "fmt" + "strings" +) + +func (m Model) hasFooter() bool { + return m.footerVisible && (m.staticFooter != "" || m.pageSize != 0 || m.filtered) +} + +func (m Model) renderFooter(width int, includeTop bool) string { + if !m.hasFooter() { + return "" + } + + const borderAdjustment = 2 + + styleFooter := m.baseStyle.Copy().Inherit(m.border.styleFooter).Width(width - borderAdjustment) + + if includeTop { + styleFooter = styleFooter.BorderTop(true) + } + + if m.staticFooter != "" { + return styleFooter.Render(m.staticFooter) + } + + sections := []string{} + + if m.filtered && (m.filterTextInput.Focused() || m.filterTextInput.Value() != "") { + sections = append(sections, m.filterTextInput.View()) + } + + // paged feature enabled + if m.pageSize != 0 { + str := fmt.Sprintf("%d/%d", m.CurrentPage(), m.MaxPages()) + if m.filtered && m.filterTextInput.Focused() { + // Need to apply inline style here in case of filter input cursor, because + // the input cursor resets the style after rendering. Note that Inline(true) + // creates a copy, so it's safe to use here without mutating the underlying + // base style. + str = m.baseStyle.Inline(true).Render(str) + } + sections = append(sections, str) + } + + footerText := strings.Join(sections, " ") + + return styleFooter.Render(footerText) +} diff --git a/vendor/github.com/evertras/bubble-table/table/header.go b/vendor/github.com/evertras/bubble-table/table/header.go new file mode 100644 index 00000000..fdd5ac0b --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/header.go @@ -0,0 +1,93 @@ +package table + +import "github.com/charmbracelet/lipgloss" + +// This is long and could use some refactoring in the future, but unsure of how +// to pick it apart right now. 
+// +//nolint:funlen,cyclop +func (m Model) renderHeaders() string { + headerStrings := []string{} + + totalRenderedWidth := 0 + + headerStyles := m.styleHeaders() + + renderHeader := func(column Column, borderStyle lipgloss.Style) string { + borderStyle = borderStyle.Inherit(column.style).Inherit(m.baseStyle) + + headerSection := limitStr(column.title, column.width) + + return borderStyle.Render(headerSection) + } + + for columnIndex, column := range m.columns { + var borderStyle lipgloss.Style + + if m.horizontalScrollOffsetCol > 0 && columnIndex == m.horizontalScrollFreezeColumnsCount { + if columnIndex == 0 { + borderStyle = headerStyles.left.Copy() + } else { + borderStyle = headerStyles.inner.Copy() + } + + rendered := renderHeader(genOverflowColumnLeft(1), borderStyle) + + totalRenderedWidth += lipgloss.Width(rendered) + + headerStrings = append(headerStrings, rendered) + } + + if columnIndex >= m.horizontalScrollFreezeColumnsCount && + columnIndex < m.horizontalScrollOffsetCol+m.horizontalScrollFreezeColumnsCount { + continue + } + + if len(headerStrings) == 0 { + borderStyle = headerStyles.left.Copy() + } else if columnIndex < len(m.columns)-1 { + borderStyle = headerStyles.inner.Copy() + } else { + borderStyle = headerStyles.right.Copy() + } + + rendered := renderHeader(column, borderStyle) + + if m.maxTotalWidth != 0 { + renderedWidth := lipgloss.Width(rendered) + + const ( + borderAdjustment = 1 + overflowColWidth = 2 + ) + + targetWidth := m.maxTotalWidth - overflowColWidth + + if columnIndex == len(m.columns)-1 { + // If this is the last header, we don't need to account for the + // overflow arrow column + targetWidth = m.maxTotalWidth + } + + if totalRenderedWidth+renderedWidth > targetWidth { + overflowWidth := m.maxTotalWidth - totalRenderedWidth - borderAdjustment + overflowStyle := genOverflowStyle(headerStyles.right, overflowWidth) + overflowColumn := genOverflowColumnRight(overflowWidth) + + overflowStr := renderHeader(overflowColumn, overflowStyle) + + headerStrings = append(headerStrings, overflowStr) + + break + } + + totalRenderedWidth += renderedWidth + } + + headerStrings = append(headerStrings, rendered) + } + + headerBlock := lipgloss.JoinHorizontal(lipgloss.Bottom, headerStrings...) + + return headerBlock +} diff --git a/vendor/github.com/evertras/bubble-table/table/keys.go b/vendor/github.com/evertras/bubble-table/table/keys.go new file mode 100644 index 00000000..fe2eccc3 --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/keys.go @@ -0,0 +1,120 @@ +package table + +import "github.com/charmbracelet/bubbles/key" + +// KeyMap defines the keybindings for the table when it's focused. +type KeyMap struct { + RowDown key.Binding + RowUp key.Binding + + RowSelectToggle key.Binding + + PageDown key.Binding + PageUp key.Binding + PageFirst key.Binding + PageLast key.Binding + + // Filter allows the user to start typing and filter the rows. + Filter key.Binding + + // FilterBlur is the key that stops the user's input from typing into the filter. + FilterBlur key.Binding + + // FilterClear will clear the filter while it's blurred. + FilterClear key.Binding + + // ScrollRight will move one column to the right when overflow occurs. + ScrollRight key.Binding + + // ScrollLeft will move one column to the left when overflow occurs. + ScrollLeft key.Binding +} + +// DefaultKeyMap returns a set of sensible defaults for controlling a focused table with help text. 
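Key bindings are plain data, so callers can start from DefaultKeyMap below and override only the bindings they care about before applying the result with WithKeyMap. A short sketch (the package and function names are made up):

package tableui

import (
	"github.com/charmbracelet/bubbles/key"
	"github.com/evertras/bubble-table/table"
)

// vimOnlyKeyMap starts from the defaults and narrows row movement to j/k only.
func vimOnlyKeyMap() table.KeyMap {
	keys := table.DefaultKeyMap()

	keys.RowDown = key.NewBinding(
		key.WithKeys("j"),
		key.WithHelp("j", "move down"),
	)
	keys.RowUp = key.NewBinding(
		key.WithKeys("k"),
		key.WithHelp("k", "move up"),
	)

	return keys
}

// Applied with: table.New(columns).WithKeyMap(vimOnlyKeyMap())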
+func DefaultKeyMap() KeyMap { + return KeyMap{ + RowDown: key.NewBinding( + key.WithKeys("down", "j"), + key.WithHelp("↓/j", "move down"), + ), + RowUp: key.NewBinding( + key.WithKeys("up", "k"), + key.WithHelp("↑/k", "move up"), + ), + RowSelectToggle: key.NewBinding( + key.WithKeys(" ", "enter"), + key.WithHelp("/enter", "select row"), + ), + PageDown: key.NewBinding( + key.WithKeys("right", "l", "pgdown"), + key.WithHelp("→/h/page down", "next page"), + ), + PageUp: key.NewBinding( + key.WithKeys("left", "h", "pgup"), + key.WithHelp("←/h/page up", "previous page"), + ), + PageFirst: key.NewBinding( + key.WithKeys("home", "g"), + key.WithHelp("home/g", "first page"), + ), + PageLast: key.NewBinding( + key.WithKeys("end", "G"), + key.WithHelp("end/G", "last page"), + ), + Filter: key.NewBinding( + key.WithKeys("/"), + key.WithHelp("/", "filter"), + ), + FilterBlur: key.NewBinding( + key.WithKeys("enter", "esc"), + key.WithHelp("enter/esc", "unfocus"), + ), + FilterClear: key.NewBinding( + key.WithKeys("esc"), + key.WithHelp("esc", "clear filter"), + ), + ScrollRight: key.NewBinding( + key.WithKeys("shift+right"), + key.WithHelp("shift+→", "scroll right"), + ), + ScrollLeft: key.NewBinding( + key.WithKeys("shift+left"), + key.WithHelp("shift+←", "scroll left"), + ), + } +} + +// FullHelp returns a multi row view of all the helpkeys that are defined. Needed to fullfil the 'help.Model' interface. +// Also appends all user defined extra keys to the help. +func (m Model) FullHelp() [][]key.Binding { + keyBinds := [][]key.Binding{ + {m.keyMap.RowDown, m.keyMap.RowUp, m.keyMap.RowSelectToggle}, + {m.keyMap.PageDown, m.keyMap.PageUp, m.keyMap.PageFirst, m.keyMap.PageLast}, + {m.keyMap.Filter, m.keyMap.FilterBlur, m.keyMap.FilterClear, m.keyMap.ScrollRight, m.keyMap.ScrollLeft}, + } + if m.additionalFullHelpKeys != nil { + keyBinds = append(keyBinds, m.additionalFullHelpKeys()) + } + + return keyBinds +} + +// ShortHelp just returns a single row of help views. Needed to fullfil the 'help.Model' interface. +// Also appends all user defined extra keys to the help. +func (m Model) ShortHelp() []key.Binding { + keyBinds := []key.Binding{ + m.keyMap.RowDown, + m.keyMap.RowUp, + m.keyMap.RowSelectToggle, + m.keyMap.PageDown, + m.keyMap.PageUp, + m.keyMap.Filter, + m.keyMap.FilterBlur, + m.keyMap.FilterClear, + } + if m.additionalShortHelpKeys != nil { + keyBinds = append(keyBinds, m.additionalShortHelpKeys()...) + } + + return keyBinds +} diff --git a/vendor/github.com/evertras/bubble-table/table/model.go b/vendor/github.com/evertras/bubble-table/table/model.go new file mode 100644 index 00000000..33e1458c --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/model.go @@ -0,0 +1,148 @@ +package table + +import ( + "github.com/charmbracelet/bubbles/key" + "github.com/charmbracelet/bubbles/textinput" + tea "github.com/charmbracelet/bubbletea" + "github.com/charmbracelet/lipgloss" +) + +const ( + columnKeySelect = "___select___" +) + +var ( + defaultHighlightStyle = lipgloss.NewStyle().Background(lipgloss.Color("#334")) +) + +// Model is the main table model. Create using New(). +type Model struct { + // Data + columns []Column + rows []Row + metadata map[string]any + + // Caches for optimizations + visibleRowCacheUpdated bool + visibleRowCache []Row + + // Shown when data is missing from a row + missingDataIndicator any + + // Interaction + focused bool + keyMap KeyMap + + // Taken from: 'Bubbles/List' + // Additional key mappings for the short and full help views. 
This allows + // you to add additional key mappings to the help menu without + // re-implementing the help component. Of course, you can also disable the + // list's help component and implement a new one if you need more + // flexibility. + // You have to supply a keybinding like this: + // key.NewBinding( key.WithKeys("shift+left"), key.WithHelp("shift+←", "scroll left")) + // It needs both 'WithKeys' and 'WithHelp' + additionalShortHelpKeys func() []key.Binding + additionalFullHelpKeys func() []key.Binding + + selectableRows bool + rowCursorIndex int + + // Events + lastUpdateUserEvents []UserEvent + + // Styles + baseStyle lipgloss.Style + highlightStyle lipgloss.Style + headerStyle lipgloss.Style + rowStyleFunc func(RowStyleFuncInput) lipgloss.Style + border Border + selectedText string + unselectedText string + + // Header + headerVisible bool + + // Footers + footerVisible bool + staticFooter string + + // Pagination + pageSize int + currentPage int + paginationWrapping bool + + // Sorting, where a stable sort is applied from first element to last so + // that elements are grouped by the later elements. + sortOrder []SortColumn + + // Filter + filtered bool + filterTextInput textinput.Model + filterFunc FilterFunc + + // For flex columns + targetTotalWidth int + + // The maximum total width for overflow/scrolling + maxTotalWidth int + + // Internal cached calculations for reference, may be higher than + // maxTotalWidth. If this is the case, we need to adjust the view + totalWidth int + + // How far to scroll to the right, in columns + horizontalScrollOffsetCol int + + // How many columns to freeze when scrolling horizontally + horizontalScrollFreezeColumnsCount int + + // Calculated maximum column we can scroll to before the last is displayed + maxHorizontalColumnIndex int + + // Minimum total height of the table + minimumHeight int + + // Internal cached calculation, the height of the header and footer + // including borders. Used to determine how many padding rows to add. + metaHeight int + + // If true, the table will be multiline + multiline bool +} + +// New creates a new table ready for further modifications. +func New(columns []Column) Model { + filterInput := textinput.New() + filterInput.Prompt = "/" + model := Model{ + columns: make([]Column, len(columns)), + metadata: make(map[string]any), + highlightStyle: defaultHighlightStyle.Copy(), + border: borderDefault, + headerVisible: true, + footerVisible: true, + keyMap: DefaultKeyMap(), + + selectedText: "[x]", + unselectedText: "[ ]", + + filterTextInput: filterInput, + filterFunc: filterFuncContains, + baseStyle: lipgloss.NewStyle().Align(lipgloss.Right), + + paginationWrapping: true, + } + + // Do a full deep copy to avoid unexpected edits + copy(model.columns, columns) + + model.recalculateWidth() + + return model +} + +// Init initializes the table per the Bubble Tea architecture. +func (m Model) Init() tea.Cmd { + return nil +} diff --git a/vendor/github.com/evertras/bubble-table/table/options.go b/vendor/github.com/evertras/bubble-table/table/options.go new file mode 100644 index 00000000..c35fc1e7 --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/options.go @@ -0,0 +1,510 @@ +package table + +import ( + "github.com/charmbracelet/bubbles/key" + "github.com/charmbracelet/bubbles/textinput" + "github.com/charmbracelet/lipgloss" +) + +// RowStyleFuncInput is the input to the style function that can +// be applied to each row. This is useful for things like zebra +// striping or other data-based styles. 
+// +// Note that we use a struct here to allow for future expansion +// while keeping backwards compatibility. +type RowStyleFuncInput struct { + // Index is the index of the row, starting at 0. + Index int + + // Row is the full row data. + Row Row + + // IsHighlighted is true if the row is currently highlighted. + IsHighlighted bool +} + +// WithRowStyleFunc sets a function that can be used to apply a style to each row +// based on the row data. This is useful for things like zebra striping or other +// data-based styles. It can be safely set to nil to remove it later. +// This style is applied after the base style and before individual row styles. +// This will override any HighlightStyle settings. +func (m Model) WithRowStyleFunc(f func(RowStyleFuncInput) lipgloss.Style) Model { + m.rowStyleFunc = f + + return m +} + +// WithHighlightedRow sets the highlighted row to the given index. +func (m Model) WithHighlightedRow(index int) Model { + m.rowCursorIndex = index + + if m.rowCursorIndex >= len(m.GetVisibleRows()) { + m.rowCursorIndex = len(m.GetVisibleRows()) - 1 + } + + if m.rowCursorIndex < 0 { + m.rowCursorIndex = 0 + } + + m.currentPage = m.expectedPageForRowIndex(m.rowCursorIndex) + + return m +} + +// HeaderStyle sets the style to apply to the header text, such as color or bold. +func (m Model) HeaderStyle(style lipgloss.Style) Model { + m.headerStyle = style.Copy() + + return m +} + +// WithRows sets the rows to show as data in the table. +func (m Model) WithRows(rows []Row) Model { + m.rows = rows + m.visibleRowCacheUpdated = false + + if m.rowCursorIndex >= len(m.rows) { + m.rowCursorIndex = len(m.rows) - 1 + } + + if m.rowCursorIndex < 0 { + m.rowCursorIndex = 0 + } + + if m.pageSize != 0 { + maxPage := m.MaxPages() + + // MaxPages is 1-index, currentPage is 0 index + if maxPage <= m.currentPage { + m.pageLast() + } + } + + return m +} + +// WithKeyMap sets the key map to use for controls when focused. +func (m Model) WithKeyMap(keyMap KeyMap) Model { + m.keyMap = keyMap + + return m +} + +// KeyMap returns a copy of the current key map in use. +func (m Model) KeyMap() KeyMap { + return m.keyMap +} + +// SelectableRows sets whether or not rows are selectable. If set, adds a column +// in the front that acts as a checkbox and responds to controls if Focused. +func (m Model) SelectableRows(selectable bool) Model { + m.selectableRows = selectable + + hasSelectColumn := len(m.columns) > 0 && m.columns[0].key == columnKeySelect + + if hasSelectColumn != selectable { + if selectable { + m.columns = append([]Column{ + NewColumn(columnKeySelect, m.selectedText, len([]rune(m.selectedText))), + }, m.columns...) + } else { + m.columns = m.columns[1:] + } + } + + m.recalculateWidth() + + return m +} + +// HighlightedRow returns the full Row that's currently highlighted by the user. +func (m Model) HighlightedRow() Row { + if len(m.GetVisibleRows()) > 0 { + return m.GetVisibleRows()[m.rowCursorIndex] + } + + // TODO: Better way to do this without pointers/nil? Or should it be nil? + return Row{} +} + +// SelectedRows returns all rows that have been set as selected by the user. +func (m Model) SelectedRows() []Row { + selectedRows := []Row{} + + for _, row := range m.GetVisibleRows() { + if row.selected { + selectedRows = append(selectedRows, row) + } + } + + return selectedRows +} + +// HighlightStyle sets a custom style to use when the row is being highlighted +// by the cursor. This should not be used with WithRowStyleFunc. 
Instead, use +// the IsHighlighted field in the style function. +func (m Model) HighlightStyle(style lipgloss.Style) Model { + m.highlightStyle = style + + return m +} + +// Focused allows the table to show highlighted rows and take in controls of +// up/down/space/etc to let the user navigate the table and interact with it. +func (m Model) Focused(focused bool) Model { + m.focused = focused + + return m +} + +// Filtered allows the table to show rows that match the filter. +func (m Model) Filtered(filtered bool) Model { + m.filtered = filtered + m.visibleRowCacheUpdated = false + + if m.minimumHeight > 0 { + m.recalculateHeight() + } + + return m +} + +// StartFilterTyping focuses the text input to allow user typing to filter. +func (m Model) StartFilterTyping() Model { + m.filterTextInput.Focus() + + return m +} + +// WithStaticFooter adds a footer that only displays the given text. +func (m Model) WithStaticFooter(footer string) Model { + m.staticFooter = footer + + if m.minimumHeight > 0 { + m.recalculateHeight() + } + + return m +} + +// WithPageSize enables pagination using the given page size. This can be called +// again at any point to resize the height of the table. +func (m Model) WithPageSize(pageSize int) Model { + m.pageSize = pageSize + + maxPages := m.MaxPages() + + if m.currentPage >= maxPages { + m.currentPage = maxPages - 1 + } + + if m.minimumHeight > 0 { + m.recalculateHeight() + } + + return m +} + +// WithNoPagination disables pagination in the table. +func (m Model) WithNoPagination() Model { + m.pageSize = 0 + + if m.minimumHeight > 0 { + m.recalculateHeight() + } + + return m +} + +// WithPaginationWrapping sets whether to wrap around from the beginning to the +// end when navigating through pages. Defaults to true. +func (m Model) WithPaginationWrapping(wrapping bool) Model { + m.paginationWrapping = wrapping + + return m +} + +// WithSelectedText describes what text to show when selectable rows are enabled. +// The selectable column header will use the selected text string. +func (m Model) WithSelectedText(unselected, selected string) Model { + m.selectedText = selected + m.unselectedText = unselected + + if len(m.columns) > 0 && m.columns[0].key == columnKeySelect { + m.columns[0] = NewColumn(columnKeySelect, m.selectedText, len([]rune(m.selectedText))) + m.recalculateWidth() + } + + return m +} + +// WithBaseStyle applies a base style as the default for everything in the table. +// This is useful for border colors, default alignment, default color, etc. +func (m Model) WithBaseStyle(style lipgloss.Style) Model { + m.baseStyle = style + + return m +} + +// WithTargetWidth sets the total target width of the table, including borders. +// This only takes effect when using flex columns. When using flex columns, +// columns will stretch to fill out to the total width given here. +func (m Model) WithTargetWidth(totalWidth int) Model { + m.targetTotalWidth = totalWidth + + m.recalculateWidth() + + return m +} + +// WithMinimumHeight sets the minimum total height of the table, including borders. +func (m Model) WithMinimumHeight(minimumHeight int) Model { + m.minimumHeight = minimumHeight + + m.recalculateHeight() + + return m +} + +// PageDown goes to the next page of a paginated table, wrapping to the first +// page if the table is already on the last page. +func (m Model) PageDown() Model { + m.pageDown() + + return m +} + +// PageUp goes to the previous page of a paginated table, wrapping to the +// last page if the table is already on the first page. 
+func (m Model) PageUp() Model { + m.pageUp() + + return m +} + +// PageLast goes to the last page of a paginated table. +func (m Model) PageLast() Model { + m.pageLast() + + return m +} + +// PageFirst goes to the first page of a paginated table. +func (m Model) PageFirst() Model { + m.pageFirst() + + return m +} + +// WithCurrentPage sets the current page (1 as the first page) of a paginated +// table, bounded to the total number of pages. The current selected row will +// be set to the top row of the page if the page changed. +func (m Model) WithCurrentPage(currentPage int) Model { + if m.pageSize == 0 || currentPage == m.CurrentPage() { + return m + } + if currentPage < 1 { + currentPage = 1 + } else { + maxPages := m.MaxPages() + + if currentPage > maxPages { + currentPage = maxPages + } + } + m.currentPage = currentPage - 1 + m.rowCursorIndex = m.currentPage * m.pageSize + + return m +} + +// WithColumns sets the visible columns for the table, so that columns can be +// added/removed/resized or headers rewritten. +func (m Model) WithColumns(columns []Column) Model { + // Deep copy to avoid edits + m.columns = make([]Column, len(columns)) + copy(m.columns, columns) + + m.recalculateWidth() + + if m.selectableRows { + // Re-add the selectable column + m = m.SelectableRows(true) + } + + return m +} + +// WithFilterInput makes the table use the provided text input bubble for +// filtering rather than using the built-in default. This allows for external +// text input controls to be used. +func (m Model) WithFilterInput(input textinput.Model) Model { + if m.filterTextInput.Value() != input.Value() { + m.pageFirst() + } + + m.filterTextInput = input + m.visibleRowCacheUpdated = false + + return m +} + +// WithFilterInputValue sets the filter value to the given string, immediately +// applying it as if the user had typed it in. Useful for external filter inputs +// that are not necessarily a text input. +func (m Model) WithFilterInputValue(value string) Model { + if m.filterTextInput.Value() != value { + m.pageFirst() + } + + m.filterTextInput.SetValue(value) + m.filterTextInput.Blur() + m.visibleRowCacheUpdated = false + + return m +} + +// WithFilterFunc adds a filter function to the model. If the function returns +// true, the row will be included in the filtered results. If the function +// is nil, the function won't be used and instead the default filtering will be applied, +// if any. +func (m Model) WithFilterFunc(shouldInclude FilterFunc) Model { + m.filterFunc = shouldInclude + + m.visibleRowCacheUpdated = false + + return m +} + +// WithFuzzyFilter enables fuzzy filtering for the table. +func (m Model) WithFuzzyFilter() Model { + return m.WithFilterFunc(filterFuncFuzzy) +} + +// WithFooterVisibility sets the visibility of the footer. +func (m Model) WithFooterVisibility(visibility bool) Model { + m.footerVisible = visibility + + if m.minimumHeight > 0 { + m.recalculateHeight() + } + + return m +} + +// WithHeaderVisibility sets the visibility of the header. +func (m Model) WithHeaderVisibility(visibility bool) Model { + m.headerVisible = visibility + + if m.minimumHeight > 0 { + m.recalculateHeight() + } + + return m +} + +// WithMaxTotalWidth sets the maximum total width that the table should render. +// If this width is exceeded by either the target width or by the total width +// of all the columns (including borders!), anything extra will be treated as +// overflow and horizontal scrolling will be enabled to see the rest. 
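+//
+// Illustrative sketch, assuming a model m built elsewhere: cap the rendered
+// width, pin the first column, and let the user scroll for the rest.
+//
+//	m = m.WithMaxTotalWidth(80).
+//		WithHorizontalFreezeColumnCount(1)
+//	m = m.ScrollRight() // reveal the next hidden column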
+func (m Model) WithMaxTotalWidth(maxTotalWidth int) Model { + m.maxTotalWidth = maxTotalWidth + + m.recalculateWidth() + + return m +} + +// WithHorizontalFreezeColumnCount freezes the given number of columns to the +// left side. This is useful for things like ID or Name columns that should +// always be visible even when scrolling. +func (m Model) WithHorizontalFreezeColumnCount(columnsToFreeze int) Model { + m.horizontalScrollFreezeColumnsCount = columnsToFreeze + + m.recalculateWidth() + + return m +} + +// ScrollRight moves one column to the right. Use with WithMaxTotalWidth. +func (m Model) ScrollRight() Model { + m.scrollRight() + + return m +} + +// ScrollLeft moves one column to the left. Use with WithMaxTotalWidth. +func (m Model) ScrollLeft() Model { + m.scrollLeft() + + return m +} + +// WithMissingDataIndicator sets an indicator to use when data for a column is +// not found in a given row. Note that this is for completely missing data, +// an empty string or other zero value that is explicitly set is not considered +// to be missing. +func (m Model) WithMissingDataIndicator(str string) Model { + m.missingDataIndicator = str + + return m +} + +// WithMissingDataIndicatorStyled sets a styled indicator to use when data for +// a column is not found in a given row. Note that this is for completely +// missing data, an empty string or other zero value that is explicitly set is +// not considered to be missing. +func (m Model) WithMissingDataIndicatorStyled(styled StyledCell) Model { + m.missingDataIndicator = styled + + return m +} + +// WithAllRowsDeselected deselects any rows that are currently selected. +func (m Model) WithAllRowsDeselected() Model { + rows := m.GetVisibleRows() + + for i, row := range rows { + if row.selected { + rows[i] = row.Selected(false) + } + } + + m.rows = rows + + return m +} + +// WithMultiline sets whether or not to wrap text in cells to multiple lines. +func (m Model) WithMultiline(multiline bool) Model { + m.multiline = multiline + + return m +} + +// WithAdditionalShortHelpKeys enables you to add more keybindings to the 'short help' view. +func (m Model) WithAdditionalShortHelpKeys(keys []key.Binding) Model { + m.additionalShortHelpKeys = func() []key.Binding { + return keys + } + + return m +} + +// WithAdditionalFullHelpKeys enables you to add more keybindings to the 'full help' view. +func (m Model) WithAdditionalFullHelpKeys(keys []key.Binding) Model { + m.additionalFullHelpKeys = func() []key.Binding { + return keys + } + + return m +} + +// WithGlobalMetadata applies the given metadata to the table. This metadata is passed to +// some functions in FilterFuncInput and StyleFuncInput to enable more advanced decisions, +// such as setting some global theme variable to reference, etc. Has no effect otherwise. 
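+//
+// Minimal sketch with a hypothetical "theme" key; the value can be read back
+// from the GlobalMetadata field of a StyledCellFuncInput when styling cells.
+//
+//	m = m.WithGlobalMetadata(map[string]any{"theme": "dark"})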
+func (m Model) WithGlobalMetadata(metadata map[string]any) Model { + m.metadata = metadata + + return m +} diff --git a/vendor/github.com/evertras/bubble-table/table/overflow.go b/vendor/github.com/evertras/bubble-table/table/overflow.go new file mode 100644 index 00000000..19c5b0aa --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/overflow.go @@ -0,0 +1,18 @@ +package table + +import "github.com/charmbracelet/lipgloss" + +const columnKeyOverflowRight = "___overflow_r___" +const columnKeyOverflowLeft = "___overflow_l__" + +func genOverflowStyle(base lipgloss.Style, width int) lipgloss.Style { + return base.Width(width).Align(lipgloss.Right) +} + +func genOverflowColumnRight(width int) Column { + return NewColumn(columnKeyOverflowRight, ">", width) +} + +func genOverflowColumnLeft(width int) Column { + return NewColumn(columnKeyOverflowLeft, "<", width) +} diff --git a/vendor/github.com/evertras/bubble-table/table/pagination.go b/vendor/github.com/evertras/bubble-table/table/pagination.go new file mode 100644 index 00000000..6fce9b51 --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/pagination.go @@ -0,0 +1,112 @@ +package table + +// PageSize returns the current page size for the table, or 0 if there is no +// pagination enabled. +func (m *Model) PageSize() int { + return m.pageSize +} + +// CurrentPage returns the current page that the table is on, starting from an +// index of 1. +func (m *Model) CurrentPage() int { + return m.currentPage + 1 +} + +// MaxPages returns the maximum number of pages that are visible. +func (m *Model) MaxPages() int { + totalRows := len(m.GetVisibleRows()) + + if m.pageSize == 0 || totalRows == 0 { + return 1 + } + + return (totalRows-1)/m.pageSize + 1 +} + +// TotalRows returns the current total row count of the table. If the table is +// paginated, this is the total number of rows across all pages. +func (m *Model) TotalRows() int { + return len(m.GetVisibleRows()) +} + +// VisibleIndices returns the current visible rows by their 0 based index. +// Useful for custom pagination footers. 
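+//
+// Sketch of a custom footer (hypothetical wording; fmt import and table setup
+// assumed elsewhere):
+//
+//	start, end := m.VisibleIndices()
+//	m = m.WithStaticFooter(
+//		fmt.Sprintf("rows %d-%d of %d", start+1, end+1, m.TotalRows()))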
+func (m *Model) VisibleIndices() (start, end int) { + totalRows := len(m.GetVisibleRows()) + + if m.pageSize == 0 { + start = 0 + end = totalRows - 1 + + return start, end + } + + start = m.pageSize * m.currentPage + end = start + m.pageSize - 1 + + if end >= totalRows { + end = totalRows - 1 + } + + return start, end +} + +func (m *Model) pageDown() { + if m.pageSize == 0 || len(m.GetVisibleRows()) <= m.pageSize { + return + } + + m.currentPage++ + + maxPageIndex := m.MaxPages() - 1 + + if m.currentPage > maxPageIndex { + if m.paginationWrapping { + m.currentPage = 0 + } else { + m.currentPage = maxPageIndex + } + } + + m.rowCursorIndex = m.currentPage * m.pageSize +} + +func (m *Model) pageUp() { + if m.pageSize == 0 || len(m.GetVisibleRows()) <= m.pageSize { + return + } + + m.currentPage-- + + maxPageIndex := m.MaxPages() - 1 + + if m.currentPage < 0 { + if m.paginationWrapping { + m.currentPage = maxPageIndex + } else { + m.currentPage = 0 + } + } + + m.rowCursorIndex = m.currentPage * m.pageSize +} + +func (m *Model) pageFirst() { + m.currentPage = 0 + m.rowCursorIndex = 0 +} + +func (m *Model) pageLast() { + m.currentPage = m.MaxPages() - 1 + m.rowCursorIndex = m.currentPage * m.pageSize +} + +func (m *Model) expectedPageForRowIndex(rowIndex int) int { + if m.pageSize == 0 { + return 0 + } + + expectedPage := rowIndex / m.pageSize + + return expectedPage +} diff --git a/vendor/github.com/evertras/bubble-table/table/query.go b/vendor/github.com/evertras/bubble-table/table/query.go new file mode 100644 index 00000000..79da50f0 --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/query.go @@ -0,0 +1,96 @@ +package table + +// GetColumnSorting returns the current sorting rules for the table as a list of +// SortColumns, which are applied from first to last. This means that data will +// be grouped by the later elements in the list. The returned list is a copy +// and modifications will have no effect. +func (m *Model) GetColumnSorting() []SortColumn { + c := make([]SortColumn, len(m.sortOrder)) + + copy(c, m.sortOrder) + + return c +} + +// GetCanFilter returns true if the table enables filtering at all. This does +// not say whether a filter is currently active, only that the feature is enabled. +func (m *Model) GetCanFilter() bool { + return m.filtered +} + +// GetIsFilterActive returns true if the table is currently being filtered. This +// does not say whether the table CAN be filtered, only whether or not a filter +// is actually currently being applied. +func (m *Model) GetIsFilterActive() bool { + return m.filterTextInput.Value() != "" +} + +// GetIsFilterInputFocused returns true if the table's built-in filter input is +// currently focused. +func (m *Model) GetIsFilterInputFocused() bool { + return m.filterTextInput.Focused() +} + +// GetCurrentFilter returns the current filter text being applied, or an empty +// string if none is applied. +func (m *Model) GetCurrentFilter() string { + return m.filterTextInput.Value() +} + +// GetVisibleRows returns sorted and filtered rows. +func (m *Model) GetVisibleRows() []Row { + if m.visibleRowCacheUpdated { + return m.visibleRowCache + } + + rows := make([]Row, len(m.rows)) + copy(rows, m.rows) + if m.filtered { + rows = m.getFilteredRows(rows) + } + rows = getSortedRows(m.sortOrder, rows) + + m.visibleRowCache = rows + m.visibleRowCacheUpdated = true + + return rows +} + +// GetHighlightedRowIndex returns the index of the Row that's currently highlighted +// by the user. 
+func (m *Model) GetHighlightedRowIndex() int { + return m.rowCursorIndex +} + +// GetFocused returns whether or not the table is focused and is receiving inputs. +func (m *Model) GetFocused() bool { + return m.focused +} + +// GetHorizontalScrollColumnOffset returns how many columns to the right the table +// has been scrolled. 0 means the table is all the way to the left, which is +// the starting default. +func (m *Model) GetHorizontalScrollColumnOffset() int { + return m.horizontalScrollOffsetCol +} + +// GetHeaderVisibility returns true if the header has been set to visible (default) +// or false if the header has been set to hidden. +func (m *Model) GetHeaderVisibility() bool { + return m.headerVisible +} + +// GetFooterVisibility returns true if the footer has been set to +// visible (default) or false if the footer has been set to hidden. +// Note that even if the footer is visible it will only be rendered if +// it has contents. +func (m *Model) GetFooterVisibility() bool { + return m.footerVisible +} + +// GetPaginationWrapping returns true if pagination wrapping is enabled, or false +// if disabled. If disabled, navigating through pages will stop at the first +// and last pages. +func (m *Model) GetPaginationWrapping() bool { + return m.paginationWrapping +} diff --git a/vendor/github.com/evertras/bubble-table/table/row.go b/vendor/github.com/evertras/bubble-table/table/row.go new file mode 100644 index 00000000..b9a5d6bb --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/row.go @@ -0,0 +1,252 @@ +package table + +import ( + "fmt" + "sync/atomic" + + "github.com/charmbracelet/lipgloss" + "github.com/muesli/reflow/wordwrap" +) + +// RowData is a map of string column keys to arbitrary data. Data with a key +// that matches a column key will be displayed. Data with a key that does not +// match a column key will not be displayed, but will remain attached to the Row. +// This can be useful for attaching hidden metadata for future reference when +// retrieving rows. +type RowData map[string]any + +// Row represents a row in the table with some data keyed to the table columns> +// Can have a style applied to it such as color/bold. Create using NewRow(). +type Row struct { + Style lipgloss.Style + Data RowData + + selected bool + + // id is an internal unique ID to match rows after they're copied + id uint32 +} + +var lastRowID uint32 = 1 + +// NewRow creates a new row and copies the given row data. +func NewRow(data RowData) Row { + row := Row{ + Data: make(map[string]any), + id: lastRowID, + } + + atomic.AddUint32(&lastRowID, 1) + + for key, val := range data { + // Doesn't deep copy val, but close enough for now... + row.Data[key] = val + } + + return row +} + +// WithStyle uses the given style for the text in the row. 
+func (r Row) WithStyle(style lipgloss.Style) Row { + r.Style = style.Copy() + + return r +} + +//nolint:cyclop,funlen // Breaking this up will be more complicated than it's worth for now +func (m Model) renderRowColumnData(row Row, column Column, rowStyle lipgloss.Style, borderStyle lipgloss.Style) string { + cellStyle := rowStyle.Copy().Inherit(column.style).Inherit(m.baseStyle) + + var str string + + switch column.key { + case columnKeySelect: + if row.selected { + str = m.selectedText + } else { + str = m.unselectedText + } + case columnKeyOverflowRight: + cellStyle = cellStyle.Align(lipgloss.Right) + str = ">" + case columnKeyOverflowLeft: + str = "<" + default: + fmtString := "%v" + + var data any + + if entry, exists := row.Data[column.key]; exists { + data = entry + + if column.fmtString != "" { + fmtString = column.fmtString + } + } else if m.missingDataIndicator != nil { + data = m.missingDataIndicator + } else { + data = "" + } + + switch entry := data.(type) { + case StyledCell: + str = fmt.Sprintf(fmtString, entry.Data) + + if entry.StyleFunc != nil { + cellStyle = entry.StyleFunc(StyledCellFuncInput{ + Column: column, + Data: entry.Data, + Row: row, + GlobalMetadata: m.metadata, + }).Copy().Inherit(cellStyle) + } else { + cellStyle = entry.Style.Copy().Inherit(cellStyle) + } + default: + str = fmt.Sprintf(fmtString, entry) + } + } + + if m.multiline { + str = wordwrap.String(str, column.width) + cellStyle = cellStyle.Align(lipgloss.Top) + } else { + str = limitStr(str, column.width) + } + + cellStyle = cellStyle.Inherit(borderStyle) + cellStr := cellStyle.Render(str) + + return cellStr +} + +func (m Model) renderRow(rowIndex int, last bool) string { + row := m.GetVisibleRows()[rowIndex] + highlighted := rowIndex == m.rowCursorIndex + + rowStyle := row.Style.Copy() + + if m.rowStyleFunc != nil { + styleResult := m.rowStyleFunc(RowStyleFuncInput{ + Index: rowIndex, + Row: row, + IsHighlighted: m.focused && highlighted, + }) + + rowStyle = rowStyle.Inherit(styleResult) + } else if m.focused && highlighted { + rowStyle = rowStyle.Inherit(m.highlightStyle) + } + + return m.renderRowData(row, rowStyle, last) +} + +func (m Model) renderBlankRow(last bool) string { + return m.renderRowData(NewRow(nil), lipgloss.NewStyle(), last) +} + +// This is long and could use some refactoring in the future, but not quite sure +// how to pick it apart yet. 
+// +//nolint:funlen, cyclop +func (m Model) renderRowData(row Row, rowStyle lipgloss.Style, last bool) string { + numColumns := len(m.columns) + + columnStrings := []string{} + totalRenderedWidth := 0 + + stylesInner, stylesLast := m.styleRows() + + maxCellHeight := 1 + if m.multiline { + for _, column := range m.columns { + cellStr := m.renderRowColumnData(row, column, rowStyle, lipgloss.NewStyle()) + maxCellHeight = max(maxCellHeight, lipgloss.Height(cellStr)) + } + } + + for columnIndex, column := range m.columns { + var borderStyle lipgloss.Style + var rowStyles borderStyleRow + + if !last { + rowStyles = stylesInner + } else { + rowStyles = stylesLast + } + rowStyle = rowStyle.Copy().Height(maxCellHeight) + + if m.horizontalScrollOffsetCol > 0 && columnIndex == m.horizontalScrollFreezeColumnsCount { + var borderStyle lipgloss.Style + + if columnIndex == 0 { + borderStyle = rowStyles.left.Copy() + } else { + borderStyle = rowStyles.inner.Copy() + } + + rendered := m.renderRowColumnData(row, genOverflowColumnLeft(1), rowStyle, borderStyle) + + totalRenderedWidth += lipgloss.Width(rendered) + + columnStrings = append(columnStrings, rendered) + } + + if columnIndex >= m.horizontalScrollFreezeColumnsCount && + columnIndex < m.horizontalScrollOffsetCol+m.horizontalScrollFreezeColumnsCount { + continue + } + + if len(columnStrings) == 0 { + borderStyle = rowStyles.left + } else if columnIndex < numColumns-1 { + borderStyle = rowStyles.inner + } else { + borderStyle = rowStyles.right + } + + cellStr := m.renderRowColumnData(row, column, rowStyle, borderStyle) + + if m.maxTotalWidth != 0 { + renderedWidth := lipgloss.Width(cellStr) + + const ( + borderAdjustment = 1 + overflowColWidth = 2 + ) + + targetWidth := m.maxTotalWidth - overflowColWidth + + if columnIndex == len(m.columns)-1 { + // If this is the last header, we don't need to account for the + // overflow arrow column + targetWidth = m.maxTotalWidth + } + + if totalRenderedWidth+renderedWidth > targetWidth { + overflowWidth := m.maxTotalWidth - totalRenderedWidth - borderAdjustment + overflowStyle := genOverflowStyle(rowStyles.right, overflowWidth) + overflowColumn := genOverflowColumnRight(overflowWidth) + overflowStr := m.renderRowColumnData(row, overflowColumn, rowStyle, overflowStyle) + + columnStrings = append(columnStrings, overflowStr) + + break + } + + totalRenderedWidth += renderedWidth + } + + columnStrings = append(columnStrings, cellStr) + } + + return lipgloss.JoinHorizontal(lipgloss.Bottom, columnStrings...) +} + +// Selected returns a copy of the row that's set to be selected or deselected. +// The old row is not changed in-place. 
+func (r Row) Selected(selected bool) Row { + r.selected = selected + + return r +} diff --git a/vendor/github.com/evertras/bubble-table/table/scrolling.go b/vendor/github.com/evertras/bubble-table/table/scrolling.go new file mode 100644 index 00000000..3ee3256c --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/scrolling.go @@ -0,0 +1,50 @@ +package table + +func (m *Model) scrollRight() { + if m.horizontalScrollOffsetCol < m.maxHorizontalColumnIndex { + m.horizontalScrollOffsetCol++ + } +} + +func (m *Model) scrollLeft() { + if m.horizontalScrollOffsetCol > 0 { + m.horizontalScrollOffsetCol-- + } +} + +func (m *Model) recalculateLastHorizontalColumn() { + if m.horizontalScrollFreezeColumnsCount >= len(m.columns) { + m.maxHorizontalColumnIndex = 0 + + return + } + + if m.totalWidth <= m.maxTotalWidth { + m.maxHorizontalColumnIndex = 0 + + return + } + + const ( + leftOverflowWidth = 2 + borderAdjustment = 1 + ) + + // Always have left border + visibleWidth := borderAdjustment + leftOverflowWidth + + for i := 0; i < m.horizontalScrollFreezeColumnsCount; i++ { + visibleWidth += m.columns[i].width + borderAdjustment + } + + m.maxHorizontalColumnIndex = len(m.columns) - 1 + + // Work backwards from the right + for i := len(m.columns) - 1; i >= m.horizontalScrollFreezeColumnsCount && visibleWidth <= m.maxTotalWidth; i-- { + visibleWidth += m.columns[i].width + borderAdjustment + + if visibleWidth <= m.maxTotalWidth { + m.maxHorizontalColumnIndex = i - m.horizontalScrollFreezeColumnsCount + } + } +} diff --git a/vendor/github.com/evertras/bubble-table/table/sort.go b/vendor/github.com/evertras/bubble-table/table/sort.go new file mode 100644 index 00000000..9a282cbd --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/sort.go @@ -0,0 +1,178 @@ +package table + +import ( + "fmt" + "sort" +) + +// SortDirection indicates whether a column should sort by ascending or descending. +type SortDirection int + +const ( + // SortDirectionAsc indicates the column should be in ascending order. + SortDirectionAsc SortDirection = iota + + // SortDirectionDesc indicates the column should be in descending order. + SortDirectionDesc +) + +// SortColumn describes which column should be sorted and how. +type SortColumn struct { + ColumnKey string + Direction SortDirection +} + +// SortByAsc sets the main sorting column to the given key, in ascending order. +// If a previous sort was used, it is replaced by the given column each time +// this function is called. Values are sorted as numbers if possible, or just +// as simple string comparisons if not numbers. +func (m Model) SortByAsc(columnKey string) Model { + m.sortOrder = []SortColumn{ + { + ColumnKey: columnKey, + Direction: SortDirectionAsc, + }, + } + + m.visibleRowCacheUpdated = false + + return m +} + +// SortByDesc sets the main sorting column to the given key, in descending order. +// If a previous sort was used, it is replaced by the given column each time +// this function is called. Values are sorted as numbers if possible, or just +// as simple string comparisons if not numbers. +func (m Model) SortByDesc(columnKey string) Model { + m.sortOrder = []SortColumn{ + { + ColumnKey: columnKey, + Direction: SortDirectionDesc, + }, + } + + m.visibleRowCacheUpdated = false + + return m +} + +// ThenSortByAsc provides a secondary sort after the first, in ascending order. +// Can be chained multiple times, applying to smaller subgroups each time. 
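+//
+// For example, with hypothetical column keys, the chain below groups rows by
+// "department" and, within each department, orders them by "name":
+//
+//	m = m.SortByAsc("department").ThenSortByAsc("name")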
+func (m Model) ThenSortByAsc(columnKey string) Model { + m.sortOrder = append([]SortColumn{ + { + ColumnKey: columnKey, + Direction: SortDirectionAsc, + }, + }, m.sortOrder...) + + m.visibleRowCacheUpdated = false + + return m +} + +// ThenSortByDesc provides a secondary sort after the first, in descending order. +// Can be chained multiple times, applying to smaller subgroups each time. +func (m Model) ThenSortByDesc(columnKey string) Model { + m.sortOrder = append([]SortColumn{ + { + ColumnKey: columnKey, + Direction: SortDirectionDesc, + }, + }, m.sortOrder...) + + m.visibleRowCacheUpdated = false + + return m +} + +type sortableTable struct { + rows []Row + byColumn SortColumn +} + +func (s *sortableTable) Len() int { + return len(s.rows) +} + +func (s *sortableTable) Swap(i, j int) { + old := s.rows[i] + s.rows[i] = s.rows[j] + s.rows[j] = old +} + +func (s *sortableTable) extractString(i int, column string) string { + iData, exists := s.rows[i].Data[column] + + if !exists { + return "" + } + + switch iData := iData.(type) { + case StyledCell: + return fmt.Sprintf("%v", iData.Data) + + case string: + return iData + + default: + return fmt.Sprintf("%v", iData) + } +} + +func (s *sortableTable) extractNumber(i int, column string) (float64, bool) { + iData, exists := s.rows[i].Data[column] + + if !exists { + return 0, false + } + + return asNumber(iData) +} + +func (s *sortableTable) Less(first, second int) bool { + firstNum, firstNumIsValid := s.extractNumber(first, s.byColumn.ColumnKey) + secondNum, secondNumIsValid := s.extractNumber(second, s.byColumn.ColumnKey) + + if firstNumIsValid && secondNumIsValid { + if s.byColumn.Direction == SortDirectionAsc { + return firstNum < secondNum + } + + return firstNum > secondNum + } + + firstVal := s.extractString(first, s.byColumn.ColumnKey) + secondVal := s.extractString(second, s.byColumn.ColumnKey) + + if s.byColumn.Direction == SortDirectionAsc { + return firstVal < secondVal + } + + return firstVal > secondVal +} + +func getSortedRows(sortOrder []SortColumn, rows []Row) []Row { + var sortedRows []Row + if len(sortOrder) == 0 { + sortedRows = rows + + return sortedRows + } + + sortedRows = make([]Row, len(rows)) + copy(sortedRows, rows) + + for _, byColumn := range sortOrder { + sorted := &sortableTable{ + rows: sortedRows, + byColumn: byColumn, + } + + sort.Stable(sorted) + + sortedRows = sorted.rows + } + + return sortedRows +} diff --git a/vendor/github.com/evertras/bubble-table/table/strlimit.go b/vendor/github.com/evertras/bubble-table/table/strlimit.go new file mode 100644 index 00000000..9889d831 --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/strlimit.go @@ -0,0 +1,26 @@ +package table + +import ( + "strings" + + "github.com/muesli/reflow/ansi" + "github.com/muesli/reflow/truncate" +) + +func limitStr(str string, maxLen int) string { + if maxLen == 0 { + return "" + } + + newLineIndex := strings.Index(str, "\n") + if newLineIndex > -1 { + str = str[:newLineIndex] + "…" + } + + if ansi.PrintableRuneWidth(str) > maxLen { + // #nosec: G115 + return truncate.StringWithTail(str, uint(maxLen), "…") + } + + return str +} diff --git a/vendor/github.com/evertras/bubble-table/table/update.go b/vendor/github.com/evertras/bubble-table/table/update.go new file mode 100644 index 00000000..198a57d2 --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/update.go @@ -0,0 +1,154 @@ +package table + +import ( + "github.com/charmbracelet/bubbles/key" + tea "github.com/charmbracelet/bubbletea" +) + +func (m *Model) 
moveHighlightUp() { + m.rowCursorIndex-- + + if m.rowCursorIndex < 0 { + m.rowCursorIndex = len(m.GetVisibleRows()) - 1 + } + + m.currentPage = m.expectedPageForRowIndex(m.rowCursorIndex) +} + +func (m *Model) moveHighlightDown() { + m.rowCursorIndex++ + + if m.rowCursorIndex >= len(m.GetVisibleRows()) { + m.rowCursorIndex = 0 + } + + m.currentPage = m.expectedPageForRowIndex(m.rowCursorIndex) +} + +func (m *Model) toggleSelect() { + if !m.selectableRows || len(m.GetVisibleRows()) == 0 { + return + } + + rows := m.GetVisibleRows() + + rowID := rows[m.rowCursorIndex].id + + currentSelectedState := false + + for i := range m.rows { + if m.rows[i].id == rowID { + currentSelectedState = m.rows[i].selected + m.rows[i].selected = !m.rows[i].selected + } + } + + m.visibleRowCacheUpdated = false + + m.appendUserEvent(UserEventRowSelectToggled{ + RowIndex: m.rowCursorIndex, + IsSelected: !currentSelectedState, + }) +} + +func (m Model) updateFilterTextInput(msg tea.Msg) (Model, tea.Cmd) { + var cmd tea.Cmd + switch msg := msg.(type) { + case tea.KeyMsg: + if key.Matches(msg, m.keyMap.FilterBlur) { + m.filterTextInput.Blur() + } + } + m.filterTextInput, cmd = m.filterTextInput.Update(msg) + m.pageFirst() + m.visibleRowCacheUpdated = false + + return m, cmd +} + +// This is a series of Matches tests with minimal logic +// +//nolint:cyclop +func (m *Model) handleKeypress(msg tea.KeyMsg) { + previousRowIndex := m.rowCursorIndex + + if key.Matches(msg, m.keyMap.RowDown) { + m.moveHighlightDown() + } + + if key.Matches(msg, m.keyMap.RowUp) { + m.moveHighlightUp() + } + + if key.Matches(msg, m.keyMap.RowSelectToggle) { + m.toggleSelect() + } + + if key.Matches(msg, m.keyMap.PageDown) { + m.pageDown() + } + + if key.Matches(msg, m.keyMap.PageUp) { + m.pageUp() + } + + if key.Matches(msg, m.keyMap.PageFirst) { + m.pageFirst() + } + + if key.Matches(msg, m.keyMap.PageLast) { + m.pageLast() + } + + if key.Matches(msg, m.keyMap.Filter) { + m.filterTextInput.Focus() + m.appendUserEvent(UserEventFilterInputFocused{}) + } + + if key.Matches(msg, m.keyMap.FilterClear) { + m.visibleRowCacheUpdated = false + m.filterTextInput.Reset() + } + + if key.Matches(msg, m.keyMap.ScrollRight) { + m.scrollRight() + } + + if key.Matches(msg, m.keyMap.ScrollLeft) { + m.scrollLeft() + } + + if m.rowCursorIndex != previousRowIndex { + m.appendUserEvent(UserEventHighlightedIndexChanged{ + PreviousRowIndex: previousRowIndex, + SelectedRowIndex: m.rowCursorIndex, + }) + } +} + +// Update responds to input from the user or other messages from Bubble Tea. +func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) { + m.clearUserEvents() + + if !m.focused { + return m, nil + } + + if m.filterTextInput.Focused() { + var cmd tea.Cmd + m, cmd = m.updateFilterTextInput(msg) + + if !m.filterTextInput.Focused() { + m.appendUserEvent(UserEventFilterInputUnfocused{}) + } + + return m, cmd + } + + switch msg := msg.(type) { + case tea.KeyMsg: + m.handleKeypress(msg) + } + + return m, nil +} diff --git a/vendor/github.com/evertras/bubble-table/table/view.go b/vendor/github.com/evertras/bubble-table/table/view.go new file mode 100644 index 00000000..7be99c23 --- /dev/null +++ b/vendor/github.com/evertras/bubble-table/table/view.go @@ -0,0 +1,65 @@ +package table + +import ( + "strings" + + "github.com/charmbracelet/lipgloss" +) + +// View renders the table. It does not end in a newline, so that it can be +// composed with other elements more consistently. 
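+//
+// Minimal end-to-end sketch (hypothetical columns and data; Bubble Tea program
+// wiring omitted):
+//
+//	columns := []Column{NewColumn("id", "ID", 4), NewColumn("name", "Name", 20)}
+//	rows := []Row{NewRow(RowData{"id": 1, "name": "example"})}
+//	fmt.Println(New(columns).WithRows(rows).Focused(true).View())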
+// +//nolint:cyclop +func (m Model) View() string { + // Safety valve for empty tables + if len(m.columns) == 0 { + return "" + } + + body := strings.Builder{} + + rowStrs := make([]string, 0, 1) + + headers := m.renderHeaders() + + startRowIndex, endRowIndex := m.VisibleIndices() + numRows := endRowIndex - startRowIndex + 1 + + padding := m.calculatePadding(numRows) + + if m.headerVisible { + rowStrs = append(rowStrs, headers) + } else if numRows > 0 || padding > 0 { + //nolint: mnd // This is just getting the first newlined substring + split := strings.SplitN(headers, "\n", 2) + rowStrs = append(rowStrs, split[0]) + } + + for i := startRowIndex; i <= endRowIndex; i++ { + rowStrs = append(rowStrs, m.renderRow(i, padding == 0 && i == endRowIndex)) + } + + for i := 1; i <= padding; i++ { + rowStrs = append(rowStrs, m.renderBlankRow(i == padding)) + } + + var footer string + + if len(rowStrs) > 0 { + footer = m.renderFooter(lipgloss.Width(rowStrs[0]), false) + } else { + footer = m.renderFooter(lipgloss.Width(headers), true) + } + + if footer != "" { + rowStrs = append(rowStrs, footer) + } + + if len(rowStrs) == 0 { + return "" + } + + body.WriteString(lipgloss.JoinVertical(lipgloss.Left, rowStrs...)) + + return body.String() +} diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS deleted file mode 100644 index 3d97fc7a..00000000 --- a/vendor/github.com/gogo/protobuf/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of GoGo authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS file, which -# lists people. For example, employees are listed in CONTRIBUTORS, -# but not in AUTHORS, because the employer holds the copyright. - -# Names should be added to this file as one of -# Organization's name -# Individual's name -# Individual's name - -# Please keep the list sorted. - -Sendgrid, Inc -Vastech SA (PTY) LTD -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS deleted file mode 100644 index 1b4f6c20..00000000 --- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,23 +0,0 @@ -Anton Povarov -Brian Goff -Clayton Coleman -Denis Smirnov -DongYun Kang -Dwayne Schultz -Georg Apitz -Gustav Paul -Johan Brandhorst -John Shahid -John Tuley -Laurent -Patrick Lee -Peter Edge -Roger Johansson -Sam Nguyen -Sergio Arbeo -Stephen J Day -Tamir Duberstein -Todd Eisenberger -Tormod Erevik Lea -Vyacheslav Kim -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE deleted file mode 100644 index f57de90d..00000000 --- a/vendor/github.com/gogo/protobuf/LICENSE +++ /dev/null @@ -1,35 +0,0 @@ -Copyright (c) 2013, The GoGo Authors. All rights reserved. - -Protocol Buffers for Go with Gadgets - -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile deleted file mode 100644 index 00d65f32..00000000 --- a/vendor/github.com/gogo/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C test_proto - make -C proto3_proto - make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go deleted file mode 100644 index a26b046d..00000000 --- a/vendor/github.com/gogo/protobuf/proto/clone.go +++ /dev/null @@ -1,258 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. 
-func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { - emOut := out.Addr().Interface().(extensionsBytes) - bIn := emIn.GetExtensions() - bOut := emOut.GetExtensions() - *bOut = append(*bOut, *bIn...) - } else if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. 
- - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go deleted file mode 100644 index 24552483..00000000 --- a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go +++ /dev/null @@ -1,39 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import "reflect" - -type custom interface { - Marshal() ([]byte, error) - Unmarshal(data []byte) error - Size() int -} - -var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08b..00000000 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. 
- return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. 
-func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. 
- // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c0..00000000 --- a/vendor/github.com/gogo/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go deleted file mode 100644 index fe1bd7d9..00000000 --- a/vendor/github.com/gogo/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. 
- emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - default: // E.g., *pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go deleted file mode 100644 index 93464c91..00000000 --- a/vendor/github.com/gogo/protobuf/proto/duration.go +++ /dev/null @@ -1,100 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Range of a Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid Duration -// may still be too large to fit into a time.Duration (the range of Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %#v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %#v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) - } - return nil -} - -// DurationFromProto converts a Duration to a time.Duration. DurationFromProto -// returns an error if the Duration is invalid or is too large to be -// represented in a time.Duration. -func durationFromProto(p *duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a Duration. -func durationProto(d time.Duration) *duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go deleted file mode 100644 index e748e173..00000000 --- a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() - -type duration struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *duration) Reset() { *m = duration{} } -func (*duration) ProtoMessage() {} -func (*duration) String() string { return "duration" } - -func init() { - RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go deleted file mode 100644 index 9581ccd3..00000000 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ /dev/null @@ -1,205 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. 
-// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - sizVar := SizeVarint(uint64(siz)) - p.grow(siz + sizVar) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go deleted file mode 100644 index 0f5fb173..00000000 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ /dev/null @@ -1,33 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -func NewRequiredNotSetError(field string) *RequiredNotSetError { - return &RequiredNotSetError{field} -} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go deleted file mode 100644 index d4db5a1c..00000000 --- a/vendor/github.com/gogo/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. 
-*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go deleted file mode 100644 index 341c6f57..00000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ /dev/null @@ -1,605 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - case extensionsBytes: - return slowExtensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. 
- return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - if ebase, ok := base.(extensionsBytes); ok { - clearExtension(base, id) - ext := ebase.GetExtensions() - *ext = append(*ext, b...) - return - } - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. 
-func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if ea, ok := pbi.(slowExtensionAdapter); ok { - pbi = ea.extensionsBytes - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - buf := *ext - o := 0 - for o < len(buf) { - tag, n := DecodeVarint(buf[o:]) - fieldNum := int32(tag >> 3) - if int32(fieldNum) == extension.Field { - return true - } - wireType := int(tag & 0x7) - o += n - l, err := size(buf[o:], wireType) - if err != nil { - return false - } - o += l - } - return false - } - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - clearExtension(pb, extension.Field) -} - -func clearExtension(pb Message, fieldNum int32) { - if epb, ok := pb.(extensionsBytes); ok { - offset := 0 - for offset != -1 { - offset = deleteExtension(epb, fieldNum, offset) - } - return - } - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, fieldNum) -} - -// GetExtension retrieves a proto2 extended field from pb. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. 
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - return decodeExtensionFromBytes(extension, *ext) - } - - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if cerr := checkExtensionTypes(epb, extension); cerr != nil { - return nil, cerr - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. 
- value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, ok := pb.(extensionsBytes); ok { - ClearExtension(pb, extension) - newb, err := encodeExtension(extension, value) - if err != nil { - return err - } - bb := epb.GetExtensions() - *bb = append(*bb, newb...) - return nil - } - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - *ext = []byte{} - return - } - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. 
- -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go deleted file mode 100644 index 6f1ae120..00000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ /dev/null @@ -1,389 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
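The removed extensions code above is built around a registry of extension descriptors keyed by the extended message's struct type. As a rough illustration only (not part of this patch), here is a minimal, self-contained Go sketch of that registration pattern with duplicate detection; extensionDesc and myMessage are hypothetical stand-ins, not the gogo/protobuf types.

package main

import (
	"fmt"
	"reflect"
)

// extensionDesc is a hypothetical stand-in for the descriptor type above.
type extensionDesc struct {
	Field int32
	Name  string
}

// extensionsByType mirrors extensionMaps: descriptors indexed first by the
// extended struct type, then by extension field number.
var extensionsByType = make(map[reflect.Type]map[int32]*extensionDesc)

// register mirrors RegisterExtension above: key by the struct type behind the
// pointer and panic on duplicate field numbers.
func register(extendedType interface{}, desc *extensionDesc) {
	st := reflect.TypeOf(extendedType).Elem()
	m := extensionsByType[st]
	if m == nil {
		m = make(map[int32]*extensionDesc)
		extensionsByType[st] = m
	}
	if _, dup := m[desc.Field]; dup {
		panic(fmt.Sprintf("duplicate extension registered: %s %d", st, desc.Field))
	}
	m[desc.Field] = desc
}

// myMessage is a hypothetical extendable message type.
type myMessage struct{}

func main() {
	register((*myMessage)(nil), &extensionDesc{Field: 100, Name: "example"})
	// Lookup takes a nil pointer to the struct type, as RegisteredExtensions above did.
	fmt.Println(extensionsByType[reflect.TypeOf((*myMessage)(nil)).Elem()][100].Name)
}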
- -package proto - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strings" - "sync" -) - -type extensionsBytes interface { - Message - ExtensionRangeArray() []ExtensionRange - GetExtensions() *[]byte -} - -type slowExtensionAdapter struct { - extensionsBytes -} - -func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { - panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") -} - -func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - b := s.GetExtensions() - m, err := BytesToExtensionsMap(*b) - if err != nil { - panic(err) - } - return m, notLocker{} -} - -func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { - if reflect.ValueOf(pb).IsNil() { - return ifnotset - } - value, err := GetExtension(pb, extension) - if err != nil { - return ifnotset - } - if value == nil { - return ifnotset - } - if value.(*bool) == nil { - return ifnotset - } - return *(value.(*bool)) -} - -func (this *Extension) Equal(that *Extension) bool { - if err := this.Encode(); err != nil { - return false - } - if err := that.Encode(); err != nil { - return false - } - return bytes.Equal(this.enc, that.enc) -} - -func (this *Extension) Compare(that *Extension) int { - if err := this.Encode(); err != nil { - return 1 - } - if err := that.Encode(); err != nil { - return -1 - } - return bytes.Compare(this.enc, that.enc) -} - -func SizeOfInternalExtension(m extendableProto) (n int) { - info := getMarshalInfo(reflect.TypeOf(m)) - return info.sizeV1Extensions(m.extensionsWrite()) -} - -type sortableMapElem struct { - field int32 - ext Extension -} - -func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { - s := make(sortableExtensions, 0, len(m)) - for k, v := range m { - s = append(s, &sortableMapElem{field: k, ext: v}) - } - return s -} - -type sortableExtensions []*sortableMapElem - -func (this sortableExtensions) Len() int { return len(this) } - -func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } - -func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } - -func (this sortableExtensions) String() string { - sort.Sort(this) - ss := make([]string, len(this)) - for i := range this { - ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) - } - return "map[" + strings.Join(ss, ",") + "]" -} - -func StringFromInternalExtension(m extendableProto) string { - return StringFromExtensionsMap(m.extensionsWrite()) -} - -func StringFromExtensionsMap(m map[int32]Extension) string { - return newSortableExtensionsFromMap(m).String() -} - -func StringFromExtensionsBytes(ext []byte) string { - m, err := BytesToExtensionsMap(ext) - if err != nil { - panic(err) - } - return StringFromExtensionsMap(m) -} - -func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMap(m.extensionsWrite(), data) -} - -func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMapBackwards(m.extensionsWrite(), data) -} - -func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - o := 0 - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[o:], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - o += n - } - return o, nil -} - -func EncodeExtensionMapBackwards(m 
map[int32]Extension, data []byte) (n int, err error) { - o := 0 - end := len(data) - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[end-len(e.enc):], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - end -= n - o += n - } - return o, nil -} - -func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - e := m[id] - if err := e.Encode(); err != nil { - return nil, err - } - return e.enc, nil -} - -func size(buf []byte, wire int) (int, error) { - switch wire { - case WireVarint: - _, n := DecodeVarint(buf) - return n, nil - case WireFixed64: - return 8, nil - case WireBytes: - v, n := DecodeVarint(buf) - return int(v) + n, nil - case WireFixed32: - return 4, nil - case WireStartGroup: - offset := 0 - for { - u, n := DecodeVarint(buf[offset:]) - fwire := int(u & 0x7) - offset += n - if fwire == WireEndGroup { - return offset, nil - } - s, err := size(buf[offset:], wire) - if err != nil { - return 0, err - } - offset += s - } - } - return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) -} - -func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { - m := make(map[int32]Extension) - i := 0 - for i < len(buf) { - tag, n := DecodeVarint(buf[i:]) - if n <= 0 { - return nil, fmt.Errorf("unable to decode varint") - } - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size(buf[i+n:], wireType) - if err != nil { - return nil, err - } - end := i + int(l) + n - m[int32(fieldNum)] = Extension{enc: buf[i:end]} - i = end - } - return m, nil -} - -func NewExtension(e []byte) Extension { - ee := Extension{enc: make([]byte, len(e))} - copy(ee.enc, e) - return ee -} - -func AppendExtension(e Message, tag int32, buf []byte) { - if ee, eok := e.(extensionsBytes); eok { - ext := ee.GetExtensions() - *ext = append(*ext, buf...) - return - } - if ee, eok := e.(extendableProto); eok { - m := ee.extensionsWrite() - ext := m[int32(tag)] // may be missing - ext.enc = append(ext.enc, buf...) 
- m[int32(tag)] = ext - } -} - -func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { - u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) - ei := u.getExtElemInfo(extension) - v := value - p := toAddrPointer(&v, ei.isptr) - siz := ei.sizer(p, SizeVarint(ei.wiretag)) - buf := make([]byte, 0, siz) - return ei.marshaler(buf, p, ei.wiretag, false) -} - -func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { - o := 0 - for o < len(buf) { - tag, n := DecodeVarint((buf)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - if o+n > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - l, err := size((buf)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - if o+n+l > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - v, err := decodeExtension((buf)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) -} - -func (this *Extension) Encode() error { - if this.enc == nil { - var err error - this.enc, err = encodeExtension(this.desc, this.value) - if err != nil { - return err - } - } - return nil -} - -func (this Extension) GoString() string { - if err := this.Encode(); err != nil { - return fmt.Sprintf("error encoding extension: %v", err) - } - return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) -} - -func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return errors.New("proto: bad extension number; not in declared ranges") - } - return SetExtension(pb, desc, value) -} - -func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return nil, fmt.Errorf("unregistered field number %d", fieldNum) - } - return GetExtension(pb, desc) -} - -func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { - x := &XXX_InternalExtensions{ - p: new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }), - } - x.p.extensionMap = m - return *x -} - -func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { - pb := extendable.(extendableProto) - return pb.extensionsWrite() -} - -func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { - ext := pb.GetExtensions() - for offset < len(*ext) { - tag, n1 := DecodeVarint((*ext)[offset:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go deleted file mode 100644 index 80db1c15..00000000 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ /dev/null @@ -1,973 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. 
HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/gogo/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m 
*Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/gogo/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. 
It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. 
-func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - sindex := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = sindex -} - -// SetDefaults 
sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or T or []*T or []T - switch f.Kind() { - case reflect.Struct: - setDefaults(f, recur, zeros) - - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.Kind() == reflect.Ptr && e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. 
-type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Struct: - nestedMessage = true // non-nullable - - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr, reflect.Struct: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. 
- sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. 
-func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go deleted file mode 100644 index b3aa3919..00000000 --- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go +++ /dev/null @@ -1,50 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
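Among the lib.go helpers deleted above, UnmarshalJSONEnum documents that generated enums may arrive in JSON either as symbolic strings or as raw ints. A small standalone sketch of that dual decoding follows (not part of the patch); the fooValue map is a hypothetical generated name-to-value table.

package main

import (
	"encoding/json"
	"fmt"
)

// fooValue is a hypothetical enum value map, as generated code would provide.
var fooValue = map[string]int32{"X": 17}

// decodeEnum mirrors the behaviour of the removed UnmarshalJSONEnum: accept
// either the symbolic or the numeric JSON representation of an enum.
func decodeEnum(names map[string]int32, data []byte) (int32, error) {
	if len(data) > 0 && data[0] == '"' {
		// Symbolic form: look the name up in the value map.
		var name string
		if err := json.Unmarshal(data, &name); err != nil {
			return 0, err
		}
		v, ok := names[name]
		if !ok {
			return 0, fmt.Errorf("unrecognized enum value %q", name)
		}
		return v, nil
	}
	// Numeric form: decode the int directly.
	var v int32
	err := json.Unmarshal(data, &v)
	return v, err
}

func main() {
	a, _ := decodeEnum(fooValue, []byte(`"X"`))
	b, _ := decodeEnum(fooValue, []byte(`17`))
	fmt.Println(a, b) // 17 17
}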
- -package proto - -import ( - "encoding/json" - "strconv" -) - -type Sizer interface { - Size() int -} - -type ProtoSizer interface { - ProtoSize() int -} - -func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { - s, ok := m[value] - if !ok { - s = strconv.Itoa(int(value)) - } - return json.Marshal(s) -} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go deleted file mode 100644 index f48a7567..00000000 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. 
-type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go deleted file mode 100644 index b6cad908..00000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,357 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. 
-func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! - p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. 
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? 
- return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go deleted file mode 100644 index 7ffd3c29..00000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ /dev/null @@ -1,59 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" -) - -// TODO: untested, so probably incorrect. 
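The purego implementation removed above addresses message fields through reflect: a field is an index path, and pointer.offset resolves it with FieldByIndex followed by Addr. A minimal standalone sketch of that pattern (not part of the patch), using a hypothetical struct rather than generated code:

package main

import (
	"fmt"
	"reflect"
)

// example is a hypothetical message-like struct.
type example struct {
	A int32
	B *string
}

func main() {
	msg := &example{}
	v := reflect.ValueOf(msg) // pointer to struct, like the purego pointer type above

	// A field is identified by its reflect index path, as toField returns.
	field := []int{1} // example.B

	// offset: from a pointer-to-struct to an addressable pointer-to-field.
	fieldPtr := v.Elem().FieldByIndex(field).Addr() // holds a **string

	// Write through the generic pointer and observe it on the struct.
	s := "hello"
	fieldPtr.Elem().Set(reflect.ValueOf(&s))
	fmt.Println(*msg.B) // hello
}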
- -func (p pointer) getRef() pointer { - return pointer{v: p.v.Addr()} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index d55a335d..00000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,308 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. 
-type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. - /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
-func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. 
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go deleted file mode 100644 index aca8eed0..00000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ /dev/null @@ -1,56 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. 
- -package proto - -import ( - "reflect" - "unsafe" -) - -func (p pointer) getRef() pointer { - return pointer{p: (unsafe.Pointer)(&p.p)} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go deleted file mode 100644 index 28da1475..00000000 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ /dev/null @@ -1,610 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. 
-const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - CastType string - StdTime bool - StdDuration bool - WktPointer bool - - stype reflect.Type // set for struct types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - case strings.HasPrefix(f, "embedded="): - p.OrigName = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "customtype="): - p.CustomType = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "casttype="): - p.CastType = strings.Split(f, "=")[1] - case f == "stdtime": - p.StdTime = true - case f == "stdduration": - p.StdDuration = true - case f == "wktptr": - p.WktPointer = true - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. 
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - isMap := typ.Kind() == reflect.Map - if len(p.CustomType) > 0 && !isMap { - p.ctype = typ - p.setTag(lockGetProp) - return - } - if p.StdTime && !isMap { - p.setTag(lockGetProp) - return - } - if p.StdDuration && !isMap { - p.setTag(lockGetProp) - return - } - if p.WktPointer && !isMap { - p.setTag(lockGetProp) - return - } - switch t1 := typ; t1.Kind() { - case reflect.Struct: - p.stype = typ - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - case reflect.Struct: - p.stype = t3 - } - case reflect.Struct: - p.stype = t2 - } - - case reflect.Map: - - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - - p.MapValProp.CustomType = p.CustomType - p.MapValProp.StdDuration = p.StdDuration - p.MapValProp.StdTime = p.StdTime - p.MapValProp.WktPointer = p.WktPointer - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - p.setTag(lockGetProp) -} - -func (p *Properties) setTag(lockGetProp bool) { - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. 
- propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - isOneofMessage := false - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - isOneofMessage = true - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - if isOneofMessage { - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) -var enumStringMaps = make(map[string]map[int32]string) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap - if _, ok := enumStringMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumStringMaps[typeName] = unusedNameMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
-var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go deleted file mode 100644 index 40ea3dd9..00000000 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ /dev/null @@ -1,36 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() -var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go deleted file mode 100644 index 5a5fd93f..00000000 --- a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go +++ /dev/null @@ -1,119 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "io" -) - -func Skip(data []byte) (n int, err error) { - l := len(data) - index := 0 - for index < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - index++ - if data[index-1] < 0x80 { - break - } - } - return index, nil - case 1: - index += 8 - return index, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - index += length - return index, nil - case 3: - for { - var innerWire uint64 - var start int = index - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := Skip(data[start:]) - if err != nil { - return 0, err - } - index = start + next - } - return index, nil - case 4: - return index, nil - case 5: - index += 4 - return index, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go deleted file mode 100644 index f8babdef..00000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,3009 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements - - hassizer bool // has custom sizer - hasprotosizer bool // has custom protosizer - - bytesExtensions field // offset of XXX_extensions where the field type is []byte -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex - - uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. 
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - // Uses the message's Size method if available - if u.hassizer { - s := ptr.asPointerTo(u.typ).Interface().(Sizer) - return s.Size() - } - // Uses the message's ProtoSize method if available - if u.hasprotosizer { - s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) - return s.ProtoSize() - } - - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - n += len(s) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - b = append(b, s...) - } - for _, f := range u.fields { - if f.required { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.bytesExtensions = invalidField - u.sizecache = invalidField - isOneofMessage := false - - if reflect.PtrTo(t).Implements(sizerType) { - u.hassizer = true - } - if reflect.PtrTo(t).Implements(protosizerType) { - u.hasprotosizer = true - } - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Tag.Get("protobuf_oneof") != "" { - isOneofMessage = true - } - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - if f.Type.Kind() == reflect.Map { - u.v1extensions = toField(&f) - } else { - u.bytesExtensions = toField(&f) - } - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // get oneof implementers - var oneofImplementers []interface{} - // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler - if isOneofMessage { - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(t, tags, false, false) - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - isptr: t.Kind() == reflect.Ptr, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - } - } -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. 
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - ctype := false - isTime := false - isDuration := false - isWktPointer := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - if strings.HasPrefix(tags[i], "customtype=") { - ctype = true - } - if tags[i] == "stdtime" { - isTime = true - } - if tags[i] == "stdduration" { - isDuration = true - } - if tags[i] == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - if !proto3 && !pointer && !slice { - nozero = false - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - if pointer { - return makeCustomPtrMarshaler(getMarshalInfo(t)) - } - return makeCustomMarshaler(getMarshalInfo(t)) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeTimePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeTimePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeTimeSliceMarshaler(getMarshalInfo(t)) - } - return makeTimeMarshaler(getMarshalInfo(t)) - } - - if isDuration { - if pointer { - if slice { - return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationPtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeDurationSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationMarshaler(getMarshalInfo(t)) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValueMarshaler(getMarshalInfo(t)) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValueMarshaler(getMarshalInfo(t)) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint32: - if pointer { - if slice { - return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValueMarshaler(getMarshalInfo(t)) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValueMarshaler(getMarshalInfo(t)) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValueMarshaler(getMarshalInfo(t)) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, 
appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. 
- return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if pointer { - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } else { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageRefMarshaler(getMarshalInfo(t)) - } - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize 
int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return 
n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} 
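Reviewer note, not part of the patch: the removed sizers above all reduce to two ideas — zigzag-mapping signed values so small magnitudes stay small, and counting how many 7-bit groups a varint needs. A minimal standalone sketch of both, under the assumption that this mirrors the expressions in the deleted helpers; the names zigzag32, varintSize and packedSize are illustrative and are not identifiers from the removed package:

    package main

    import "fmt"

    // zigzag32 maps a signed value onto an unsigned one so that small
    // magnitudes (positive or negative) become small numbers:
    // 0->0, -1->1, 1->2, -2->3, ... This is the same transform written
    // inline as uint32(v)<<1 ^ uint32(v>>31) in the deleted sizers.
    func zigzag32(v int32) uint32 {
            return uint32(v)<<1 ^ uint32(v>>31)
    }

    // varintSize counts the bytes x occupies as a protobuf varint
    // (7 payload bits per byte, continuation bit in the high bit).
    func varintSize(x uint64) int {
            n := 1
            for x >= 1<<7 {
                    x >>= 7
                    n++
            }
            return n
    }

    // packedSize shows why the deleted *PackedSlice sizers add
    // SizeVarint(uint64(n)) + tagsize on top of the payload: a packed
    // repeated field is one tag, one length varint, then the
    // concatenated element bytes. An empty slice emits nothing at all.
    func packedSize(payload []uint64, tagsize int) int {
            if len(payload) == 0 {
                    return 0
            }
            n := 0
            for _, v := range payload {
                    n += varintSize(v)
            }
            return n + varintSize(uint64(n)) + tagsize
    }

    func main() {
            for _, v := range []int32{0, -1, 1, -2, 150, -150} {
                    z := zigzag32(v)
                    fmt.Printf("v=%-5d zigzag=%-4d varint bytes=%d\n", v, z, varintSize(uint64(z)))
            }
            fmt.Println("packed size:", packedSize([]uint64{1, 150, 300}, 1))
    }

The empty-slice early return in packedSize matches the behavior visible in the deleted code: a packed field with no elements contributes zero bytes, because neither the tag nor the length varint is written.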
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, 
nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() 
- if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - 
return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - tags := strings.Split(f.Tag.Get("protobuf"), ",") - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - stdOptions := false - for _, t := range tags { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "stdduration" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. 
-type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if p.deterministic { - if _, ok := pb.(Marshaler); ok { - return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) - } - } - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz] - pp, err = m.XXX_Marshal(pp, p.deterministic) - p.buf = append(p.buf, pp...) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - var b []byte - b, err = m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go deleted file mode 100644 index 997f57c1..00000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go +++ /dev/null @@ -1,388 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -// makeMessageRefMarshaler differs a bit from makeMessageMarshaler -// It marshal a message T instead of a *T -func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - siz := u.size(ptr) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - b = appendVarint(b, wiretag) - siz := u.cachedsize(ptr) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, ptr, deterministic) - } -} - -// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler -// It marshals a slice of messages []T instead of []*T -func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - var err, errreq error - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - b = appendVarint(b, wiretag) - siz := u.size(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errreq == nil { - errreq = err - } - continue - } - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - - return b, errreq - } -} - -func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go deleted file mode 100644 index 60dcf70d..00000000 --- a/vendor/github.com/gogo/protobuf/proto/table_merge.go +++ /dev/null @@ -1,676 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? 
- out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) - } - } - } - } - case reflect.Struct: - switch { - case isSlice && !isPointer: // E.g. []pb.T - mergeInfo := getMergeInfo(tf) - zero := reflect.Zero(tf) - mfi.merge = func(dst, src pointer) { - // TODO: Make this faster? 
- dstsp := dst.asPointerTo(f.Type) - dsts := dstsp.Elem() - srcs := src.asPointerTo(f.Type).Elem() - for i := 0; i < srcs.Len(); i++ { - dsts = reflect.Append(dsts, zero) - srcElement := srcs.Index(i).Addr() - dstElement := dsts.Index(dsts.Len() - 1).Addr() - mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement)) - } - if dsts.IsNil() { - dsts = reflect.MakeSlice(f.Type, 0, 0) - } - dstsp.Elem().Set(dsts) - } - case !isPointer: - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - mergeInfo.merge(dst, src) - } - case isSlice: // E.g., []*pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mergeInfo.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mergeInfo.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go deleted file mode 100644 index 93722938..00000000 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2249 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. 
- err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - if u.bytesExtensions.IsValid() { - z = m.offset(u.bytesExtensions).toBytes() - break - } - panic("no extensions field available") - } - } - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. 
-func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. - u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - u.bytesExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { - u.oldExtensions = toField(&f) - continue - } else if f.Type == reflect.TypeOf(([]byte)(nil)) { - u.bytesExtensions = toField(&f) - continue - } - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. 
- // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler - if len(oneofFields) > 0 { - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - } - - // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. 
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - ctype := false - isTime := false - isDuration := false - isWktPointer := false - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - if strings.HasPrefix(tag, "customtype=") { - ctype = true - } - if tag == "stdtime" { - isTime = true - } - if tag == "stdduration" { - isDuration = true - } - if tag == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) - } - if pointer { - return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalCustom(getUnmarshalInfo(t), name) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTime(getUnmarshalInfo(t), name) - } - - if isDuration { - if pointer { - if slice { - return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDuration(getUnmarshalInfo(t), name) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return 
makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint32: - if pointer { - if slice { - return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - // We'll never have both pointer and slice for basic types. - if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - 
case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessage(getUnmarshalInfo(t), name) - } - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. - -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := 
int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, 
io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, 
io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, 
io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. - // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. 
- v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, 
f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - 
// that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - tagArray := strings.Split(f.Tag.Get("protobuf"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - for _, t := range tagArray { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - } - if t == "stdduration" { - valTags = append(valTags, t) - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go deleted file mode 100644 index 00d6c7ad..00000000 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go +++ /dev/null @@ -1,385 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. 
Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f // gogo: changed from v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.New(sub.typ)) - m := s.Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := reflect.New(sub.typ) - c := m.Interface().(custom) - if err := c.Unmarshal(b[:x]); err != nil { - return nil, err - } - v := valToPointer(m) - f.appendRef(v, sub.typ) - return b[x:], nil - } -} - -func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - m := f.asPointerTo(sub.typ).Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 {
- return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&d)) - return b[x:], nil - } -} - -func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(d)) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&d)) -
slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(d)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go deleted file mode 100644 index 87416afe..00000000 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ /dev/null @@ -1,930 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. 
-type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, v, props); err != nil { - return err - } - } else if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, fv, props); err != nil { - return err - } - } else if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv - if pv.CanAddr() { - pv = sv.Addr() - } else { - pv = reflect.New(sv.Type()) - pv.Elem().Set(sv) - } - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - -// writeAny writes an arbitrary field. 
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - if props != nil { - if len(props.CustomType) > 0 { - custom, ok := v.Interface().(Marshaler) - if ok { - data, err := custom.Marshal() - if err != nil { - return err - } - if err := writeString(w, string(data)); err != nil { - return err - } - return nil - } - } else if len(props.CastType) > 0 { - if _, ok := v.Interface().(interface { - String() string - }); ok { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - _, err := fmt.Fprintf(w, "%d", v.Interface()) - return err - } - } - } else if props.StdTime { - t, ok := v.Interface().(time.Time) - if !ok { - return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) - } - tproto, err := timestampProto(t) - if err != nil { - return err - } - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdTime = false - err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) - return err - } else if props.StdDuration { - d, ok := v.Interface().(time.Duration) - if !ok { - return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) - } - dproto := durationProto(d) - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdDuration = false - err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) - return err - } - } - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. 
- v = v.Addr() - } - if v.Type().Implements(textMarshalerType) { - text, err := v.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, ferr := fmt.Fprintf(w, "/* %v */\n", err) - return ferr - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, werr := w.Write(endBraceNewline); werr != nil { - return werr - } - continue - } - if _, ferr := fmt.Fprint(w, tag); ferr != nil { - return ferr - } - if wire != WireStartGroup { - if err = w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err = w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { 
return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - e := pv.Interface().(Message) - - var m map[int32]Extension - var mu sync.Locker - if em, ok := e.(extensionsBytes); ok { - eb := em.GetExtensions() - var err error - m, err = BytesToExtensionsMap(*eb) - if err != nil { - return err - } - mu = notLocker{} - } else if _, ok := e.(extendableProto); ok { - ep, _ := extendable(e) - m, mu = ep.extensionsRead() - if m == nil { - return nil - } - } - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(e, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. - if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. 
- v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go deleted file mode 100644 index 1d6c6aa0..00000000 --- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ /dev/null @@ -1,57 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "reflect" -) - -func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { - m, ok := enumStringMaps[props.Enum] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - key := int32(0) - if v.Kind() == reflect.Ptr { - key = int32(v.Elem().Int()) - } else { - key = int32(v.Int()) - } - s, ok := m[key] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - _, err := fmt.Fprint(w, s) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go deleted file mode 100644 index f85c0cc8..00000000 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += 
len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(rune(i)), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - if len(props.CustomType) > 0 { - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - tc := reflect.TypeOf(new(Marshaler)) - ok := t.Elem().Implements(tc.Elem()) - if ok { - fv := v - flen := fv.Len() - if flen == fv.Cap() { - nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) - reflect.Copy(nav, fv) - fv.Set(nav) - } - fv.SetLen(flen + 1) - - // Read one. - p.back() - return p.readAny(fv.Index(flen), props) - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.ValueOf(custom)) - } else { - custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.Indirect(reflect.ValueOf(custom))) - } - return nil - } - if props.StdTime { - fv := v - p.back() - props.StdTime = false - tproto := &timestamp{} - err := p.readAny(reflect.ValueOf(tproto).Elem(), props) - props.StdTime = true - if err != nil { - return err - } - tim, err := timestampFromProto(tproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ts := fv.Interface().([]*time.Time) - ts = append(ts, &tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } else { - ts := fv.Interface().([]time.Time) - ts = append(ts, tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&tim)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&tim))) - } - return nil - } - if props.StdDuration { - fv := v - p.back() - props.StdDuration = false - dproto := &duration{} - err := p.readAny(reflect.ValueOf(dproto).Elem(), props) - props.StdDuration = true - if err != nil { - return err - } - dur, err := durationFromProto(dproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ds := fv.Interface().([]*time.Duration) - ds = append(ds, &dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } else { - ds := fv.Interface().([]time.Duration) - ds = append(ds, dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&dur)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&dur))) - } - return nil - } - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. 
- return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - ntok := p.next() - if ntok.err != nil { - return ntok.err - } - if ntok.value == "]" { - break - } - if ntok.value != "," { - return p.errorf("Expected ']' or ',' found %q", ntok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int8: - if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int16: - if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint8: - if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint16: - if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. 
-func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go deleted file mode 100644 index 9324f654..00000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp.go +++ /dev/null @@ -1,113 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
-func validateTimestamp(ts *timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func timestampFromProto(ts *timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -func timestampProto(t time.Time) (*timestamp, error) { - seconds := t.Unix() - nanos := int32(t.Sub(time.Unix(seconds, 0))) - ts := &timestamp{ - Seconds: seconds, - Nanos: nanos, - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go deleted file mode 100644 index 38439fa9..00000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() - -type timestamp struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *timestamp) Reset() { *m = timestamp{} } -func (*timestamp) ProtoMessage() {} -func (*timestamp) String() string { return "timestamp" } - -func init() { - RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go deleted file mode 100644 index b175d1b6..00000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers.go +++ /dev/null @@ -1,1888 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go deleted file mode 100644 index c1cf7bf8..00000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go +++ /dev/null @@ -1,113 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -type float64Value struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float64Value) Reset() { *m = float64Value{} } -func (*float64Value) ProtoMessage() {} -func (*float64Value) String() string { return "float64" } - -type float32Value struct { - Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float32Value) Reset() { *m = float32Value{} } -func (*float32Value) ProtoMessage() {} -func (*float32Value) String() string { return "float32" } - -type int64Value struct { - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int64Value) Reset() { *m = int64Value{} } -func (*int64Value) ProtoMessage() {} -func (*int64Value) String() string { return "int64" } - -type uint64Value struct { - Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint64Value) Reset() { *m = uint64Value{} } -func (*uint64Value) ProtoMessage() {} -func (*uint64Value) String() string { return "uint64" } - -type int32Value struct { - Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int32Value) Reset() { *m = int32Value{} } -func (*int32Value) ProtoMessage() {} -func (*int32Value) String() string { return "int32" } - -type uint32Value struct { - Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint32Value) Reset() { *m = uint32Value{} } -func (*uint32Value) ProtoMessage() {} -func (*uint32Value) String() string { return "uint32" } - -type boolValue struct { - Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *boolValue) Reset() { *m = boolValue{} } -func (*boolValue) ProtoMessage() {} -func (*boolValue) String() string { return "bool" } - -type stringValue struct { - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *stringValue) Reset() { *m = stringValue{} } -func (*stringValue) ProtoMessage() {} -func (*stringValue) String() string { return "string" } - -type bytesValue struct { - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *bytesValue) Reset() { *m = bytesValue{} } -func (*bytesValue) ProtoMessage() {} -func (*bytesValue) String() string { return "[]byte" } - -func init() { - RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") - RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") - RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") - RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") - RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") - RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") - RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") - RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") - RegisterType((*bytesValue)(nil), 
"gogo.protobuf.proto.BytesValue") -} diff --git a/vendor/github.com/kevinburke/ssh_config/.gitattributes b/vendor/github.com/kevinburke/ssh_config/.gitattributes deleted file mode 100644 index 44db5818..00000000 --- a/vendor/github.com/kevinburke/ssh_config/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -testdata/dos-lines eol=crlf diff --git a/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt b/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt index 311aeb1b..157361b2 100644 --- a/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt +++ b/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt @@ -5,5 +5,6 @@ Kevin Burke Mark Nevill Scott Lessans Sergey Lukjanov +Simon Josefsson Wayne Ashley Berry santosh653 <70637961+santosh653@users.noreply.github.com> diff --git a/vendor/github.com/kevinburke/ssh_config/CHANGELOG.md b/vendor/github.com/kevinburke/ssh_config/CHANGELOG.md index d32a3f51..ba84b51b 100644 --- a/vendor/github.com/kevinburke/ssh_config/CHANGELOG.md +++ b/vendor/github.com/kevinburke/ssh_config/CHANGELOG.md @@ -1,5 +1,18 @@ # Changes +## Version 1.4 (released August 2025) + +- Remove .gitattributes file (which was used to test different line endings, and +caused issues in some build environments). + +## Version 1.3 (released February 2025) + +- Add go.mod file (although this project has no dependencies). + +- Various updates to CI and build environment + +- config: add UserSettings.ConfigFinder + ## Version 1.2 Previously, if a Host declaration or a value had trailing whitespace, that diff --git a/vendor/github.com/kevinburke/ssh_config/Makefile b/vendor/github.com/kevinburke/ssh_config/Makefile index df7ee728..4ee41ab5 100644 --- a/vendor/github.com/kevinburke/ssh_config/Makefile +++ b/vendor/github.com/kevinburke/ssh_config/Makefile @@ -1,19 +1,15 @@ BUMP_VERSION := $(GOPATH)/bin/bump_version -STATICCHECK := $(GOPATH)/bin/staticcheck WRITE_MAILMAP := $(GOPATH)/bin/write_mailmap -$(STATICCHECK): - go get honnef.co/go/tools/cmd/staticcheck - -lint: $(STATICCHECK) +lint: go vet ./... - $(STATICCHECK) + go run honnef.co/go/tools/cmd/staticcheck@latest ./... -test: lint +test: @# the timeout helps guard against infinite recursion go test -timeout=250ms ./... -race-test: lint +race-test: go test -timeout=500ms -race ./... $(BUMP_VERSION): diff --git a/vendor/github.com/kevinburke/ssh_config/README.md b/vendor/github.com/kevinburke/ssh_config/README.md index f14b2168..705605e2 100644 --- a/vendor/github.com/kevinburke/ssh_config/README.md +++ b/vendor/github.com/kevinburke/ssh_config/README.md @@ -82,11 +82,11 @@ file format. [blog]: https://kev.inburke.com/kevin/more-comment-preserving-configuration-parsers/ [hostsfile]: https://github.com/kevinburke/hostsfile -## Donating +## Sponsorships -I don't get paid to maintain this project. Donations free up time to make -improvements to the library, and respond to bug reports. You can send donations -via Paypal's "Send Money" feature to kev@inburke.com. Donations are not tax -deductible in the USA. +Thank you very much to Tailscale and Indeed for sponsoring development of this +library. [Sponsors][sponsors] will get their names featured in the README. 
You can also reach out about a consulting engagement: https://burke.services + +[sponsors]: https://github.com/sponsors/kevinburke diff --git a/vendor/github.com/kevinburke/ssh_config/config.go b/vendor/github.com/kevinburke/ssh_config/config.go index 00d815c1..bb7315a3 100644 --- a/vendor/github.com/kevinburke/ssh_config/config.go +++ b/vendor/github.com/kevinburke/ssh_config/config.go @@ -8,7 +8,7 @@ // the host name to match on ("example.com"), and the second argument is the key // you want to retrieve ("Port"). The keywords are case insensitive. // -// port := ssh_config.Get("myhost", "Port") +// port := ssh_config.Get("myhost", "Port") // // You can also manipulate an SSH config file and then print it or write it back // to disk. @@ -43,7 +43,7 @@ import ( "sync" ) -const version = "1.2" +const version = "1.4.0" var _ = version @@ -53,6 +53,8 @@ type configFinder func() string // files are parsed and cached the first time Get() or GetStrict() is called. type UserSettings struct { IgnoreErrors bool + customConfig *Config + customConfigFinder configFinder systemConfig *Config systemConfigFinder configFinder userConfig *Config @@ -203,6 +205,13 @@ func (u *UserSettings) GetStrict(alias, key string) (string, error) { if u.onceErr != nil && u.IgnoreErrors == false { return "", u.onceErr } + // TODO this is getting repetitive + if u.customConfig != nil { + val, err := findVal(u.customConfig, alias, key) + if err != nil || val != "" { + return val, err + } + } val, err := findVal(u.userConfig, alias, key) if err != nil || val != "" { return val, err @@ -228,6 +237,12 @@ func (u *UserSettings) GetAllStrict(alias, key string) ([]string, error) { if u.onceErr != nil && u.IgnoreErrors == false { return nil, u.onceErr } + if u.customConfig != nil { + val, err := findAll(u.customConfig, alias, key) + if err != nil || val != nil { + return val, err + } + } val, err := findAll(u.userConfig, alias, key) if err != nil || val != nil { return val, err @@ -243,16 +258,38 @@ func (u *UserSettings) GetAllStrict(alias, key string) ([]string, error) { return []string{}, nil } +// ConfigFinder will invoke f to try to find a ssh config file in a custom +// location on disk, instead of in /etc/ssh or $HOME/.ssh. f should return the +// name of a file containing SSH configuration. +// +// ConfigFinder must be invoked before any calls to Get or GetStrict and panics +// if f is nil. Most users should not need to use this function. +func (u *UserSettings) ConfigFinder(f func() string) { + if f == nil { + panic("cannot call ConfigFinder with nil function") + } + u.customConfigFinder = f +} + func (u *UserSettings) doLoadConfigs() { u.loadConfigs.Do(func() { - // can't parse user file, that's ok. 
var filename string + var err error + if u.customConfigFinder != nil { + filename = u.customConfigFinder() + u.customConfig, err = parseFile(filename) + // IsNotExist should be returned because a user specified this + // function - not existing likely means they made an error + if err != nil { + u.onceErr = err + } + return + } if u.userConfigFinder == nil { filename = userConfigFinder() } else { filename = u.userConfigFinder() } - var err error u.userConfig, err = parseFile(filename) //lint:ignore S1002 I prefer it this way if err != nil && os.IsNotExist(err) == false { diff --git a/vendor/github.com/kevinburke/ssh_config/parser.go b/vendor/github.com/kevinburke/ssh_config/parser.go index 2b1e718c..fdd6ce9d 100644 --- a/vendor/github.com/kevinburke/ssh_config/parser.go +++ b/vendor/github.com/kevinburke/ssh_config/parser.go @@ -21,9 +21,9 @@ type sshParser struct { type sshParserStateFn func() sshParserStateFn // Formats and panics an error message based on a token -func (p *sshParser) raiseErrorf(tok *token, msg string, args ...interface{}) { +func (p *sshParser) raiseErrorf(tok *token, msg string) { // TODO this format is ugly - panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) + panic(tok.Position.String() + ": " + msg) } func (p *sshParser) raiseError(tok *token, err error) { @@ -118,7 +118,7 @@ func (p *sshParser) parseKV() sshParserStateFn { } pat, err := NewPattern(strPatterns[i]) if err != nil { - p.raiseErrorf(val, "Invalid host pattern: %v", err) + p.raiseErrorf(val, fmt.Sprintf("Invalid host pattern: %v", err)) return nil } patterns = append(patterns, pat) @@ -144,7 +144,7 @@ func (p *sshParser) parseKV() sshParserStateFn { return nil } if err != nil { - p.raiseErrorf(val, "Error parsing Include directive: %v", err) + p.raiseErrorf(val, fmt.Sprintf("Error parsing Include directive: %v", err)) return nil } lastHost.Nodes = append(lastHost.Nodes, inc) diff --git a/vendor/github.com/klauspost/cpuid/v2/.gitignore b/vendor/github.com/klauspost/cpuid/v2/.gitignore new file mode 100644 index 00000000..daf913b1 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml new file mode 100644 index 00000000..1b695b62 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml @@ -0,0 +1,57 @@ +version: 2 + +builds: + - + id: "cpuid" + binary: cpuid + main: ./cmd/cpuid/main.go + env: + - CGO_ENABLED=0 + flags: + - -ldflags=-s -w + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm64 + goarm: + - 7 + +archives: + - + id: cpuid + name_template: "cpuid-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + format_overrides: + - goos: windows + format: zip + files: + - LICENSE +checksum: + name_template: 'checksums.txt' +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "cpuid_package_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/cpuid + maintainer: Klaus Post + description: CPUID Tool + license: BSD 
3-Clause + formats: + - deb + - rpm diff --git a/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt new file mode 100644 index 00000000..2ef4714f --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt @@ -0,0 +1,35 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2015- Klaus Post & Contributors. +Email: klauspost@gmail.com + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/vendor/github.com/klauspost/cpuid/v2/LICENSE b/vendor/github.com/klauspost/cpuid/v2/LICENSE new file mode 100644 index 00000000..5cec7ee9 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md new file mode 100644 index 00000000..88d68d52 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/README.md @@ -0,0 +1,512 @@ +# cpuid +Package cpuid provides information about the CPU running the current program. + +CPU features are detected on startup, and kept for fast access through the life of the application. 
+Currently x86 / x64 (AMD64/i386) and ARM (ARM64) is supported, and no external C (cgo) code is used, which should make the library very easy to use. + +You can access the CPU information by accessing the shared CPU variable of the cpuid library. + +Package home: https://github.com/klauspost/cpuid + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/klauspost/cpuid)](https://pkg.go.dev/github.com/klauspost/cpuid/v2) +[![Go](https://github.com/klauspost/cpuid/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/cpuid/actions/workflows/go.yml) + +## installing + +`go get -u github.com/klauspost/cpuid/v2` using modules. +Drop `v2` for others. + +Installing binary: + +`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest` + +Or download binaries from release page: https://github.com/klauspost/cpuid/releases + +### Homebrew + +For macOS/Linux users, you can install via [brew](https://brew.sh/) + +```sh +$ brew install cpuid +``` + +## example + +```Go +package main + +import ( + "fmt" + "strings" + + . "github.com/klauspost/cpuid/v2" +) + +func main() { + // Print basic CPU information: + fmt.Println("Name:", CPU.BrandName) + fmt.Println("PhysicalCores:", CPU.PhysicalCores) + fmt.Println("ThreadsPerCore:", CPU.ThreadsPerCore) + fmt.Println("LogicalCores:", CPU.LogicalCores) + fmt.Println("Family", CPU.Family, "Model:", CPU.Model, "Vendor ID:", CPU.VendorID) + fmt.Println("Features:", strings.Join(CPU.FeatureSet(), ",")) + fmt.Println("Cacheline bytes:", CPU.CacheLine) + fmt.Println("L1 Data Cache:", CPU.Cache.L1D, "bytes") + fmt.Println("L1 Instruction Cache:", CPU.Cache.L1I, "bytes") + fmt.Println("L2 Cache:", CPU.Cache.L2, "bytes") + fmt.Println("L3 Cache:", CPU.Cache.L3, "bytes") + fmt.Println("Frequency", CPU.Hz, "hz") + + // Test if we have these specific features: + if CPU.Supports(SSE, SSE2) { + fmt.Println("We have Streaming SIMD 2 Extensions") + } +} +``` + +Sample output: +``` +>go run main.go +Name: AMD Ryzen 9 3950X 16-Core Processor +PhysicalCores: 16 +ThreadsPerCore: 2 +LogicalCores: 32 +Family 23 Model: 113 Vendor ID: AMD +Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CMOV,CX16,F16C,FMA3,HTT,HYPERVISOR,LZCNT,MMX,MMXEXT,NX,POPCNT,RDRAND,RDSEED,RDTSCP,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3 +Cacheline bytes: 64 +L1 Data Cache: 32768 bytes +L1 Instruction Cache: 32768 bytes +L2 Cache: 524288 bytes +L3 Cache: 16777216 bytes +Frequency 0 hz +We have Streaming SIMD 2 Extensions +``` + +# usage + +The `cpuid.CPU` provides access to CPU features. Use `cpuid.CPU.Supports()` to check for CPU features. +A faster `cpuid.CPU.Has()` is provided which will usually be inlined by the gc compiler. + +To test a larger number of features, they can be combined using `f := CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SYSCALL, SSE, SSE2)`, etc. +This can be using with `cpuid.CPU.HasAll(f)` to quickly test if all features are supported. + +Note that for some cpu/os combinations some features will not be detected. +`amd64` has rather good support and should work reliably on all platforms. + +Note that hypervisors may not pass through all CPU features through to the guest OS, +so even if your host supports a feature it may not be visible on guests. + +## arm64 feature detection + +Not all operating systems provide ARM features directly +and there is no safe way to do so for the rest. + +Currently `arm64/linux` and `arm64/freebsd` should be quite reliable. +`arm64/darwin` adds features expected from the M1 processor, but a lot remains undetected. 
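As a minimal sketch of the combined-feature check described in the usage section above (assuming the `CombineFeatures`, `HasAll`, and `Has` helpers behave as that text describes; the feature constants are taken from the README's own example):

```Go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// Fold the features named in the usage section into a single mask,
	// then test them all with one call instead of one Supports() per flag.
	baseline := cpuid.CombineFeatures(
		cpuid.CMOV, cpuid.CMPXCHG8, cpuid.X87, cpuid.FXSR,
		cpuid.MMX, cpuid.SYSCALL, cpuid.SSE, cpuid.SSE2,
	)
	if cpuid.CPU.HasAll(baseline) {
		fmt.Println("baseline x86-64 feature set is available")
	}

	// Has() is the cheaper single-feature check that is usually inlined.
	if cpuid.CPU.Has(cpuid.AVX2) {
		fmt.Println("AVX2 is available")
	}
}
```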
+ +A `DetectARM()` can be used if you are able to control your deployment, +it will detect CPU features, but may crash if the OS doesn't intercept the calls. +A `-cpu.arm` flag for detecting unsafe ARM features can be added. See below. + +Note that currently only features are detected on ARM, +no additional information is currently available. + +## flags + +It is possible to add flags that affects cpu detection. + +For this the `Flags()` command is provided. + +This must be called *before* `flag.Parse()` AND after the flags have been parsed `Detect()` must be called. + +This means that any detection used in `init()` functions will not contain these flags. + +Example: + +```Go +package main + +import ( + "flag" + "fmt" + "strings" + + "github.com/klauspost/cpuid/v2" +) + +func main() { + cpuid.Flags() + flag.Parse() + cpuid.Detect() + + // Test if we have these specific features: + if cpuid.CPU.Supports(cpuid.SSE, cpuid.SSE2) { + fmt.Println("We have Streaming SIMD 2 Extensions") + } +} +``` + +## commandline + +Download as binary from: https://github.com/klauspost/cpuid/releases + +Install from source: + +`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest` + +### Example + +``` +λ cpuid +Name: AMD Ryzen 9 3950X 16-Core Processor +Vendor String: AuthenticAMD +Vendor ID: AMD +PhysicalCores: 16 +Threads Per Core: 2 +Logical Cores: 32 +CPU Family 23 Model: 113 +Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CLZERO,CMOV,CMPXCHG8,CPBOOST,CX16,F16C,FMA3,FXSR,FXSROPT,HTT,HYPERVISOR,LAHF,LZCNT,MCAOVERFLOW,MMX,MMXEXT,MOVBE,NX,OSXSAVE,POPCNT,RDRAND,RDSEED,RDTSCP,SCE,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3,SUCCOR,X87,XSAVE +Microarchitecture level: 3 +Cacheline bytes: 64 +L1 Instruction Cache: 32768 bytes +L1 Data Cache: 32768 bytes +L2 Cache: 524288 bytes +L3 Cache: 16777216 bytes + +``` +### JSON Output: + +``` +λ cpuid --json +{ + "BrandName": "AMD Ryzen 9 3950X 16-Core Processor", + "VendorID": 2, + "VendorString": "AuthenticAMD", + "PhysicalCores": 16, + "ThreadsPerCore": 2, + "LogicalCores": 32, + "Family": 23, + "Model": 113, + "CacheLine": 64, + "Hz": 0, + "BoostFreq": 0, + "Cache": { + "L1I": 32768, + "L1D": 32768, + "L2": 524288, + "L3": 16777216 + }, + "SGX": { + "Available": false, + "LaunchControl": false, + "SGX1Supported": false, + "SGX2Supported": false, + "MaxEnclaveSizeNot64": 0, + "MaxEnclaveSize64": 0, + "EPCSections": null + }, + "Features": [ + "ADX", + "AESNI", + "AVX", + "AVX2", + "BMI1", + "BMI2", + "CLMUL", + "CLZERO", + "CMOV", + "CMPXCHG8", + "CPBOOST", + "CX16", + "F16C", + "FMA3", + "FXSR", + "FXSROPT", + "HTT", + "HYPERVISOR", + "LAHF", + "LZCNT", + "MCAOVERFLOW", + "MMX", + "MMXEXT", + "MOVBE", + "NX", + "OSXSAVE", + "POPCNT", + "RDRAND", + "RDSEED", + "RDTSCP", + "SCE", + "SHA", + "SSE", + "SSE2", + "SSE3", + "SSE4", + "SSE42", + "SSE4A", + "SSSE3", + "SUCCOR", + "X87", + "XSAVE" + ], + "X64Level": 3 +} +``` + +### Check CPU microarch level + +``` +λ cpuid --check-level=3 +2022/03/18 17:04:40 AMD Ryzen 9 3950X 16-Core Processor +2022/03/18 17:04:40 Microarchitecture level 3 is supported. Max level is 3. +Exit Code 0 + +λ cpuid --check-level=4 +2022/03/18 17:06:18 AMD Ryzen 9 3950X 16-Core Processor +2022/03/18 17:06:18 Microarchitecture level 4 not supported. Max level is 3. 
+Exit Code 1 +``` + + +## Available flags + +### x86 & amd64 + +| Feature Flag | Description | +|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ADX | Intel ADX (Multi-Precision Add-Carry Instruction Extensions) | +| AESNI | Advanced Encryption Standard New Instructions | +| AMD3DNOW | AMD 3DNOW | +| AMD3DNOWEXT | AMD 3DNowExt | +| AMXBF16 | Tile computational operations on BFLOAT16 numbers | +| AMXINT8 | Tile computational operations on 8-bit integers | +| AMXFP16 | Tile computational operations on FP16 numbers | +| AMXFP8 | Tile computational operations on FP8 numbers | +| AMXCOMPLEX | Tile computational operations on complex numbers | +| AMXTILE | Tile architecture | +| AMXTF32 | Matrix Multiplication of TF32 Tiles into Packed Single Precision Tile | +| AMXTRANSPOSE | Tile multiply where the first operand is transposed | +| APX_F | Intel APX | +| AVX | AVX functions | +| AVX10 | If set the Intel AVX10 Converged Vector ISA is supported | +| AVX10_128 | If set indicates that AVX10 128-bit vector support is present | +| AVX10_256 | If set indicates that AVX10 256-bit vector support is present | +| AVX10_512 | If set indicates that AVX10 512-bit vector support is present | +| AVX2 | AVX2 functions | +| AVX512BF16 | AVX-512 BFLOAT16 Instructions | +| AVX512BITALG | AVX-512 Bit Algorithms | +| AVX512BW | AVX-512 Byte and Word Instructions | +| AVX512CD | AVX-512 Conflict Detection Instructions | +| AVX512DQ | AVX-512 Doubleword and Quadword Instructions | +| AVX512ER | AVX-512 Exponential and Reciprocal Instructions | +| AVX512F | AVX-512 Foundation | +| AVX512FP16 | AVX-512 FP16 Instructions | +| AVX512IFMA | AVX-512 Integer Fused Multiply-Add Instructions | +| AVX512PF | AVX-512 Prefetch Instructions | +| AVX512VBMI | AVX-512 Vector Bit Manipulation Instructions | +| AVX512VBMI2 | AVX-512 Vector Bit Manipulation Instructions, Version 2 | +| AVX512VL | AVX-512 Vector Length Extensions | +| AVX512VNNI | AVX-512 Vector Neural Network Instructions | +| AVX512VP2INTERSECT | AVX-512 Intersect for D/Q | +| AVX512VPOPCNTDQ | AVX-512 Vector Population Count Doubleword and Quadword | +| AVXIFMA | AVX-IFMA instructions | +| AVXNECONVERT | AVX-NE-CONVERT instructions | +| AVXSLOW | Indicates the CPU performs 2 128 bit operations instead of one | +| AVXVNNI | AVX (VEX encoded) VNNI neural network instructions | +| AVXVNNIINT8 | AVX-VNNI-INT8 instructions | +| AVXVNNIINT16 | AVX-VNNI-INT16 instructions | +| BHI_CTRL | Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 | +| BMI1 | Bit Manipulation Instruction Set 1 | +| BMI2 | Bit Manipulation Instruction Set 2 | +| CETIBT | Intel CET Indirect Branch Tracking | +| CETSS | Intel CET Shadow Stack | +| CLDEMOTE | Cache Line Demote | +| CLMUL | Carry-less Multiplication | +| CLZERO | CLZERO instruction supported | +| CMOV | i686 CMOV | +| CMPCCXADD | CMPCCXADD instructions | +| CMPSB_SCADBS_SHORT | Fast short CMPSB and SCASB | +| CMPXCHG8 | CMPXCHG8 instruction | +| CPBOOST | Core Performance Boost | +| CPPC | AMD: Collaborative Processor Performance Control | +| CX16 | CMPXCHG16B Instruction | +| EFER_LMSLE_UNS | AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ | +| ENQCMD | Enqueue Command | +| ERMS | Enhanced REP MOVSB/STOSB | +| F16C | Half-precision floating-point conversion | +| FLUSH_L1D | Flush L1D cache | +| FMA3 | Intel FMA 
3. Does not imply AVX. | +| FMA4 | Bulldozer FMA4 functions | +| FP128 | AMD: When set, the internal FP/SIMD execution datapath is 128-bits wide | +| FP256 | AMD: When set, the internal FP/SIMD execution datapath is 256-bits wide | +| FSRM | Fast Short Rep Mov | +| FXSR | FXSAVE, FXRESTOR instructions, CR4 bit 9 | +| FXSROPT | FXSAVE/FXRSTOR optimizations | +| GFNI | Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. | +| HLE | Hardware Lock Elision | +| HRESET | If set CPU supports history reset and the IA32_HRESET_ENABLE MSR | +| HTT | Hyperthreading (enabled) | +| HWA | Hardware assert supported. Indicates support for MSRC001_10 | +| HYBRID_CPU | This part has CPUs of more than one type. | +| HYPERVISOR | This bit has been reserved by Intel & AMD for use by hypervisors | +| IA32_ARCH_CAP | IA32_ARCH_CAPABILITIES MSR (Intel) | +| IA32_CORE_CAP | IA32_CORE_CAPABILITIES MSR | +| IBPB | Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) | +| IBRS | AMD: Indirect Branch Restricted Speculation | +| IBRS_PREFERRED | AMD: IBRS is preferred over software solution | +| IBRS_PROVIDES_SMP | AMD: IBRS provides Same Mode Protection | +| IBS | Instruction Based Sampling (AMD) | +| IBSBRNTRGT | Instruction Based Sampling Feature (AMD) | +| IBSFETCHSAM | Instruction Based Sampling Feature (AMD) | +| IBSFFV | Instruction Based Sampling Feature (AMD) | +| IBSOPCNT | Instruction Based Sampling Feature (AMD) | +| IBSOPCNTEXT | Instruction Based Sampling Feature (AMD) | +| IBSOPSAM | Instruction Based Sampling Feature (AMD) | +| IBSRDWROPCNT | Instruction Based Sampling Feature (AMD) | +| IBSRIPINVALIDCHK | Instruction Based Sampling Feature (AMD) | +| IBS_FETCH_CTLX | AMD: IBS fetch control extended MSR supported | +| IBS_OPDATA4 | AMD: IBS op data 4 MSR supported | +| IBS_OPFUSE | AMD: Indicates support for IbsOpFuse | +| IBS_PREVENTHOST | Disallowing IBS use by the host supported | +| IBS_ZEN4 | Fetch and Op IBS support IBS extensions added with Zen4 | +| IDPRED_CTRL | IPRED_DIS | +| INT_WBINVD | WBINVD/WBNOINVD are interruptible. | +| INVLPGB | NVLPGB and TLBSYNC instruction supported | +| KEYLOCKER | Key locker | +| KEYLOCKERW | Key locker wide | +| LAHF | LAHF/SAHF in long mode | +| LAM | If set, CPU supports Linear Address Masking | +| LBRVIRT | LBR virtualization | +| LZCNT | LZCNT instruction | +| MCAOVERFLOW | MCA overflow recovery support. | +| MCDT_NO | Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. | +| MCOMMIT | MCOMMIT instruction supported | +| MD_CLEAR | VERW clears CPU buffers | +| MMX | standard MMX | +| MMXEXT | SSE integer functions or AMD MMX ext | +| MOVBE | MOVBE instruction (big-endian) | +| MOVDIR64B | Move 64 Bytes as Direct Store | +| MOVDIRI | Move Doubleword as Direct Store | +| MOVSB_ZL | Fast Zero-Length MOVSB | +| MPX | Intel MPX (Memory Protection Extensions) | +| MOVU | MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. 
MOVUPD is more efficient than MOVLPD/MOVHPD | +| MSRIRC | Instruction Retired Counter MSR available | +| MSRLIST | Read/Write List of Model Specific Registers | +| MSR_PAGEFLUSH | Page Flush MSR available | +| NRIPS | Indicates support for NRIP save on VMEXIT | +| NX | NX (No-Execute) bit | +| OSXSAVE | XSAVE enabled by OS | +| PCONFIG | PCONFIG for Intel Multi-Key Total Memory Encryption | +| POPCNT | POPCNT instruction | +| PPIN | AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled | +| PREFETCHI | PREFETCHIT0/1 instructions | +| PSFD | Predictive Store Forward Disable | +| RDPRU | RDPRU instruction supported | +| RDRAND | RDRAND instruction is available | +| RDSEED | RDSEED instruction is available | +| RDTSCP | RDTSCP Instruction | +| RRSBA_CTRL | Restricted RSB Alternate | +| RTM | Restricted Transactional Memory | +| RTM_ALWAYS_ABORT | Indicates that the loaded microcode is forcing RTM abort. | +| SERIALIZE | Serialize Instruction Execution | +| SEV | AMD Secure Encrypted Virtualization supported | +| SEV_64BIT | AMD SEV guest execution only allowed from a 64-bit host | +| SEV_ALTERNATIVE | AMD SEV Alternate Injection supported | +| SEV_DEBUGSWAP | Full debug state swap supported for SEV-ES guests | +| SEV_ES | AMD SEV Encrypted State supported | +| SEV_RESTRICTED | AMD SEV Restricted Injection supported | +| SEV_SNP | AMD SEV Secure Nested Paging supported | +| SGX | Software Guard Extensions | +| SGXLC | Software Guard Extensions Launch Control | +| SGXPQC | Software Guard Extensions 256-bit Encryption | +| SHA | Intel SHA Extensions | +| SME | AMD Secure Memory Encryption supported | +| SME_COHERENT | AMD Hardware cache coherency across encryption domains enforced | +| SM3_X86 | SM3 instructions | +| SM4_X86 | SM4 instructions | +| SPEC_CTRL_SSBD | Speculative Store Bypass Disable | +| SRBDS_CTRL | SRBDS mitigation MSR available | +| SSE | SSE functions | +| SSE2 | P4 SSE functions | +| SSE3 | Prescott SSE3 functions | +| SSE4 | Penryn SSE4.1 functions | +| SSE42 | Nehalem SSE4.2 functions | +| SSE4A | AMD Barcelona microarchitecture SSE4a instructions | +| SSSE3 | Conroe SSSE3 functions | +| STIBP | Single Thread Indirect Branch Predictors | +| STIBP_ALWAYSON | AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On | +| STOSB_SHORT | Fast short STOSB | +| SUCCOR | Software uncorrectable error containment and recovery capability. | +| SVM | AMD Secure Virtual Machine | +| SVMDA | Indicates support for the SVM decode assists. | +| SVMFBASID | SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control | +| SVML | AMD SVM lock. Indicates support for SVM-Lock. | +| SVMNP | AMD SVM nested paging | +| SVMPF | SVM pause intercept filter. Indicates support for the pause intercept filter | +| SVMPFT | SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold | +| SYSCALL | System-Call Extension (SCE): SYSCALL and SYSRET instructions. | +| SYSEE | SYSENTER and SYSEXIT instructions | +| TBM | AMD Trailing Bit Manipulation | +| TDX_GUEST | Intel Trust Domain Extensions Guest | +| TLB_FLUSH_NESTED | AMD: Flushing includes all the nested translations for guest translations | +| TME | Intel Total Memory Encryption. 
The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. | +| TOPEXT | TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. | +| TSA_L1_NO | AMD only: Not vulnerable to TSA-L1 | +| TSA_SQ_NO | AMD only: Not vulnerable to TSA-SQ | +| TSA_VERW_CLEAR | AMD: If set, the memory form of the VERW instruction may be used to help mitigate TSA | +| TSCRATEMSR | MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 | +| TSXLDTRK | Intel TSX Suspend Load Address Tracking | +| VAES | Vector AES. AVX(512) versions requires additional checks. | +| VMCBCLEAN | VMCB clean bits. Indicates support for VMCB clean bits. | +| VMPL | AMD VM Permission Levels supported | +| VMSA_REGPROT | AMD VMSA Register Protection supported | +| VMX | Virtual Machine Extensions | +| VPCLMULQDQ | Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. | +| VTE | AMD Virtual Transparent Encryption supported | +| WAITPKG | TPAUSE, UMONITOR, UMWAIT | +| WBNOINVD | Write Back and Do Not Invalidate Cache | +| WRMSRNS | Non-Serializing Write to Model Specific Register | +| X87 | FPU | +| XGETBV1 | Supports XGETBV with ECX = 1 | +| XOP | Bulldozer XOP functions | +| XSAVE | XSAVE, XRESTOR, XSETBV, XGETBV | +| XSAVEC | Supports XSAVEC and the compacted form of XRSTOR. | +| XSAVEOPT | XSAVEOPT available | +| XSAVES | Supports XSAVES/XRSTORS and IA32_XSS | + +# ARM features: + +| Feature Flag | Description | +|--------------|------------------------------------------------------------------| +| AESARM | AES instructions | +| ARMCPUID | Some CPU ID registers readable at user-level | +| ASIMD | Advanced SIMD | +| ASIMDDP | SIMD Dot Product | +| ASIMDHP | Advanced SIMD half-precision floating point | +| ASIMDRDM | Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) | +| ATOMICS | Large System Extensions (LSE) | +| CRC32 | CRC32/CRC32C instructions | +| DCPOP | Data cache clean to Point of Persistence (DC CVAP) | +| EVTSTRM | Generic timer | +| FCMA | Floatin point complex number addition and multiplication | +| FHM | FMLAL and FMLSL instructions | +| FP | Single-precision and double-precision floating point | +| FPHP | Half-precision floating point | +| GPA | Generic Pointer Authentication | +| JSCVT | Javascript-style double->int convert (FJCVTZS) | +| LRCPC | Weaker release consistency (LDAPR, etc) | +| PMULL | Polynomial Multiply instructions (PMULL/PMULL2) | +| RNDR | Random Number instructions | +| TLB | Outer Shareable and TLB range maintenance instructions | +| TS | Flag manipulation instructions | +| SHA1 | SHA-1 instructions (SHA1C, etc) | +| SHA2 | SHA-2 instructions (SHA256H, etc) | +| SHA3 | SHA-3 instructions (EOR3, RAXI, XAR, BCAX) | +| SHA512 | SHA512 instructions | +| SM3 | SM3 instructions | +| SM4 | SM4 instructions | +| SVE | Scalable Vector Extension | + +# license + +This code is published under an MIT license. See LICENSE file for more information. diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go new file mode 100644 index 00000000..9cf7738a --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -0,0 +1,1679 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// Package cpuid provides information about the CPU running the current program. 
+// +// CPU features are detected on startup, and kept for fast access through the life of the application. +// Currently x86 / x64 (AMD64) as well as arm64 is supported. +// +// You can access the CPU information by accessing the shared CPU variable of the cpuid library. +// +// Package home: https://github.com/klauspost/cpuid +package cpuid + +import ( + "flag" + "fmt" + "math" + "math/bits" + "os" + "runtime" + "strings" +) + +// AMD refererence: https://www.amd.com/system/files/TechDocs/25481.pdf +// and Processor Programming Reference (PPR) + +// Vendor is a representation of a CPU vendor. +type Vendor int + +const ( + VendorUnknown Vendor = iota + Intel + AMD + VIA + Transmeta + NSC + KVM // Kernel-based Virtual Machine + MSVM // Microsoft Hyper-V or Windows Virtual PC + VMware + XenHVM + Bhyve + Hygon + SiS + RDC + + Ampere + ARM + Broadcom + Cavium + DEC + Fujitsu + Infineon + Motorola + NVIDIA + AMCC + Qualcomm + Marvell + + QEMU + QNX + ACRN + SRE + Apple + + lastVendor +) + +//go:generate stringer -type=FeatureID,Vendor + +// FeatureID is the ID of a specific cpu feature. +type FeatureID int + +const ( + // Keep index -1 as unknown + UNKNOWN = -1 + + // x86 features + ADX FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + AESNI // Advanced Encryption Standard New Instructions + AMD3DNOW // AMD 3DNOW + AMD3DNOWEXT // AMD 3DNowExt + AMXBF16 // Tile computational operations on BFLOAT16 numbers + AMXFP16 // Tile computational operations on FP16 numbers + AMXINT8 // Tile computational operations on 8-bit integers + AMXFP8 // Tile computational operations on FP8 numbers + AMXTILE // Tile architecture + AMXTF32 // Tile architecture + AMXCOMPLEX // Matrix Multiplication of TF32 Tiles into Packed Single Precision Tile + AMXTRANSPOSE // Tile multiply where the first operand is transposed + APX_F // Intel APX + AVX // AVX functions + AVX10 // If set the Intel AVX10 Converged Vector ISA is supported + AVX10_128 // If set indicates that AVX10 128-bit vector support is present + AVX10_256 // If set indicates that AVX10 256-bit vector support is present + AVX10_512 // If set indicates that AVX10 512-bit vector support is present + AVX2 // AVX2 functions + AVX512BF16 // AVX-512 BFLOAT16 Instructions + AVX512BITALG // AVX-512 Bit Algorithms + AVX512BW // AVX-512 Byte and Word Instructions + AVX512CD // AVX-512 Conflict Detection Instructions + AVX512DQ // AVX-512 Doubleword and Quadword Instructions + AVX512ER // AVX-512 Exponential and Reciprocal Instructions + AVX512F // AVX-512 Foundation + AVX512FP16 // AVX-512 FP16 Instructions + AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF // AVX-512 Prefetch Instructions + AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions + AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2 + AVX512VL // AVX-512 Vector Length Extensions + AVX512VNNI // AVX-512 Vector Neural Network Instructions + AVX512VP2INTERSECT // AVX-512 Intersect for D/Q + AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword + AVXIFMA // AVX-IFMA instructions + AVXNECONVERT // AVX-NE-CONVERT instructions + AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one + AVXVNNI // AVX (VEX encoded) VNNI neural network instructions + AVXVNNIINT8 // AVX-VNNI-INT8 instructions + AVXVNNIINT16 // AVX-VNNI-INT16 instructions + BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 + BMI1 // Bit Manipulation 
Instruction Set 1 + BMI2 // Bit Manipulation Instruction Set 2 + CETIBT // Intel CET Indirect Branch Tracking + CETSS // Intel CET Shadow Stack + CLDEMOTE // Cache Line Demote + CLMUL // Carry-less Multiplication + CLZERO // CLZERO instruction supported + CMOV // i686 CMOV + CMPCCXADD // CMPCCXADD instructions + CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB + CMPXCHG8 // CMPXCHG8 instruction + CPBOOST // Core Performance Boost + CPPC // AMD: Collaborative Processor Performance Control + CX16 // CMPXCHG16B Instruction + EFER_LMSLE_UNS // AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ + ENQCMD // Enqueue Command + ERMS // Enhanced REP MOVSB/STOSB + F16C // Half-precision floating-point conversion + FLUSH_L1D // Flush L1D cache + FMA3 // Intel FMA 3. Does not imply AVX. + FMA4 // Bulldozer FMA4 functions + FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide + FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide + FSRM // Fast Short Rep Mov + FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9 + FXSROPT // FXSAVE/FXRSTOR optimizations + GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. + HLE // Hardware Lock Elision + HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR + HTT // Hyperthreading (enabled) + HWA // Hardware assert supported. Indicates support for MSRC001_10 + HYBRID_CPU // This part has CPUs of more than one type. + HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors + IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel) + IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR + IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) + IBPB_BRTYPE // Indicates that MSR 49h (PRED_CMD) bit 0 (IBPB) flushes all branch type predictions from the CPU branch predictor + IBRS // AMD: Indirect Branch Restricted Speculation + IBRS_PREFERRED // AMD: IBRS is preferred over software solution + IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection + IBS // Instruction Based Sampling (AMD) + IBSBRNTRGT // Instruction Based Sampling Feature (AMD) + IBSFETCHSAM // Instruction Based Sampling Feature (AMD) + IBSFFV // Instruction Based Sampling Feature (AMD) + IBSOPCNT // Instruction Based Sampling Feature (AMD) + IBSOPCNTEXT // Instruction Based Sampling Feature (AMD) + IBSOPSAM // Instruction Based Sampling Feature (AMD) + IBSRDWROPCNT // Instruction Based Sampling Feature (AMD) + IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD) + IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported + IBS_OPDATA4 // AMD: IBS op data 4 MSR supported + IBS_OPFUSE // AMD: Indicates support for IbsOpFuse + IBS_PREVENTHOST // Disallowing IBS use by the host supported + IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4 + IDPRED_CTRL // IPRED_DIS + INT_WBINVD // WBINVD/WBNOINVD are interruptible. + INVLPGB // NVLPGB and TLBSYNC instruction supported + KEYLOCKER // Key locker + KEYLOCKERW // Key locker wide + LAHF // LAHF/SAHF in long mode + LAM // If set, CPU supports Linear Address Masking + LBRVIRT // LBR virtualization + LZCNT // LZCNT instruction + MCAOVERFLOW // MCA overflow recovery support. + MCDT_NO // Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. 
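
The feature IDs enumerated in this block are normally consumed through the package-level `CPU` value rather than by decoding raw CPUID bits. A minimal consumer sketch, assuming the vendored import path `github.com/klauspost/cpuid/v2` (illustrative only, not part of the vendored file):

```go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// CPU is filled in by the package's init(), so it can be read directly.
	fmt.Println("brand:", cpuid.CPU.BrandName)
	fmt.Println("physical cores:", cpuid.CPU.PhysicalCores)
	fmt.Println("AVX2:", cpuid.CPU.Supports(cpuid.AVX2))
	fmt.Println("AES-NI and CLMUL:", cpuid.CPU.Supports(cpuid.AESNI, cpuid.CLMUL))
	fmt.Println("all detected features:", cpuid.CPU.FeatureSet())
}
```
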
+ MCOMMIT // MCOMMIT instruction supported + MD_CLEAR // VERW clears CPU buffers + MMX // standard MMX + MMXEXT // SSE integer functions or AMD MMX ext + MOVBE // MOVBE instruction (big-endian) + MOVDIR64B // Move 64 Bytes as Direct Store + MOVDIRI // Move Doubleword as Direct Store + MOVSB_ZL // Fast Zero-Length MOVSB + MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD + MPX // Intel MPX (Memory Protection Extensions) + MSRIRC // Instruction Retired Counter MSR available + MSRLIST // Read/Write List of Model Specific Registers + MSR_PAGEFLUSH // Page Flush MSR available + NRIPS // Indicates support for NRIP save on VMEXIT + NX // NX (No-Execute) bit + OSXSAVE // XSAVE enabled by OS + PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption + POPCNT // POPCNT instruction + PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled + PREFETCHI // PREFETCHIT0/1 instructions + PSFD // Predictive Store Forward Disable + RDPRU // RDPRU instruction supported + RDRAND // RDRAND instruction is available + RDSEED // RDSEED instruction is available + RDTSCP // RDTSCP Instruction + RRSBA_CTRL // Restricted RSB Alternate + RTM // Restricted Transactional Memory + RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. + SBPB // Indicates support for the Selective Branch Predictor Barrier + SERIALIZE // Serialize Instruction Execution + SEV // AMD Secure Encrypted Virtualization supported + SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host + SEV_ALTERNATIVE // AMD SEV Alternate Injection supported + SEV_DEBUGSWAP // Full debug state swap supported for SEV-ES guests + SEV_ES // AMD SEV Encrypted State supported + SEV_RESTRICTED // AMD SEV Restricted Injection supported + SEV_SNP // AMD SEV Secure Nested Paging supported + SGX // Software Guard Extensions + SGXLC // Software Guard Extensions Launch Control + SGXPQC // Software Guard Extensions 256-bit Encryption + SHA // Intel SHA Extensions + SME // AMD Secure Memory Encryption supported + SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced + SM3_X86 // SM3 instructions + SM4_X86 // SM4 instructions + SPEC_CTRL_SSBD // Speculative Store Bypass Disable + SRBDS_CTRL // SRBDS mitigation MSR available + SRSO_MSR_FIX // Indicates that software may use MSR BP_CFG[BpSpecReduce] to mitigate SRSO. + SRSO_NO // Indicates the CPU is not subject to the SRSO vulnerability + SRSO_USER_KERNEL_NO // Indicates the CPU is not subject to the SRSO vulnerability across user/kernel boundaries + SSE // SSE functions + SSE2 // P4 SSE functions + SSE3 // Prescott SSE3 functions + SSE4 // Penryn SSE4.1 functions + SSE42 // Nehalem SSE4.2 functions + SSE4A // AMD Barcelona microarchitecture SSE4a instructions + SSSE3 // Conroe SSSE3 functions + STIBP // Single Thread Indirect Branch Predictors + STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On + STOSB_SHORT // Fast short STOSB + SUCCOR // Software uncorrectable error containment and recovery capability. + SVM // AMD Secure Virtual Machine + SVMDA // Indicates support for the SVM decode assists. + SVMFBASID // SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. 
Also indicates support for the extended VMCBTLB_Control + SVML // AMD SVM lock. Indicates support for SVM-Lock. + SVMNP // AMD SVM nested paging + SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter + SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold + SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions. + SYSEE // SYSENTER and SYSEXIT instructions + TBM // AMD Trailing Bit Manipulation + TDX_GUEST // Intel Trust Domain Extensions Guest + TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations + TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. + TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. + TSA_L1_NO // AMD only: Not vulnerable to TSA-L1 + TSA_SQ_NO // AM onlyD: Not vulnerable to TSA-SQ + TSA_VERW_CLEAR // If set, the memory form of the VERW instruction may be used to help mitigate TSA + TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 + TSXLDTRK // Intel TSX Suspend Load Address Tracking + VAES // Vector AES. AVX(512) versions requires additional checks. + VMCBCLEAN // VMCB clean bits. Indicates support for VMCB clean bits. + VMPL // AMD VM Permission Levels supported + VMSA_REGPROT // AMD VMSA Register Protection supported + VMX // Virtual Machine Extensions + VPCLMULQDQ // Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. + VTE // AMD Virtual Transparent Encryption supported + WAITPKG // TPAUSE, UMONITOR, UMWAIT + WBNOINVD // Write Back and Do Not Invalidate Cache + WRMSRNS // Non-Serializing Write to Model Specific Register + X87 // FPU + XGETBV1 // Supports XGETBV with ECX = 1 + XOP // Bulldozer XOP functions + XSAVE // XSAVE, XRESTOR, XSETBV, XGETBV + XSAVEC // Supports XSAVEC and the compacted form of XRSTOR. 
+ XSAVEOPT // XSAVEOPT available + XSAVES // Supports XSAVES/XRSTORS and IA32_XSS + + // ARM features: + AESARM // AES instructions + ARMCPUID // Some CPU ID registers readable at user-level + ASIMD // Advanced SIMD + ASIMDDP // SIMD Dot Product + ASIMDHP // Advanced SIMD half-precision floating point + ASIMDRDM // Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) + ATOMICS // Large System Extensions (LSE) + CRC32 // CRC32/CRC32C instructions + DCPOP // Data cache clean to Point of Persistence (DC CVAP) + EVTSTRM // Generic timer + FCMA // Floating point complex number addition and multiplication + FHM // FMLAL and FMLSL instructions + FP // Single-precision and double-precision floating point + FPHP // Half-precision floating point + GPA // Generic Pointer Authentication + JSCVT // Javascript-style double->int convert (FJCVTZS) + LRCPC // Weaker release consistency (LDAPR, etc) + PMULL // Polynomial Multiply instructions (PMULL/PMULL2) + RNDR // Random Number instructions + TLB // Outer Shareable and TLB range maintenance instructions + TS // Flag manipulation instructions + SHA1 // SHA-1 instructions (SHA1C, etc) + SHA2 // SHA-2 instructions (SHA256H, etc) + SHA3 // SHA-3 instructions (EOR3, RAXI, XAR, BCAX) + SHA512 // SHA512 instructions + SM3 // SM3 instructions + SM4 // SM4 instructions + SVE // Scalable Vector Extension + + // PMU + PMU_FIXEDCOUNTER_CYCLES + PMU_FIXEDCOUNTER_REFCYCLES + PMU_FIXEDCOUNTER_INSTRUCTIONS + PMU_FIXEDCOUNTER_TOPDOWN_SLOTS + + // Keep it last. It automatically defines the size of []flagSet + lastID + + firstID FeatureID = UNKNOWN + 1 +) + +// CPUInfo contains information about the detected system CPU. +type CPUInfo struct { + BrandName string // Brand name reported by the CPU + VendorID Vendor // Comparable CPU vendor ID + VendorString string // Raw vendor string. + HypervisorVendorID Vendor // Hypervisor vendor + HypervisorVendorString string // Raw hypervisor vendor string + featureSet flagSet // Features of the CPU + PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. + ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. + LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. + Family int // CPU family number + Model int // CPU model number + Stepping int // CPU stepping info + CacheLine int // Cache line size in bytes. Will be 0 if undetectable. + Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed. + BoostFreq int64 // Max clock speed, if known, 0 otherwise + Cache struct { + L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected + L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected + L2 int // L2 Cache (per core or shared). Will be -1 if undetected + L3 int // L3 Cache (per core, per ccx or shared). Will be -1 if undetected + } + SGX SGXSupport + AMDMemEncryption AMDMemEncryptionSupport + AVX10Level uint8 + PMU PerformanceMonitoringInfo // holds information about the PMU + + maxFunc uint32 + maxExFunc uint32 +} + +// PerformanceMonitoringInfo holds information about CPU performance monitoring capabilities. +// This is primarily populated from CPUID leaf 0xAh on x86 +type PerformanceMonitoringInfo struct { + // VersionID (x86 only): Version ID of architectural performance monitoring. + // A value of 0 means architectural performance monitoring is not supported or information is unavailable. 
+ VersionID uint8 + // NumGPPMC: Number of General-Purpose Performance Monitoring Counters per logical processor. + // On ARM, this is derived from PMCR_EL0.N (number of event counters). + NumGPCounters uint8 + // GPPMCWidth: Bit width of General-Purpose Performance Monitoring Counters. + // On ARM, typically 64 for PMU event counters. + GPPMCWidth uint8 + // NumFixedPMC: Number of Fixed-Function Performance Counters. + // Valid on x86 if VersionID > 1. On ARM, this typically includes at least the cycle counter (PMCCNTR_EL0). + NumFixedPMC uint8 + // FixedPMCWidth: Bit width of Fixed-Function Performance Counters. + // Valid on x86 if VersionID > 1. On ARM, the cycle counter (PMCCNTR_EL0) is 64-bit. + FixedPMCWidth uint8 + // Raw register output from CPUID leaf 0xAh. + RawEBX uint32 + RawEAX uint32 + RawEDX uint32 +} + +var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) +var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) +var xgetbv func(index uint32) (eax, edx uint32) +var rdtscpAsm func() (eax, ebx, ecx, edx uint32) +var darwinHasAVX512 = func() bool { return false } + +// CPU contains information about the CPU as detected on startup, +// or when Detect last was called. +// +// Use this as the primary entry point to you data. +var CPU CPUInfo + +func init() { + initCPU() + Detect() +} + +// Detect will re-detect current CPU info. +// This will replace the content of the exported CPU variable. +// +// Unless you expect the CPU to change while you are running your program +// you should not need to call this function. +// If you call this, you must ensure that no other goroutine is accessing the +// exported CPU variable. +func Detect() { + // Set defaults + CPU.ThreadsPerCore = 1 + CPU.Cache.L1I = -1 + CPU.Cache.L1D = -1 + CPU.Cache.L2 = -1 + CPU.Cache.L3 = -1 + safe := true + if detectArmFlag != nil { + safe = !*detectArmFlag + } + addInfo(&CPU, safe) + if displayFeats != nil && *displayFeats { + fmt.Println("cpu features:", strings.Join(CPU.FeatureSet(), ",")) + // Exit with non-zero so tests will print value. + os.Exit(1) + } + if disableFlag != nil { + s := strings.Split(*disableFlag, ",") + for _, feat := range s { + feat := ParseFeature(strings.TrimSpace(feat)) + if feat != UNKNOWN { + CPU.featureSet.unset(feat) + } + } + } +} + +// DetectARM will detect ARM64 features. +// This is NOT done automatically since it can potentially crash +// if the OS does not handle the command. +// If in the future this can be done safely this function may not +// do anything. +func DetectARM() { + addInfo(&CPU, false) +} + +var detectArmFlag *bool +var displayFeats *bool +var disableFlag *string + +// Flags will enable flags. +// This must be called *before* flag.Parse AND +// Detect must be called after the flags have been parsed. +// Note that this means that any detection used in init() functions +// will not contain these flags. +func Flags() { + disableFlag = flag.String("cpu.disable", "", "disable cpu features; comma separated list") + displayFeats = flag.Bool("cpu.features", false, "lists cpu features and exits") + detectArmFlag = flag.Bool("cpu.arm", false, "allow ARM features to be detected; can potentially crash") +} + +// Supports returns whether the CPU supports all of the requested features. +func (c CPUInfo) Supports(ids ...FeatureID) bool { + for _, id := range ids { + if !c.featureSet.inSet(id) { + return false + } + } + return true +} + +// Has allows for checking a single feature. +// Should be inlined by the compiler. 
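
The `Flags`/`Detect` ordering documented above (register the flags, parse, then re-detect) looks like this in a consumer's `main`; a sketch, with the flag names taken from the registrations above:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// Register -cpu.disable, -cpu.features and -cpu.arm before parsing,
	// then re-run detection so the parsed values take effect.
	cpuid.Flags()
	flag.Parse()
	cpuid.Detect()

	fmt.Println("AVX2 after applying flags:", cpuid.CPU.Supports(cpuid.AVX2))
}
```
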
+func (c *CPUInfo) Has(id FeatureID) bool { + return c.featureSet.inSet(id) +} + +// AnyOf returns whether the CPU supports one or more of the requested features. +func (c CPUInfo) AnyOf(ids ...FeatureID) bool { + for _, id := range ids { + if c.featureSet.inSet(id) { + return true + } + } + return false +} + +// Features contains several features combined for a fast check using +// CpuInfo.HasAll +type Features *flagSet + +// CombineFeatures allows to combine several features for a close to constant time lookup. +func CombineFeatures(ids ...FeatureID) Features { + var v flagSet + for _, id := range ids { + v.set(id) + } + return &v +} + +func (c *CPUInfo) HasAll(f Features) bool { + return c.featureSet.hasSetP(f) +} + +// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels +var oneOfLevel = CombineFeatures(SYSEE, SYSCALL) +var level1Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2) +var level2Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3) +var level3Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE) +var level4Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL) + +// X64Level returns the microarchitecture level detected on the CPU. +// If features are lacking or non x64 mode, 0 is returned. +// See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels +func (c CPUInfo) X64Level() int { + if !c.featureSet.hasOneOf(oneOfLevel) { + return 0 + } + if c.featureSet.hasSetP(level4Features) { + return 4 + } + if c.featureSet.hasSetP(level3Features) { + return 3 + } + if c.featureSet.hasSetP(level2Features) { + return 2 + } + if c.featureSet.hasSetP(level1Features) { + return 1 + } + return 0 +} + +// Disable will disable one or several features. +func (c *CPUInfo) Disable(ids ...FeatureID) bool { + for _, id := range ids { + c.featureSet.unset(id) + } + return true +} + +// Enable will disable one or several features even if they were undetected. +// This is of course not recommended for obvious reasons. +func (c *CPUInfo) Enable(ids ...FeatureID) bool { + for _, id := range ids { + c.featureSet.set(id) + } + return true +} + +// IsVendor returns true if vendor is recognized as Intel +func (c CPUInfo) IsVendor(v Vendor) bool { + return c.VendorID == v +} + +// FeatureSet returns all available features as strings. +func (c CPUInfo) FeatureSet() []string { + s := make([]string, 0, c.featureSet.nEnabled()) + s = append(s, c.featureSet.Strings()...) + return s +} + +// RTCounter returns the 64-bit time-stamp counter +// Uses the RDTSCP instruction. The value 0 is returned +// if the CPU does not support the instruction. +func (c CPUInfo) RTCounter() uint64 { + if !c.Has(RDTSCP) { + return 0 + } + a, _, _, d := rdtscpAsm() + return uint64(a) | (uint64(d) << 32) +} + +// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP. +// This variable is OS dependent, but on Linux contains information +// about the current cpu/core the code is running on. +// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. 
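
`CombineFeatures`/`HasAll` above trade a one-time set construction for a near constant-time check, and `X64Level` folds the microarchitecture-level sets into a single number. A usage sketch:

```go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

// fastPath groups every feature one code path needs so the hot path
// performs a single HasAll check instead of several Supports calls.
var fastPath = cpuid.CombineFeatures(cpuid.AVX2, cpuid.BMI2, cpuid.FMA3)

func main() {
	if cpuid.CPU.HasAll(fastPath) {
		fmt.Println("AVX2+BMI2+FMA3 path available")
	}
	// 0 means below level 1 (or not running in x64 mode).
	fmt.Println("x86-64 microarchitecture level:", cpuid.CPU.X64Level())
}
```
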
+func (c CPUInfo) Ia32TscAux() uint32 { + if !c.Has(RDTSCP) { + return 0 + } + _, _, ecx, _ := rdtscpAsm() + return ecx +} + +// SveLengths returns arm SVE vector and predicate lengths in bits. +// Will return 0, 0 if SVE is not enabled or otherwise unable to detect. +func (c CPUInfo) SveLengths() (vl, pl uint64) { + if !c.Has(SVE) { + return 0, 0 + } + return getVectorLength() +} + +// LogicalCPU will return the Logical CPU the code is currently executing on. +// This is likely to change when the OS re-schedules the running thread +// to another CPU. +// If the current core cannot be detected, -1 will be returned. +func (c CPUInfo) LogicalCPU() int { + if c.maxFunc < 1 { + return -1 + } + _, ebx, _, _ := cpuid(1) + return int(ebx >> 24) +} + +// frequencies tries to compute the clock speed of the CPU. If leaf 15 is +// supported, use it, otherwise parse the brand string. Yes, really. +func (c *CPUInfo) frequencies() { + c.Hz, c.BoostFreq = 0, 0 + mfi := maxFunctionID() + if mfi >= 0x15 { + eax, ebx, ecx, _ := cpuid(0x15) + if eax != 0 && ebx != 0 && ecx != 0 { + c.Hz = (int64(ecx) * int64(ebx)) / int64(eax) + } + } + if mfi >= 0x16 { + a, b, _, _ := cpuid(0x16) + // Base... + if a&0xffff > 0 { + c.Hz = int64(a&0xffff) * 1_000_000 + } + // Boost... + if b&0xffff > 0 { + c.BoostFreq = int64(b&0xffff) * 1_000_000 + } + } + if c.Hz > 0 { + return + } + + // computeHz determines the official rated speed of a CPU from its brand + // string. This insanity is *actually the official documented way to do + // this according to Intel*, prior to leaf 0x15 existing. The official + // documentation only shows this working for exactly `x.xx` or `xxxx` + // cases, e.g., `2.50GHz` or `1300MHz`; this parser will accept other + // sizes. + model := c.BrandName + hz := strings.LastIndex(model, "Hz") + if hz < 3 { + return + } + var multiplier int64 + switch model[hz-1] { + case 'M': + multiplier = 1000 * 1000 + case 'G': + multiplier = 1000 * 1000 * 1000 + case 'T': + multiplier = 1000 * 1000 * 1000 * 1000 + } + if multiplier == 0 { + return + } + freq := int64(0) + divisor := int64(0) + decimalShift := int64(1) + var i int + for i = hz - 2; i >= 0 && model[i] != ' '; i-- { + if model[i] >= '0' && model[i] <= '9' { + freq += int64(model[i]-'0') * decimalShift + decimalShift *= 10 + } else if model[i] == '.' { + if divisor != 0 { + return + } + divisor = decimalShift + } else { + return + } + } + // we didn't find a space + if i < 0 { + return + } + if divisor != 0 { + c.Hz = (freq * multiplier) / divisor + return + } + c.Hz = freq * multiplier +} + +// VM Will return true if the cpu id indicates we are in +// a virtual machine. +func (c CPUInfo) VM() bool { + return CPU.featureSet.inSet(HYPERVISOR) +} + +// flags contains detected cpu features and characteristics +type flags uint64 + +// log2(bits_in_uint64) +const flagBitsLog2 = 6 +const flagBits = 1 << flagBitsLog2 +const flagMask = flagBits - 1 + +// flagSet contains detected cpu features and characteristics in an array of flags +type flagSet [(lastID + flagMask) / flagBits]flags + +func (s *flagSet) inSet(feat FeatureID) bool { + return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0 +} + +func (s *flagSet) set(feat FeatureID) { + s[feat>>flagBitsLog2] |= 1 << (feat & flagMask) +} + +// setIf will set a feature if boolean is true. 
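
The `flagSet` defined above is a plain bitset over 64-bit words: `feat >> 6` selects the word and `feat & 63` selects the bit inside it. A simplified, hypothetical mirror of that indexing (not the library's own code):

```go
package main

import "fmt"

// bitset mirrors the flagSet indexing: one bit per feature ID,
// packed into 64-bit words.
type bitset [4]uint64 // 4 words = room for 256 IDs in this sketch

func (b *bitset) set(id int)      { b[id>>6] |= 1 << (id & 63) }
func (b *bitset) has(id int) bool { return b[id>>6]&(1<<(id&63)) != 0 }

func main() {
	var b bitset
	b.set(70)                         // word 1, bit 6
	fmt.Println(b.has(70), b.has(71)) // true false
}
```
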
+func (s *flagSet) setIf(cond bool, features ...FeatureID) { + if cond { + for _, offset := range features { + s[offset>>flagBitsLog2] |= 1 << (offset & flagMask) + } + } +} + +func (s *flagSet) unset(offset FeatureID) { + bit := flags(1 << (offset & flagMask)) + s[offset>>flagBitsLog2] = s[offset>>flagBitsLog2] & ^bit +} + +// or with another flagset. +func (s *flagSet) or(other flagSet) { + for i, v := range other[:] { + s[i] |= v + } +} + +// hasSet returns whether all features are present. +func (s *flagSet) hasSet(other flagSet) bool { + for i, v := range other[:] { + if s[i]&v != v { + return false + } + } + return true +} + +// hasSet returns whether all features are present. +func (s *flagSet) hasSetP(other *flagSet) bool { + for i, v := range other[:] { + if s[i]&v != v { + return false + } + } + return true +} + +// hasOneOf returns whether one or more features are present. +func (s *flagSet) hasOneOf(other *flagSet) bool { + for i, v := range other[:] { + if s[i]&v != 0 { + return true + } + } + return false +} + +// nEnabled will return the number of enabled flags. +func (s *flagSet) nEnabled() (n int) { + for _, v := range s[:] { + n += bits.OnesCount64(uint64(v)) + } + return n +} + +func flagSetWith(feat ...FeatureID) flagSet { + var res flagSet + for _, f := range feat { + res.set(f) + } + return res +} + +// ParseFeature will parse the string and return the ID of the matching feature. +// Will return UNKNOWN if not found. +func ParseFeature(s string) FeatureID { + s = strings.ToUpper(s) + for i := firstID; i < lastID; i++ { + if i.String() == s { + return i + } + } + return UNKNOWN +} + +// Strings returns an array of the detected features for FlagsSet. +func (s flagSet) Strings() []string { + if len(s) == 0 { + return []string{""} + } + r := make([]string, 0) + for i := firstID; i < lastID; i++ { + if s.inSet(i) { + r = append(r, i.String()) + } + } + return r +} + +func maxExtendedFunction() uint32 { + eax, _, _, _ := cpuid(0x80000000) + return eax +} + +func maxFunctionID() uint32 { + a, _, _, _ := cpuid(0) + return a +} + +func brandName() string { + if maxExtendedFunction() >= 0x80000004 { + v := make([]uint32, 0, 48) + for i := uint32(0); i < 3; i++ { + a, b, c, d := cpuid(0x80000002 + i) + v = append(v, a, b, c, d) + } + return strings.Trim(string(valAsString(v...)), " ") + } + return "unknown" +} + +func threadsPerCore() int { + mfi := maxFunctionID() + vend, _ := vendorID() + + if mfi < 0x4 || (vend != Intel && vend != AMD) { + return 1 + } + + if mfi < 0xb { + if vend != Intel { + return 1 + } + _, b, _, d := cpuid(1) + if (d & (1 << 28)) != 0 { + // v will contain logical core count + v := (b >> 16) & 255 + if v > 1 { + a4, _, _, _ := cpuid(4) + // physical cores + v2 := (a4 >> 26) + 1 + if v2 > 0 { + return int(v) / int(v2) + } + } + } + return 1 + } + _, b, _, _ := cpuidex(0xb, 0) + if b&0xffff == 0 { + if vend == AMD { + // if >= Zen 2 0x8000001e EBX 15-8 bits means threads per core. 
+ // The number of threads per core is ThreadsPerCore+1 + // See PPR for AMD Family 17h Models 00h-0Fh (page 82) + fam, _, _ := familyModel() + _, _, _, d := cpuid(1) + if (d&(1<<28)) != 0 && fam >= 23 { + if maxExtendedFunction() >= 0x8000001e { + _, b, _, _ := cpuid(0x8000001e) + return int((b>>8)&0xff) + 1 + } + return 2 + } + } + return 1 + } + return int(b & 0xffff) +} + +func logicalCores() int { + mfi := maxFunctionID() + v, _ := vendorID() + switch v { + case Intel: + // Use this on old Intel processors + if mfi < 0xb { + if mfi < 1 { + return 0 + } + // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) + // that can be assigned to logical processors in a physical package. + // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. + _, ebx, _, _ := cpuid(1) + logical := (ebx >> 16) & 0xff + return int(logical) + } + _, b, _, _ := cpuidex(0xb, 1) + return int(b & 0xffff) + case AMD, Hygon: + _, b, _, _ := cpuid(1) + return int((b >> 16) & 0xff) + default: + return 0 + } +} + +func familyModel() (family, model, stepping int) { + if maxFunctionID() < 0x1 { + return 0, 0, 0 + } + eax, _, _, _ := cpuid(1) + // If BaseFamily[3:0] is less than Fh then ExtendedFamily[7:0] is reserved and Family is equal to BaseFamily[3:0]. + family = int((eax >> 8) & 0xf) + extFam := family == 0x6 // Intel is 0x6, needs extended model. + if family == 0xf { + // Add ExtFamily + family += int((eax >> 20) & 0xff) + extFam = true + } + // If BaseFamily[3:0] is less than 0Fh then ExtendedModel[3:0] is reserved and Model is equal to BaseModel[3:0]. + model = int((eax >> 4) & 0xf) + if extFam { + // Add ExtModel + model += int((eax >> 12) & 0xf0) + } + stepping = int(eax & 0xf) + return family, model, stepping +} + +func physicalCores() int { + v, _ := vendorID() + switch v { + case Intel: + lc := logicalCores() + tpc := threadsPerCore() + if lc > 0 && tpc > 0 { + return lc / tpc + } + return 0 + case AMD, Hygon: + lc := logicalCores() + tpc := threadsPerCore() + if lc > 0 && tpc > 0 { + return lc / tpc + } + + // The following is inaccurate on AMD EPYC 7742 64-Core Processor + if maxExtendedFunction() >= 0x80000008 { + _, _, c, _ := cpuid(0x80000008) + if c&0xff > 0 { + return int(c&0xff) + 1 + } + } + } + return 0 +} + +// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID +var vendorMapping = map[string]Vendor{ + "AMDisbetter!": AMD, + "AuthenticAMD": AMD, + "CentaurHauls": VIA, + "GenuineIntel": Intel, + "TransmetaCPU": Transmeta, + "GenuineTMx86": Transmeta, + "Geode by NSC": NSC, + "VIA VIA VIA ": VIA, + "KVMKVMKVM": KVM, + "Linux KVM Hv": KVM, + "TCGTCGTCGTCG": QEMU, + "Microsoft Hv": MSVM, + "VMwareVMware": VMware, + "XenVMMXenVMM": XenHVM, + "bhyve bhyve ": Bhyve, + "HygonGenuine": Hygon, + "Vortex86 SoC": SiS, + "SiS SiS SiS ": SiS, + "RiseRiseRise": SiS, + "Genuine RDC": RDC, + "QNXQVMBSQG": QNX, + "ACRNACRNACRN": ACRN, + "SRESRESRESRE": SRE, + "Apple VZ": Apple, +} + +func vendorID() (Vendor, string) { + _, b, c, d := cpuid(0) + v := string(valAsString(b, d, c)) + vend, ok := vendorMapping[v] + if !ok { + return VendorUnknown, v + } + return vend, v +} + +func hypervisorVendorID() (Vendor, string) { + // https://lwn.net/Articles/301888/ + _, b, c, d := cpuid(0x40000000) + v := string(valAsString(b, c, d)) + vend, ok := vendorMapping[v] + if !ok { + return VendorUnknown, v + } + return vend, v +} + +func cacheLine() int { + if maxFunctionID() < 0x1 { + return 0 + } + + _, ebx, _, _ 
:= cpuid(1) + cache := (ebx & 0xff00) >> 5 // cflush size + if cache == 0 && maxExtendedFunction() >= 0x80000006 { + _, _, ecx, _ := cpuid(0x80000006) + cache = ecx & 0xff // cacheline size + } + // TODO: Read from Cache and TLB Information + return int(cache) +} + +func (c *CPUInfo) cacheSize() { + c.Cache.L1D = -1 + c.Cache.L1I = -1 + c.Cache.L2 = -1 + c.Cache.L3 = -1 + vendor, _ := vendorID() + switch vendor { + case Intel: + if maxFunctionID() < 4 { + return + } + c.Cache.L1I, c.Cache.L1D, c.Cache.L2, c.Cache.L3 = 0, 0, 0, 0 + for i := uint32(0); ; i++ { + eax, ebx, ecx, _ := cpuidex(4, i) + cacheType := eax & 15 + if cacheType == 0 { + break + } + cacheLevel := (eax >> 5) & 7 + coherency := int(ebx&0xfff) + 1 + partitions := int((ebx>>12)&0x3ff) + 1 + associativity := int((ebx>>22)&0x3ff) + 1 + sets := int(ecx) + 1 + size := associativity * partitions * coherency * sets + switch cacheLevel { + case 1: + if cacheType == 1 { + // 1 = Data Cache + c.Cache.L1D = size + } else if cacheType == 2 { + // 2 = Instruction Cache + c.Cache.L1I = size + } else { + if c.Cache.L1D < 0 { + c.Cache.L1I = size + } + if c.Cache.L1I < 0 { + c.Cache.L1I = size + } + } + case 2: + c.Cache.L2 = size + case 3: + c.Cache.L3 = size + } + } + case AMD, Hygon: + // Untested. + if maxExtendedFunction() < 0x80000005 { + return + } + _, _, ecx, edx := cpuid(0x80000005) + c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024) + c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024) + + if maxExtendedFunction() < 0x80000006 { + return + } + _, _, ecx, _ = cpuid(0x80000006) + c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) + + // CPUID Fn8000_001D_EAX_x[N:0] Cache Properties + if maxExtendedFunction() < 0x8000001D || !c.Has(TOPEXT) { + return + } + + // Xen Hypervisor is buggy and returns the same entry no matter ECX value. + // Hack: When we encounter the same entry 100 times we break. + nSame := 0 + var last uint32 + for i := uint32(0); i < math.MaxUint32; i++ { + eax, ebx, ecx, _ := cpuidex(0x8000001D, i) + + level := (eax >> 5) & 7 + cacheNumSets := ecx + 1 + cacheLineSize := 1 + (ebx & 2047) + cachePhysPartitions := 1 + ((ebx >> 12) & 511) + cacheNumWays := 1 + ((ebx >> 22) & 511) + + typ := eax & 15 + size := int(cacheNumSets * cacheLineSize * cachePhysPartitions * cacheNumWays) + if typ == 0 { + return + } + + // Check for the same value repeated. 
+ comb := eax ^ ebx ^ ecx + if comb == last { + nSame++ + if nSame == 100 { + return + } + } + last = comb + + switch level { + case 1: + switch typ { + case 1: + // Data cache + c.Cache.L1D = size + case 2: + // Inst cache + c.Cache.L1I = size + default: + if c.Cache.L1D < 0 { + c.Cache.L1I = size + } + if c.Cache.L1I < 0 { + c.Cache.L1I = size + } + } + case 2: + c.Cache.L2 = size + case 3: + c.Cache.L3 = size + } + } + } +} + +type SGXEPCSection struct { + BaseAddress uint64 + EPCSize uint64 +} + +type SGXSupport struct { + Available bool + LaunchControl bool + SGX1Supported bool + SGX2Supported bool + MaxEnclaveSizeNot64 int64 + MaxEnclaveSize64 int64 + EPCSections []SGXEPCSection +} + +func hasSGX(available, lc bool) (rval SGXSupport) { + rval.Available = available + + if !available { + return + } + + rval.LaunchControl = lc + + a, _, _, d := cpuidex(0x12, 0) + rval.SGX1Supported = a&0x01 != 0 + rval.SGX2Supported = a&0x02 != 0 + rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2 + rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2 + rval.EPCSections = make([]SGXEPCSection, 0) + + for subleaf := uint32(2); subleaf < 2+8; subleaf++ { + eax, ebx, ecx, edx := cpuidex(0x12, subleaf) + leafType := eax & 0xf + + if leafType == 0 { + // Invalid subleaf, stop iterating + break + } else if leafType == 1 { + // EPC Section subleaf + baseAddress := uint64(eax&0xfffff000) + (uint64(ebx&0x000fffff) << 32) + size := uint64(ecx&0xfffff000) + (uint64(edx&0x000fffff) << 32) + + section := SGXEPCSection{BaseAddress: baseAddress, EPCSize: size} + rval.EPCSections = append(rval.EPCSections, section) + } + } + + return +} + +type AMDMemEncryptionSupport struct { + Available bool + CBitPossition uint32 + NumVMPL uint32 + PhysAddrReduction uint32 + NumEntryptedGuests uint32 + MinSevNoEsAsid uint32 +} + +func hasAMDMemEncryption(available bool) (rval AMDMemEncryptionSupport) { + rval.Available = available + if !available { + return + } + + _, b, c, d := cpuidex(0x8000001f, 0) + + rval.CBitPossition = b & 0x3f + rval.PhysAddrReduction = (b >> 6) & 0x3F + rval.NumVMPL = (b >> 12) & 0xf + rval.NumEntryptedGuests = c + rval.MinSevNoEsAsid = d + + return +} + +func support() flagSet { + var fs flagSet + mfi := maxFunctionID() + vend, _ := vendorID() + if mfi < 0x1 { + return fs + } + family, model, _ := familyModel() + + _, _, c, d := cpuid(1) + fs.setIf((d&(1<<0)) != 0, X87) + fs.setIf((d&(1<<8)) != 0, CMPXCHG8) + fs.setIf((d&(1<<11)) != 0, SYSEE) + fs.setIf((d&(1<<15)) != 0, CMOV) + fs.setIf((d&(1<<23)) != 0, MMX) + fs.setIf((d&(1<<24)) != 0, FXSR) + fs.setIf((d&(1<<25)) != 0, FXSROPT) + fs.setIf((d&(1<<25)) != 0, SSE) + fs.setIf((d&(1<<26)) != 0, SSE2) + fs.setIf((c&1) != 0, SSE3) + fs.setIf((c&(1<<5)) != 0, VMX) + fs.setIf((c&(1<<9)) != 0, SSSE3) + fs.setIf((c&(1<<19)) != 0, SSE4) + fs.setIf((c&(1<<20)) != 0, SSE42) + fs.setIf((c&(1<<25)) != 0, AESNI) + fs.setIf((c&(1<<1)) != 0, CLMUL) + fs.setIf(c&(1<<22) != 0, MOVBE) + fs.setIf(c&(1<<23) != 0, POPCNT) + fs.setIf(c&(1<<30) != 0, RDRAND) + + // This bit has been reserved by Intel & AMD for use by hypervisors, + // and indicates the presence of a hypervisor. 
+ fs.setIf(c&(1<<31) != 0, HYPERVISOR) + fs.setIf(c&(1<<29) != 0, F16C) + fs.setIf(c&(1<<13) != 0, CX16) + + if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 { + fs.setIf(threadsPerCore() > 1, HTT) + } + if vend == AMD && (d&(1<<28)) != 0 && mfi >= 4 { + fs.setIf(threadsPerCore() > 1, HTT) + } + fs.setIf(c&1<<26 != 0, XSAVE) + fs.setIf(c&1<<27 != 0, OSXSAVE) + // Check XGETBV/XSAVE (26), OXSAVE (27) and AVX (28) bits + const avxCheck = 1<<26 | 1<<27 | 1<<28 + if c&avxCheck == avxCheck { + // Check for OS support + eax, _ := xgetbv(0) + if (eax & 0x6) == 0x6 { + fs.set(AVX) + switch vend { + case Intel: + // Older than Haswell. + fs.setIf(family == 6 && model < 60, AVXSLOW) + case AMD: + // Older than Zen 2 + fs.setIf(family < 23 || (family == 23 && model < 49), AVXSLOW) + } + } + } + // FMA3 can be used with SSE registers, so no OS support is strictly needed. + // fma3 and OSXSAVE needed. + const fma3Check = 1<<12 | 1<<27 + fs.setIf(c&fma3Check == fma3Check, FMA3) + + // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. + if mfi >= 7 { + _, ebx, ecx, edx := cpuidex(7, 0) + if fs.inSet(AVX) && (ebx&0x00000020) != 0 { + fs.set(AVX2) + } + // CPUID.(EAX=7, ECX=0).EBX + if (ebx & 0x00000008) != 0 { + fs.set(BMI1) + fs.setIf((ebx&0x00000100) != 0, BMI2) + } + fs.setIf(ebx&(1<<2) != 0, SGX) + fs.setIf(ebx&(1<<4) != 0, HLE) + fs.setIf(ebx&(1<<9) != 0, ERMS) + fs.setIf(ebx&(1<<11) != 0, RTM) + fs.setIf(ebx&(1<<14) != 0, MPX) + fs.setIf(ebx&(1<<18) != 0, RDSEED) + fs.setIf(ebx&(1<<19) != 0, ADX) + fs.setIf(ebx&(1<<29) != 0, SHA) + + // CPUID.(EAX=7, ECX=0).ECX + fs.setIf(ecx&(1<<5) != 0, WAITPKG) + fs.setIf(ecx&(1<<7) != 0, CETSS) + fs.setIf(ecx&(1<<8) != 0, GFNI) + fs.setIf(ecx&(1<<9) != 0, VAES) + fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ) + fs.setIf(ecx&(1<<13) != 0, TME) + fs.setIf(ecx&(1<<25) != 0, CLDEMOTE) + fs.setIf(ecx&(1<<23) != 0, KEYLOCKER) + fs.setIf(ecx&(1<<27) != 0, MOVDIRI) + fs.setIf(ecx&(1<<28) != 0, MOVDIR64B) + fs.setIf(ecx&(1<<29) != 0, ENQCMD) + fs.setIf(ecx&(1<<30) != 0, SGXLC) + + // CPUID.(EAX=7, ECX=0).EDX + fs.setIf(edx&(1<<4) != 0, FSRM) + fs.setIf(edx&(1<<9) != 0, SRBDS_CTRL) + fs.setIf(edx&(1<<10) != 0, MD_CLEAR) + fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT) + fs.setIf(edx&(1<<14) != 0, SERIALIZE) + fs.setIf(edx&(1<<15) != 0, HYBRID_CPU) + fs.setIf(edx&(1<<16) != 0, TSXLDTRK) + fs.setIf(edx&(1<<18) != 0, PCONFIG) + fs.setIf(edx&(1<<20) != 0, CETIBT) + fs.setIf(edx&(1<<26) != 0, IBPB) + fs.setIf(edx&(1<<27) != 0, STIBP) + fs.setIf(edx&(1<<28) != 0, FLUSH_L1D) + fs.setIf(edx&(1<<29) != 0, IA32_ARCH_CAP) + fs.setIf(edx&(1<<30) != 0, IA32_CORE_CAP) + fs.setIf(edx&(1<<31) != 0, SPEC_CTRL_SSBD) + + // CPUID.(EAX=7, ECX=1).EAX + eax1, _, _, edx1 := cpuidex(7, 1) + fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI) + fs.setIf(eax1&(1<<1) != 0, SM3_X86) + fs.setIf(eax1&(1<<2) != 0, SM4_X86) + fs.setIf(eax1&(1<<7) != 0, CMPCCXADD) + fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL) + fs.setIf(eax1&(1<<11) != 0, STOSB_SHORT) + fs.setIf(eax1&(1<<12) != 0, CMPSB_SCADBS_SHORT) + fs.setIf(eax1&(1<<22) != 0, HRESET) + fs.setIf(eax1&(1<<23) != 0, AVXIFMA) + fs.setIf(eax1&(1<<26) != 0, LAM) + + // CPUID.(EAX=7, ECX=1).EDX + fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8) + fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT) + fs.setIf(edx1&(1<<6) != 0, AMXTRANSPOSE) + fs.setIf(edx1&(1<<7) != 0, AMXTF32) + fs.setIf(edx1&(1<<8) != 0, AMXCOMPLEX) + fs.setIf(edx1&(1<<10) != 0, AVXVNNIINT16) + fs.setIf(edx1&(1<<14) != 0, PREFETCHI) + fs.setIf(edx1&(1<<19) != 0, AVX10) + fs.setIf(edx1&(1<<21) != 0, APX_F) + + // Only 
detect AVX-512 features if XGETBV is supported + if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { + // Check for OS support + eax, _ := xgetbv(0) + + // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and + // ZMM16-ZMM31 state are enabled by OS) + /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). + hasAVX512 := (eax>>5)&7 == 7 && (eax>>1)&3 == 3 + if runtime.GOOS == "darwin" { + hasAVX512 = fs.inSet(AVX) && darwinHasAVX512() + } + if hasAVX512 { + fs.setIf(ebx&(1<<16) != 0, AVX512F) + fs.setIf(ebx&(1<<17) != 0, AVX512DQ) + fs.setIf(ebx&(1<<21) != 0, AVX512IFMA) + fs.setIf(ebx&(1<<26) != 0, AVX512PF) + fs.setIf(ebx&(1<<27) != 0, AVX512ER) + fs.setIf(ebx&(1<<28) != 0, AVX512CD) + fs.setIf(ebx&(1<<30) != 0, AVX512BW) + fs.setIf(ebx&(1<<31) != 0, AVX512VL) + // ecx + fs.setIf(ecx&(1<<1) != 0, AVX512VBMI) + fs.setIf(ecx&(1<<3) != 0, AMXFP8) + fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2) + fs.setIf(ecx&(1<<11) != 0, AVX512VNNI) + fs.setIf(ecx&(1<<12) != 0, AVX512BITALG) + fs.setIf(ecx&(1<<14) != 0, AVX512VPOPCNTDQ) + // edx + fs.setIf(edx&(1<<8) != 0, AVX512VP2INTERSECT) + fs.setIf(edx&(1<<22) != 0, AMXBF16) + fs.setIf(edx&(1<<23) != 0, AVX512FP16) + fs.setIf(edx&(1<<24) != 0, AMXTILE) + fs.setIf(edx&(1<<25) != 0, AMXINT8) + // eax1 = CPUID.(EAX=7, ECX=1).EAX + fs.setIf(eax1&(1<<5) != 0, AVX512BF16) + fs.setIf(eax1&(1<<19) != 0, WRMSRNS) + fs.setIf(eax1&(1<<21) != 0, AMXFP16) + fs.setIf(eax1&(1<<27) != 0, MSRLIST) + } + } + + // CPUID.(EAX=7, ECX=2) + _, _, _, edx = cpuidex(7, 2) + fs.setIf(edx&(1<<0) != 0, PSFD) + fs.setIf(edx&(1<<1) != 0, IDPRED_CTRL) + fs.setIf(edx&(1<<2) != 0, RRSBA_CTRL) + fs.setIf(edx&(1<<4) != 0, BHI_CTRL) + fs.setIf(edx&(1<<5) != 0, MCDT_NO) + + if fs.inSet(SGX) { + eax, _, _, _ := cpuidex(0x12, 0) + fs.setIf(eax&(1<<12) != 0, SGXPQC) + } + + // Add keylocker features. + if fs.inSet(KEYLOCKER) && mfi >= 0x19 { + _, ebx, _, _ := cpuidex(0x19, 0) + fs.setIf(ebx&5 == 5, KEYLOCKERW) // Bit 0 and 2 (1+4) + } + + // Add AVX10 features. + if fs.inSet(AVX10) && mfi >= 0x24 { + _, ebx, _, _ := cpuidex(0x24, 0) + fs.setIf(ebx&(1<<16) != 0, AVX10_128) + fs.setIf(ebx&(1<<17) != 0, AVX10_256) + fs.setIf(ebx&(1<<18) != 0, AVX10_512) + } + + } + + // Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1) + // EAX + // Bit 00: XSAVEOPT is available. + // Bit 01: Supports XSAVEC and the compacted form of XRSTOR if set. + // Bit 02: Supports XGETBV with ECX = 1 if set. + // Bit 03: Supports XSAVES/XRSTORS and IA32_XSS if set. + // Bits 31 - 04: Reserved. + // EBX + // Bits 31 - 00: The size in bytes of the XSAVE area containing all states enabled by XCRO | IA32_XSS. + // ECX + // Bits 31 - 00: Reports the supported bits of the lower 32 bits of the IA32_XSS MSR. IA32_XSS[n] can be set to 1 only if ECX[n] is 1. + // EDX? + // Bits 07 - 00: Used for XCR0. Bit 08: PT state. Bit 09: Used for XCR0. Bits 12 - 10: Reserved. Bit 13: HWP state. Bits 31 - 14: Reserved. 
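
Because the detection above only sets the AVX-512 feature bits once XCR0 (or the Darwin commpage) confirms the OS has enabled the required register state, a consumer does not need its own XGETBV check; a plain `Supports` call is enough. A sketch:

```go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// These bits are only set when the OS exposes ZMM/opmask state,
	// so this single check can gate an AVX-512 code path.
	ok := cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512BW, cpuid.AVX512DQ, cpuid.AVX512VL)
	fmt.Println("AVX-512 (F/BW/DQ/VL) usable:", ok)
}
```
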
+ if mfi >= 0xd { + if fs.inSet(XSAVE) { + eax, _, _, _ := cpuidex(0xd, 1) + fs.setIf(eax&(1<<0) != 0, XSAVEOPT) + fs.setIf(eax&(1<<1) != 0, XSAVEC) + fs.setIf(eax&(1<<2) != 0, XGETBV1) + fs.setIf(eax&(1<<3) != 0, XSAVES) + } + } + if maxExtendedFunction() >= 0x80000001 { + _, _, c, d := cpuid(0x80000001) + if (c & (1 << 5)) != 0 { + fs.set(LZCNT) + fs.set(POPCNT) + } + // ECX + fs.setIf((c&(1<<0)) != 0, LAHF) + fs.setIf((c&(1<<2)) != 0, SVM) + fs.setIf((c&(1<<6)) != 0, SSE4A) + fs.setIf((c&(1<<10)) != 0, IBS) + fs.setIf((c&(1<<22)) != 0, TOPEXT) + + // EDX + fs.setIf(d&(1<<11) != 0, SYSCALL) + fs.setIf(d&(1<<20) != 0, NX) + fs.setIf(d&(1<<22) != 0, MMXEXT) + fs.setIf(d&(1<<23) != 0, MMX) + fs.setIf(d&(1<<24) != 0, FXSR) + fs.setIf(d&(1<<25) != 0, FXSROPT) + fs.setIf(d&(1<<27) != 0, RDTSCP) + fs.setIf(d&(1<<30) != 0, AMD3DNOWEXT) + fs.setIf(d&(1<<31) != 0, AMD3DNOW) + + /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be + * used unless the OS has AVX support. */ + if fs.inSet(AVX) { + fs.setIf((c&(1<<11)) != 0, XOP) + fs.setIf((c&(1<<16)) != 0, FMA4) + } + + } + if maxExtendedFunction() >= 0x80000007 { + _, b, _, d := cpuid(0x80000007) + fs.setIf((b&(1<<0)) != 0, MCAOVERFLOW) + fs.setIf((b&(1<<1)) != 0, SUCCOR) + fs.setIf((b&(1<<2)) != 0, HWA) + fs.setIf((d&(1<<9)) != 0, CPBOOST) + } + + if maxExtendedFunction() >= 0x80000008 { + _, b, _, _ := cpuid(0x80000008) + fs.setIf(b&(1<<28) != 0, PSFD) + fs.setIf(b&(1<<27) != 0, CPPC) + fs.setIf(b&(1<<24) != 0, SPEC_CTRL_SSBD) + fs.setIf(b&(1<<23) != 0, PPIN) + fs.setIf(b&(1<<21) != 0, TLB_FLUSH_NESTED) + fs.setIf(b&(1<<20) != 0, EFER_LMSLE_UNS) + fs.setIf(b&(1<<19) != 0, IBRS_PROVIDES_SMP) + fs.setIf(b&(1<<18) != 0, IBRS_PREFERRED) + fs.setIf(b&(1<<17) != 0, STIBP_ALWAYSON) + fs.setIf(b&(1<<15) != 0, STIBP) + fs.setIf(b&(1<<14) != 0, IBRS) + fs.setIf((b&(1<<13)) != 0, INT_WBINVD) + fs.setIf(b&(1<<12) != 0, IBPB) + fs.setIf((b&(1<<9)) != 0, WBNOINVD) + fs.setIf((b&(1<<8)) != 0, MCOMMIT) + fs.setIf((b&(1<<4)) != 0, RDPRU) + fs.setIf((b&(1<<3)) != 0, INVLPGB) + fs.setIf((b&(1<<1)) != 0, MSRIRC) + fs.setIf((b&(1<<0)) != 0, CLZERO) + } + + if fs.inSet(SVM) && maxExtendedFunction() >= 0x8000000A { + _, _, _, edx := cpuid(0x8000000A) + fs.setIf((edx>>0)&1 == 1, SVMNP) + fs.setIf((edx>>1)&1 == 1, LBRVIRT) + fs.setIf((edx>>2)&1 == 1, SVML) + fs.setIf((edx>>3)&1 == 1, NRIPS) + fs.setIf((edx>>4)&1 == 1, TSCRATEMSR) + fs.setIf((edx>>5)&1 == 1, VMCBCLEAN) + fs.setIf((edx>>6)&1 == 1, SVMFBASID) + fs.setIf((edx>>7)&1 == 1, SVMDA) + fs.setIf((edx>>10)&1 == 1, SVMPF) + fs.setIf((edx>>12)&1 == 1, SVMPFT) + } + + if maxExtendedFunction() >= 0x8000001a { + eax, _, _, _ := cpuid(0x8000001a) + fs.setIf((eax>>0)&1 == 1, FP128) + fs.setIf((eax>>1)&1 == 1, MOVU) + fs.setIf((eax>>2)&1 == 1, FP256) + } + + if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) { + eax, _, _, _ := cpuid(0x8000001b) + fs.setIf((eax>>0)&1 == 1, IBSFFV) + fs.setIf((eax>>1)&1 == 1, IBSFETCHSAM) + fs.setIf((eax>>2)&1 == 1, IBSOPSAM) + fs.setIf((eax>>3)&1 == 1, IBSRDWROPCNT) + fs.setIf((eax>>4)&1 == 1, IBSOPCNT) + fs.setIf((eax>>5)&1 == 1, IBSBRNTRGT) + fs.setIf((eax>>6)&1 == 1, IBSOPCNTEXT) + fs.setIf((eax>>7)&1 == 1, IBSRIPINVALIDCHK) + fs.setIf((eax>>8)&1 == 1, IBS_OPFUSE) + fs.setIf((eax>>9)&1 == 1, IBS_FETCH_CTLX) + fs.setIf((eax>>10)&1 == 1, IBS_OPDATA4) // Doc says "Fixed,0. IBS op data 4 MSR supported", but assuming they mean 1. 
+ fs.setIf((eax>>11)&1 == 1, IBS_ZEN4) + } + + if maxExtendedFunction() >= 0x8000001f && vend == AMD { + a, _, _, _ := cpuid(0x8000001f) + fs.setIf((a>>0)&1 == 1, SME) + fs.setIf((a>>1)&1 == 1, SEV) + fs.setIf((a>>2)&1 == 1, MSR_PAGEFLUSH) + fs.setIf((a>>3)&1 == 1, SEV_ES) + fs.setIf((a>>4)&1 == 1, SEV_SNP) + fs.setIf((a>>5)&1 == 1, VMPL) + fs.setIf((a>>10)&1 == 1, SME_COHERENT) + fs.setIf((a>>11)&1 == 1, SEV_64BIT) + fs.setIf((a>>12)&1 == 1, SEV_RESTRICTED) + fs.setIf((a>>13)&1 == 1, SEV_ALTERNATIVE) + fs.setIf((a>>14)&1 == 1, SEV_DEBUGSWAP) + fs.setIf((a>>15)&1 == 1, IBS_PREVENTHOST) + fs.setIf((a>>16)&1 == 1, VTE) + fs.setIf((a>>24)&1 == 1, VMSA_REGPROT) + } + + if maxExtendedFunction() >= 0x80000021 && vend == AMD { + a, _, c, _ := cpuid(0x80000021) + fs.setIf((a>>31)&1 == 1, SRSO_MSR_FIX) + fs.setIf((a>>30)&1 == 1, SRSO_USER_KERNEL_NO) + fs.setIf((a>>29)&1 == 1, SRSO_NO) + fs.setIf((a>>28)&1 == 1, IBPB_BRTYPE) + fs.setIf((a>>27)&1 == 1, SBPB) + fs.setIf((c>>1)&1 == 1, TSA_L1_NO) + fs.setIf((c>>2)&1 == 1, TSA_SQ_NO) + fs.setIf((a>>5)&1 == 1, TSA_VERW_CLEAR) + } + if vend == AMD { + if family < 0x19 { + // AMD CPUs that are older than Family 19h are not vulnerable to TSA but do not set TSA_L1_NO or TSA_SQ_NO. + // Source: https://www.amd.com/content/dam/amd/en/documents/resources/bulletin/technical-guidance-for-mitigating-transient-scheduler-attacks.pdf + fs.set(TSA_L1_NO) + fs.set(TSA_SQ_NO) + } else if family == 0x1a { + // AMD Family 1Ah models 00h-4Fh and 60h-7Fh are also not vulnerable to TSA but do not set TSA_L1_NO or TSA_SQ_NO. + // Future AMD CPUs will set these CPUID bits if appropriate. CPUs will be designed to set these CPUID bits if appropriate. + notVuln := model <= 0x4f || (model >= 0x60 && model <= 0x7f) + fs.setIf(notVuln, TSA_L1_NO, TSA_SQ_NO) + } + } + + if mfi >= 0x20 { + // Microsoft has decided to purposefully hide the information + // of the guest TEE when VMs are being created using Hyper-V. + // + // This leads us to check for the Hyper-V cpuid features + // (0x4000000C), and then for the `ebx` value set. + // + // For Intel TDX, `ebx` is set as `0xbe3`, being 3 the part + // we're mostly interested about,according to: + // https://github.com/torvalds/linux/blob/d2f51b3516dade79269ff45eae2a7668ae711b25/arch/x86/include/asm/hyperv-tlfs.h#L169-L174 + _, ebx, _, _ := cpuid(0x4000000C) + fs.setIf(ebx == 0xbe3, TDX_GUEST) + } + + if mfi >= 0x21 { + // Intel Trusted Domain Extensions Guests have their own cpuid leaf (0x21). 
+ _, ebx, ecx, edx := cpuid(0x21) + identity := string(valAsString(ebx, edx, ecx)) + fs.setIf(identity == "IntelTDX ", TDX_GUEST) + } + + return fs +} + +func (c *CPUInfo) supportAVX10() uint8 { + if c.maxFunc >= 0x24 && c.featureSet.inSet(AVX10) { + _, ebx, _, _ := cpuidex(0x24, 0) + return uint8(ebx) + } + return 0 +} + +func valAsString(values ...uint32) []byte { + r := make([]byte, 4*len(values)) + for i, v := range values { + dst := r[i*4:] + dst[0] = byte(v & 0xff) + dst[1] = byte((v >> 8) & 0xff) + dst[2] = byte((v >> 16) & 0xff) + dst[3] = byte((v >> 24) & 0xff) + switch { + case dst[0] == 0: + return r[:i*4] + case dst[1] == 0: + return r[:i*4+1] + case dst[2] == 0: + return r[:i*4+2] + case dst[3] == 0: + return r[:i*4+3] + } + } + return r +} + +func parseLeaf0AH(c *CPUInfo, eax, ebx, edx uint32) (info PerformanceMonitoringInfo) { + info.VersionID = uint8(eax & 0xFF) + info.NumGPCounters = uint8((eax >> 8) & 0xFF) + info.GPPMCWidth = uint8((eax >> 16) & 0xFF) + + info.RawEBX = ebx + info.RawEAX = eax + info.RawEDX = edx + + if info.VersionID > 1 { // This information is only valid if VersionID > 1 + info.NumFixedPMC = uint8(edx & 0x1F) // Bits 4:0 + info.FixedPMCWidth = uint8((edx >> 5) & 0xFF) // Bits 12:5 + } + if info.VersionID > 0 { + // first 4 fixed events are always instructions retired, cycles, ref cycles and topdown slots + if ebx == 0x0 && info.NumFixedPMC == 3 { + c.featureSet.set(PMU_FIXEDCOUNTER_INSTRUCTIONS) + c.featureSet.set(PMU_FIXEDCOUNTER_CYCLES) + c.featureSet.set(PMU_FIXEDCOUNTER_REFCYCLES) + } + if ebx == 0x0 && info.NumFixedPMC == 4 { + c.featureSet.set(PMU_FIXEDCOUNTER_INSTRUCTIONS) + c.featureSet.set(PMU_FIXEDCOUNTER_CYCLES) + c.featureSet.set(PMU_FIXEDCOUNTER_REFCYCLES) + c.featureSet.set(PMU_FIXEDCOUNTER_TOPDOWN_SLOTS) + } + if ebx != 0x0 { + if ((ebx >> 0) & 1) == 0 { + c.featureSet.set(PMU_FIXEDCOUNTER_INSTRUCTIONS) + } + if ((ebx >> 1) & 1) == 0 { + c.featureSet.set(PMU_FIXEDCOUNTER_CYCLES) + } + if ((ebx >> 2) & 1) == 0 { + c.featureSet.set(PMU_FIXEDCOUNTER_REFCYCLES) + } + if ((ebx >> 3) & 1) == 0 { + c.featureSet.set(PMU_FIXEDCOUNTER_TOPDOWN_SLOTS) + } + } + } + return info +} diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s new file mode 100644 index 00000000..8587c3a1 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s @@ -0,0 +1,47 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
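
As a worked example of the byte order `valAsString` (above) relies on: CPUID leaf 0 packs the vendor string little-endian across EBX, EDX and ECX, which is why `vendorID` passes the registers in that order. A standalone sketch using the well-known Intel register values:

```go
package main

import "fmt"

// leBytes expands one CPUID result register into its four ASCII bytes,
// least significant byte first, the same way valAsString does.
func leBytes(v uint32) []byte {
	return []byte{byte(v), byte(v >> 8), byte(v >> 16), byte(v >> 24)}
}

func main() {
	// EBX, EDX, ECX as returned by CPUID leaf 0 on Intel parts.
	ebx, edx, ecx := uint32(0x756E6547), uint32(0x49656E69), uint32(0x6C65746E)
	s := append(append(leBytes(ebx), leBytes(edx)...), leBytes(ecx)...)
	fmt.Println(string(s)) // GenuineIntel
}
```
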
+ +//+build 386,!gccgo,!noasm,!appengine + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmDarwinHasAVX512() bool +TEXT ·asmDarwinHasAVX512(SB), 7, $0 + MOVL $0, eax+0(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s new file mode 100644 index 00000000..bc11f894 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s @@ -0,0 +1,72 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//+build amd64,!gccgo,!noasm,!appengine + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmXgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET + +// From https://go-review.googlesource.com/c/sys/+/285572/ +// func asmDarwinHasAVX512() bool +TEXT ·asmDarwinHasAVX512(SB), 7, $0-1 + MOVB $0, ret+0(FP) // default to false + +#ifdef GOOS_darwin // return if not darwin +#ifdef GOARCH_amd64 // return if not amd64 +// These values from: +// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h +#define commpage64_base_address 0x00007fffffe00000 +#define commpage64_cpu_capabilities64 (commpage64_base_address+0x010) +#define commpage64_version (commpage64_base_address+0x01E) +#define hasAVX512F 0x0000004000000000 + MOVQ $commpage64_version, BX + MOVW (BX), AX + CMPW AX, $13 // versions < 13 do not support AVX512 + JL no_avx512 + MOVQ $commpage64_cpu_capabilities64, BX + MOVQ (BX), AX + MOVQ $hasAVX512F, CX + ANDQ CX, AX + JZ no_avx512 + MOVB $1, ret+0(FP) + +no_avx512: +#endif +#endif + RET + diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s new file mode 100644 index 00000000..b196f78e --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s @@ -0,0 +1,36 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
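
The RDTSCP stub above backs the exported `RTCounter` and `Ia32TscAux` accessors; both return 0 when the instruction is unavailable, so a caller only needs one feature check. A usage sketch:

```go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	if !cpuid.CPU.Has(cpuid.RDTSCP) {
		fmt.Println("RDTSCP not supported; counters read as 0")
		return
	}
	fmt.Println("TSC value:", cpuid.CPU.RTCounter())
	fmt.Println("IA32_TSC_AUX:", cpuid.CPU.Ia32TscAux())
}
```
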
+ +//+build arm64,!gccgo,!noasm,!appengine + +// See https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt + +// func getMidr +TEXT ·getMidr(SB), 7, $0 + WORD $0xd5380000 // mrs x0, midr_el1 /* Main ID Register */ + MOVD R0, midr+0(FP) + RET + +// func getProcFeatures +TEXT ·getProcFeatures(SB), 7, $0 + WORD $0xd5380400 // mrs x0, id_aa64pfr0_el1 /* Processor Feature Register 0 */ + MOVD R0, procFeatures+0(FP) + RET + +// func getInstAttributes +TEXT ·getInstAttributes(SB), 7, $0 + WORD $0xd5380600 // mrs x0, id_aa64isar0_el1 /* Instruction Set Attribute Register 0 */ + WORD $0xd5380621 // mrs x1, id_aa64isar1_el1 /* Instruction Set Attribute Register 1 */ + MOVD R0, instAttrReg0+0(FP) + MOVD R1, instAttrReg1+8(FP) + RET + +TEXT ·getVectorLength(SB), 7, $0 + WORD $0xd2800002 // mov x2, #0 + WORD $0x04225022 // addvl x2, x2, #1 + WORD $0xd37df042 // lsl x2, x2, #3 + WORD $0xd2800003 // mov x3, #0 + WORD $0x04635023 // addpl x3, x3, #1 + WORD $0xd37df063 // lsl x3, x3, #3 + MOVD R2, vl+0(FP) + MOVD R3, pl+8(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go new file mode 100644 index 00000000..9ae32d60 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go @@ -0,0 +1,250 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//go:build arm64 && !gccgo && !noasm && !appengine +// +build arm64,!gccgo,!noasm,!appengine + +package cpuid + +import "runtime" + +func getMidr() (midr uint64) +func getProcFeatures() (procFeatures uint64) +func getInstAttributes() (instAttrReg0, instAttrReg1 uint64) +func getVectorLength() (vl, pl uint64) + +func initCPU() { + cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + xgetbv = func(uint32) (a, b uint32) { return 0, 0 } + rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } +} + +func addInfo(c *CPUInfo, safe bool) { + // Seems to be safe to assume on ARM64 + c.CacheLine = 64 + detectOS(c) + + // ARM64 disabled since it may crash if interrupt is not intercepted by OS. 
+ if safe && !c.Has(ARMCPUID) && runtime.GOOS != "freebsd" { + return + } + midr := getMidr() + + // MIDR_EL1 - Main ID Register + // https://developer.arm.com/docs/ddi0595/h/aarch64-system-registers/midr_el1 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | Implementer | [31-24] | y | + // |--------------------------------------------------| + // | Variant | [23-20] | y | + // |--------------------------------------------------| + // | Architecture | [19-16] | y | + // |--------------------------------------------------| + // | PartNum | [15-4] | y | + // |--------------------------------------------------| + // | Revision | [3-0] | y | + // x--------------------------------------------------x + + switch (midr >> 24) & 0xff { + case 0xC0: + c.VendorString = "Ampere Computing" + c.VendorID = Ampere + case 0x41: + c.VendorString = "Arm Limited" + c.VendorID = ARM + case 0x42: + c.VendorString = "Broadcom Corporation" + c.VendorID = Broadcom + case 0x43: + c.VendorString = "Cavium Inc" + c.VendorID = Cavium + case 0x44: + c.VendorString = "Digital Equipment Corporation" + c.VendorID = DEC + case 0x46: + c.VendorString = "Fujitsu Ltd" + c.VendorID = Fujitsu + case 0x49: + c.VendorString = "Infineon Technologies AG" + c.VendorID = Infineon + case 0x4D: + c.VendorString = "Motorola or Freescale Semiconductor Inc" + c.VendorID = Motorola + case 0x4E: + c.VendorString = "NVIDIA Corporation" + c.VendorID = NVIDIA + case 0x50: + c.VendorString = "Applied Micro Circuits Corporation" + c.VendorID = AMCC + case 0x51: + c.VendorString = "Qualcomm Inc" + c.VendorID = Qualcomm + case 0x56: + c.VendorString = "Marvell International Ltd" + c.VendorID = Marvell + case 0x69: + c.VendorString = "Intel Corporation" + c.VendorID = Intel + } + + // Lower 4 bits: Architecture + // Architecture Meaning + // 0b0001 Armv4. + // 0b0010 Armv4T. + // 0b0011 Armv5 (obsolete). + // 0b0100 Armv5T. + // 0b0101 Armv5TE. + // 0b0110 Armv5TEJ. + // 0b0111 Armv6. + // 0b1111 Architectural features are individually identified in the ID_* registers, see 'ID registers'. + // Upper 4 bit: Variant + // An IMPLEMENTATION DEFINED variant number. + // Typically, this field is used to distinguish between different product variants, or major revisions of a product. + c.Family = int(midr>>16) & 0xff + + // PartNum, bits [15:4] + // An IMPLEMENTATION DEFINED primary part number for the device. + // On processors implemented by Arm, if the top four bits of the primary + // part number are 0x0 or 0x7, the variant and architecture are encoded differently. + // Revision, bits [3:0] + // An IMPLEMENTATION DEFINED revision number for the device. 
+ c.Model = int(midr) & 0xffff + + procFeatures := getProcFeatures() + + // ID_AA64PFR0_EL1 - Processor Feature Register 0 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | DIT | [51-48] | y | + // |--------------------------------------------------| + // | SVE | [35-32] | y | + // |--------------------------------------------------| + // | GIC | [27-24] | n | + // |--------------------------------------------------| + // | AdvSIMD | [23-20] | y | + // |--------------------------------------------------| + // | FP | [19-16] | y | + // |--------------------------------------------------| + // | EL3 | [15-12] | n | + // |--------------------------------------------------| + // | EL2 | [11-8] | n | + // |--------------------------------------------------| + // | EL1 | [7-4] | n | + // |--------------------------------------------------| + // | EL0 | [3-0] | n | + // x--------------------------------------------------x + + var f flagSet + // if procFeatures&(0xf<<48) != 0 { + // fmt.Println("DIT") + // } + f.setIf(procFeatures&(0xf<<32) != 0, SVE) + if procFeatures&(0xf<<20) != 15<<20 { + f.set(ASIMD) + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64pfr0_el1 + // 0b0001 --> As for 0b0000, and also includes support for half-precision floating-point arithmetic. + f.setIf(procFeatures&(0xf<<20) == 1<<20, FPHP, ASIMDHP) + } + f.setIf(procFeatures&(0xf<<16) != 0, FP) + + instAttrReg0, instAttrReg1 := getInstAttributes() + + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 + // + // ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | RNDR | [63-60] | y | + // |--------------------------------------------------| + // | TLB | [59-56] | y | + // |--------------------------------------------------| + // | TS | [55-52] | y | + // |--------------------------------------------------| + // | FHM | [51-48] | y | + // |--------------------------------------------------| + // | DP | [47-44] | y | + // |--------------------------------------------------| + // | SM4 | [43-40] | y | + // |--------------------------------------------------| + // | SM3 | [39-36] | y | + // |--------------------------------------------------| + // | SHA3 | [35-32] | y | + // |--------------------------------------------------| + // | RDM | [31-28] | y | + // |--------------------------------------------------| + // | ATOMICS | [23-20] | y | + // |--------------------------------------------------| + // | CRC32 | [19-16] | y | + // |--------------------------------------------------| + // | SHA2 | [15-12] | y | + // |--------------------------------------------------| + // | SHA1 | [11-8] | y | + // |--------------------------------------------------| + // | AES | [7-4] | y | + // x--------------------------------------------------x + + f.setIf(instAttrReg0&(0xf<<60) != 0, RNDR) + f.setIf(instAttrReg0&(0xf<<56) != 0, TLB) + f.setIf(instAttrReg0&(0xf<<52) != 0, TS) + f.setIf(instAttrReg0&(0xf<<48) != 0, FHM) + f.setIf(instAttrReg0&(0xf<<44) != 0, ASIMDDP) + f.setIf(instAttrReg0&(0xf<<40) != 0, SM4) + f.setIf(instAttrReg0&(0xf<<36) != 0, SM3) + f.setIf(instAttrReg0&(0xf<<32) != 0, SHA3) + f.setIf(instAttrReg0&(0xf<<28) != 0, ASIMDRDM) + f.setIf(instAttrReg0&(0xf<<20) != 0, ATOMICS) + f.setIf(instAttrReg0&(0xf<<16) 
!= 0, CRC32) + f.setIf(instAttrReg0&(0xf<<12) != 0, SHA2) + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 + // 0b0010 --> As 0b0001, plus SHA512H, SHA512H2, SHA512SU0, and SHA512SU1 instructions implemented. + f.setIf(instAttrReg0&(0xf<<12) == 2<<12, SHA512) + f.setIf(instAttrReg0&(0xf<<8) != 0, SHA1) + f.setIf(instAttrReg0&(0xf<<4) != 0, AESARM) + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 + // 0b0010 --> As for 0b0001, plus PMULL/PMULL2 instructions operating on 64-bit data quantities. + f.setIf(instAttrReg0&(0xf<<4) == 2<<4, PMULL) + + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar1_el1 + // + // ID_AA64ISAR1_EL1 - Instruction set attribute register 1 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | GPI | [31-28] | y | + // |--------------------------------------------------| + // | GPA | [27-24] | y | + // |--------------------------------------------------| + // | LRCPC | [23-20] | y | + // |--------------------------------------------------| + // | FCMA | [19-16] | y | + // |--------------------------------------------------| + // | JSCVT | [15-12] | y | + // |--------------------------------------------------| + // | API | [11-8] | y | + // |--------------------------------------------------| + // | APA | [7-4] | y | + // |--------------------------------------------------| + // | DPB | [3-0] | y | + // x--------------------------------------------------x + + // if instAttrReg1&(0xf<<28) != 0 { + // fmt.Println("GPI") + // } + f.setIf(instAttrReg1&(0xf<<28) != 24, GPA) + f.setIf(instAttrReg1&(0xf<<20) != 0, LRCPC) + f.setIf(instAttrReg1&(0xf<<16) != 0, FCMA) + f.setIf(instAttrReg1&(0xf<<12) != 0, JSCVT) + // if instAttrReg1&(0xf<<8) != 0 { + // fmt.Println("API") + // } + // if instAttrReg1&(0xf<<4) != 0 { + // fmt.Println("APA") + // } + f.setIf(instAttrReg1&(0xf<<0) != 0, DCPOP) + + // Store + c.featureSet.or(f) +} diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go new file mode 100644 index 00000000..574f9389 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go @@ -0,0 +1,17 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//go:build (!amd64 && !386 && !arm64) || gccgo || noasm || appengine +// +build !amd64,!386,!arm64 gccgo noasm appengine + +package cpuid + +func initCPU() { + cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + xgetbv = func(uint32) (a, b uint32) { return 0, 0 } + rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } + +} + +func addInfo(info *CPUInfo, safe bool) {} +func getVectorLength() (vl, pl uint64) { return 0, 0 } diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go new file mode 100644 index 00000000..14a56b93 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go @@ -0,0 +1,45 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
+ +//go:build (386 && !gccgo && !noasm && !appengine) || (amd64 && !gccgo && !noasm && !appengine) +// +build 386,!gccgo,!noasm,!appengine amd64,!gccgo,!noasm,!appengine + +package cpuid + +func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func asmXgetbv(index uint32) (eax, edx uint32) +func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +func asmDarwinHasAVX512() bool + +func initCPU() { + cpuid = asmCpuid + cpuidex = asmCpuidex + xgetbv = asmXgetbv + rdtscpAsm = asmRdtscpAsm + darwinHasAVX512 = asmDarwinHasAVX512 +} + +func addInfo(c *CPUInfo, safe bool) { + c.maxFunc = maxFunctionID() + c.maxExFunc = maxExtendedFunction() + c.BrandName = brandName() + c.CacheLine = cacheLine() + c.Family, c.Model, c.Stepping = familyModel() + c.featureSet = support() + c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC)) + c.AMDMemEncryption = hasAMDMemEncryption(c.featureSet.inSet(SME) || c.featureSet.inSet(SEV)) + c.ThreadsPerCore = threadsPerCore() + c.LogicalCores = logicalCores() + c.PhysicalCores = physicalCores() + c.VendorID, c.VendorString = vendorID() + c.HypervisorVendorID, c.HypervisorVendorString = hypervisorVendorID() + c.AVX10Level = c.supportAVX10() + c.cacheSize() + c.frequencies() + if c.maxFunc >= 0x0A { + eax, ebx, _, edx := cpuid(0x0A) + c.PMU = parseLeaf0AH(c, eax, ebx, edx) + } +} + +func getVectorLength() (vl, pl uint64) { return 0, 0 } diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go new file mode 100644 index 00000000..2888bae8 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -0,0 +1,308 @@ +// Code generated by "stringer -type=FeatureID,Vendor"; DO NOT EDIT. + +package cpuid + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ADX-1] + _ = x[AESNI-2] + _ = x[AMD3DNOW-3] + _ = x[AMD3DNOWEXT-4] + _ = x[AMXBF16-5] + _ = x[AMXFP16-6] + _ = x[AMXINT8-7] + _ = x[AMXFP8-8] + _ = x[AMXTILE-9] + _ = x[AMXTF32-10] + _ = x[AMXCOMPLEX-11] + _ = x[AMXTRANSPOSE-12] + _ = x[APX_F-13] + _ = x[AVX-14] + _ = x[AVX10-15] + _ = x[AVX10_128-16] + _ = x[AVX10_256-17] + _ = x[AVX10_512-18] + _ = x[AVX2-19] + _ = x[AVX512BF16-20] + _ = x[AVX512BITALG-21] + _ = x[AVX512BW-22] + _ = x[AVX512CD-23] + _ = x[AVX512DQ-24] + _ = x[AVX512ER-25] + _ = x[AVX512F-26] + _ = x[AVX512FP16-27] + _ = x[AVX512IFMA-28] + _ = x[AVX512PF-29] + _ = x[AVX512VBMI-30] + _ = x[AVX512VBMI2-31] + _ = x[AVX512VL-32] + _ = x[AVX512VNNI-33] + _ = x[AVX512VP2INTERSECT-34] + _ = x[AVX512VPOPCNTDQ-35] + _ = x[AVXIFMA-36] + _ = x[AVXNECONVERT-37] + _ = x[AVXSLOW-38] + _ = x[AVXVNNI-39] + _ = x[AVXVNNIINT8-40] + _ = x[AVXVNNIINT16-41] + _ = x[BHI_CTRL-42] + _ = x[BMI1-43] + _ = x[BMI2-44] + _ = x[CETIBT-45] + _ = x[CETSS-46] + _ = x[CLDEMOTE-47] + _ = x[CLMUL-48] + _ = x[CLZERO-49] + _ = x[CMOV-50] + _ = x[CMPCCXADD-51] + _ = x[CMPSB_SCADBS_SHORT-52] + _ = x[CMPXCHG8-53] + _ = x[CPBOOST-54] + _ = x[CPPC-55] + _ = x[CX16-56] + _ = x[EFER_LMSLE_UNS-57] + _ = x[ENQCMD-58] + _ = x[ERMS-59] + _ = x[F16C-60] + _ = x[FLUSH_L1D-61] + _ = x[FMA3-62] + _ = x[FMA4-63] + _ = x[FP128-64] + _ = x[FP256-65] + _ = x[FSRM-66] + _ = x[FXSR-67] + _ = x[FXSROPT-68] + _ = x[GFNI-69] + _ = x[HLE-70] + _ = x[HRESET-71] + _ = x[HTT-72] + _ = x[HWA-73] + _ = x[HYBRID_CPU-74] + _ = x[HYPERVISOR-75] + _ = x[IA32_ARCH_CAP-76] + _ = x[IA32_CORE_CAP-77] + _ = x[IBPB-78] + _ = x[IBPB_BRTYPE-79] + _ = x[IBRS-80] + _ = x[IBRS_PREFERRED-81] + _ = x[IBRS_PROVIDES_SMP-82] + _ = x[IBS-83] + _ = x[IBSBRNTRGT-84] + _ = x[IBSFETCHSAM-85] + _ = x[IBSFFV-86] + _ = x[IBSOPCNT-87] + _ = x[IBSOPCNTEXT-88] + _ = x[IBSOPSAM-89] + _ = x[IBSRDWROPCNT-90] + _ = x[IBSRIPINVALIDCHK-91] + _ = x[IBS_FETCH_CTLX-92] + _ = x[IBS_OPDATA4-93] + _ = x[IBS_OPFUSE-94] + _ = x[IBS_PREVENTHOST-95] + _ = x[IBS_ZEN4-96] + _ = x[IDPRED_CTRL-97] + _ = x[INT_WBINVD-98] + _ = x[INVLPGB-99] + _ = x[KEYLOCKER-100] + _ = x[KEYLOCKERW-101] + _ = x[LAHF-102] + _ = x[LAM-103] + _ = x[LBRVIRT-104] + _ = x[LZCNT-105] + _ = x[MCAOVERFLOW-106] + _ = x[MCDT_NO-107] + _ = x[MCOMMIT-108] + _ = x[MD_CLEAR-109] + _ = x[MMX-110] + _ = x[MMXEXT-111] + _ = x[MOVBE-112] + _ = x[MOVDIR64B-113] + _ = x[MOVDIRI-114] + _ = x[MOVSB_ZL-115] + _ = x[MOVU-116] + _ = x[MPX-117] + _ = x[MSRIRC-118] + _ = x[MSRLIST-119] + _ = x[MSR_PAGEFLUSH-120] + _ = x[NRIPS-121] + _ = x[NX-122] + _ = x[OSXSAVE-123] + _ = x[PCONFIG-124] + _ = x[POPCNT-125] + _ = x[PPIN-126] + _ = x[PREFETCHI-127] + _ = x[PSFD-128] + _ = x[RDPRU-129] + _ = x[RDRAND-130] + _ = x[RDSEED-131] + _ = x[RDTSCP-132] + _ = x[RRSBA_CTRL-133] + _ = x[RTM-134] + _ = x[RTM_ALWAYS_ABORT-135] + _ = x[SBPB-136] + _ = x[SERIALIZE-137] + _ = x[SEV-138] + _ = x[SEV_64BIT-139] + _ = x[SEV_ALTERNATIVE-140] + _ = x[SEV_DEBUGSWAP-141] + _ = x[SEV_ES-142] + _ = x[SEV_RESTRICTED-143] + _ = x[SEV_SNP-144] + _ = x[SGX-145] + _ = x[SGXLC-146] + _ = x[SGXPQC-147] + _ = x[SHA-148] + _ = x[SME-149] + _ = x[SME_COHERENT-150] + _ = x[SM3_X86-151] + _ = x[SM4_X86-152] + _ = x[SPEC_CTRL_SSBD-153] + _ = x[SRBDS_CTRL-154] + _ = x[SRSO_MSR_FIX-155] + _ = x[SRSO_NO-156] + _ = x[SRSO_USER_KERNEL_NO-157] + _ = x[SSE-158] + _ = x[SSE2-159] + _ = x[SSE3-160] + _ = x[SSE4-161] + _ = x[SSE42-162] + _ = x[SSE4A-163] + _ = x[SSSE3-164] + _ = x[STIBP-165] + _ = x[STIBP_ALWAYSON-166] + _ = x[STOSB_SHORT-167] + _ = 
x[SUCCOR-168] + _ = x[SVM-169] + _ = x[SVMDA-170] + _ = x[SVMFBASID-171] + _ = x[SVML-172] + _ = x[SVMNP-173] + _ = x[SVMPF-174] + _ = x[SVMPFT-175] + _ = x[SYSCALL-176] + _ = x[SYSEE-177] + _ = x[TBM-178] + _ = x[TDX_GUEST-179] + _ = x[TLB_FLUSH_NESTED-180] + _ = x[TME-181] + _ = x[TOPEXT-182] + _ = x[TSA_L1_NO-183] + _ = x[TSA_SQ_NO-184] + _ = x[TSA_VERW_CLEAR-185] + _ = x[TSCRATEMSR-186] + _ = x[TSXLDTRK-187] + _ = x[VAES-188] + _ = x[VMCBCLEAN-189] + _ = x[VMPL-190] + _ = x[VMSA_REGPROT-191] + _ = x[VMX-192] + _ = x[VPCLMULQDQ-193] + _ = x[VTE-194] + _ = x[WAITPKG-195] + _ = x[WBNOINVD-196] + _ = x[WRMSRNS-197] + _ = x[X87-198] + _ = x[XGETBV1-199] + _ = x[XOP-200] + _ = x[XSAVE-201] + _ = x[XSAVEC-202] + _ = x[XSAVEOPT-203] + _ = x[XSAVES-204] + _ = x[AESARM-205] + _ = x[ARMCPUID-206] + _ = x[ASIMD-207] + _ = x[ASIMDDP-208] + _ = x[ASIMDHP-209] + _ = x[ASIMDRDM-210] + _ = x[ATOMICS-211] + _ = x[CRC32-212] + _ = x[DCPOP-213] + _ = x[EVTSTRM-214] + _ = x[FCMA-215] + _ = x[FHM-216] + _ = x[FP-217] + _ = x[FPHP-218] + _ = x[GPA-219] + _ = x[JSCVT-220] + _ = x[LRCPC-221] + _ = x[PMULL-222] + _ = x[RNDR-223] + _ = x[TLB-224] + _ = x[TS-225] + _ = x[SHA1-226] + _ = x[SHA2-227] + _ = x[SHA3-228] + _ = x[SHA512-229] + _ = x[SM3-230] + _ = x[SM4-231] + _ = x[SVE-232] + _ = x[PMU_FIXEDCOUNTER_CYCLES-233] + _ = x[PMU_FIXEDCOUNTER_REFCYCLES-234] + _ = x[PMU_FIXEDCOUNTER_INSTRUCTIONS-235] + _ = x[PMU_FIXEDCOUNTER_TOPDOWN_SLOTS-236] + _ = x[lastID-237] + _ = x[firstID-0] +} + +const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXFP8AMXTILEAMXTF32AMXCOMPLEXAMXTRANSPOSEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSGXPQCSHASMESME_COHERENTSM3_X86SM4_X86SPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSA_L1_NOTSA_SQ_NOTSA_VERW_CLEARTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFHMFPFPHPGPAJSCVTLRCPCPMULLRNDRTLBTSSHA1SHA2SHA3SHA512SM3SM4SVEPMU_FIXEDCOUNTER_CYCLESPMU_FIXEDCOUNTER_REFCYCLESPMU_FIXEDCOUNTER_INSTRUCTIONSPMU_FIXEDCOUNTER_TOPDOWN_SLOTSlastID" + +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 61, 68, 75, 85, 97, 102, 105, 110, 119, 128, 137, 141, 151, 163, 171, 179, 187, 195, 202, 212, 222, 230, 240, 251, 259, 269, 287, 302, 309, 321, 328, 335, 346, 358, 366, 
370, 374, 380, 385, 393, 398, 404, 408, 417, 435, 443, 450, 454, 458, 472, 478, 482, 486, 495, 499, 503, 508, 513, 517, 521, 528, 532, 535, 541, 544, 547, 557, 567, 580, 593, 597, 608, 612, 626, 643, 646, 656, 667, 673, 681, 692, 700, 712, 728, 742, 753, 763, 778, 786, 797, 807, 814, 823, 833, 837, 840, 847, 852, 863, 870, 877, 885, 888, 894, 899, 908, 915, 923, 927, 930, 936, 943, 956, 961, 963, 970, 977, 983, 987, 996, 1000, 1005, 1011, 1017, 1023, 1033, 1036, 1052, 1056, 1065, 1068, 1077, 1092, 1105, 1111, 1125, 1132, 1135, 1140, 1146, 1149, 1152, 1164, 1171, 1178, 1192, 1202, 1214, 1221, 1240, 1243, 1247, 1251, 1255, 1260, 1265, 1270, 1275, 1289, 1300, 1306, 1309, 1314, 1323, 1327, 1332, 1337, 1343, 1350, 1355, 1358, 1367, 1383, 1386, 1392, 1401, 1410, 1424, 1434, 1442, 1446, 1455, 1459, 1471, 1474, 1484, 1487, 1494, 1502, 1509, 1512, 1519, 1522, 1527, 1533, 1541, 1547, 1553, 1561, 1566, 1573, 1580, 1588, 1595, 1600, 1605, 1612, 1616, 1619, 1621, 1625, 1628, 1633, 1638, 1643, 1647, 1650, 1652, 1656, 1660, 1664, 1670, 1673, 1676, 1679, 1702, 1728, 1757, 1787, 1793} + +func (i FeatureID) String() string { + if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { + return "FeatureID(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _FeatureID_name[_FeatureID_index[i]:_FeatureID_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[VendorUnknown-0] + _ = x[Intel-1] + _ = x[AMD-2] + _ = x[VIA-3] + _ = x[Transmeta-4] + _ = x[NSC-5] + _ = x[KVM-6] + _ = x[MSVM-7] + _ = x[VMware-8] + _ = x[XenHVM-9] + _ = x[Bhyve-10] + _ = x[Hygon-11] + _ = x[SiS-12] + _ = x[RDC-13] + _ = x[Ampere-14] + _ = x[ARM-15] + _ = x[Broadcom-16] + _ = x[Cavium-17] + _ = x[DEC-18] + _ = x[Fujitsu-19] + _ = x[Infineon-20] + _ = x[Motorola-21] + _ = x[NVIDIA-22] + _ = x[AMCC-23] + _ = x[Qualcomm-24] + _ = x[Marvell-25] + _ = x[QEMU-26] + _ = x[QNX-27] + _ = x[ACRN-28] + _ = x[SRE-29] + _ = x[Apple-30] + _ = x[lastVendor-31] +} + +const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvellQEMUQNXACRNSREApplelastVendor" + +var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 149, 152, 156, 159, 164, 174} + +func (i Vendor) String() string { + if i < 0 || i >= Vendor(len(_Vendor_index)-1) { + return "Vendor(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Vendor_name[_Vendor_index[i]:_Vendor_index[i+1]] +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go new file mode 100644 index 00000000..da07522e --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go @@ -0,0 +1,129 @@ +// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. + +package cpuid + +import ( + "runtime" + "strings" + + "golang.org/x/sys/unix" +) + +func detectOS(c *CPUInfo) bool { + if runtime.GOOS != "ios" { + tryToFillCPUInfoFomSysctl(c) + } + // There are no hw.optional sysctl values for the below features on Mac OS 11.0 + // to detect their supported state dynamically. Assume the CPU features that + // Apple Silicon M1 supports to be available as a minimal set of features + // to all Go programs running on darwin/arm64. + // TODO: Add more if we know them. 
+ c.featureSet.setIf(runtime.GOOS != "ios", AESARM, PMULL, SHA1, SHA2) + + return true +} + +func sysctlGetBool(name string) bool { + value, err := unix.SysctlUint32(name) + if err != nil { + return false + } + return value != 0 +} + +func sysctlGetString(name string) string { + value, err := unix.Sysctl(name) + if err != nil { + return "" + } + return value +} + +func sysctlGetInt(unknown int, names ...string) int { + for _, name := range names { + value, err := unix.SysctlUint32(name) + if err != nil { + continue + } + if value != 0 { + return int(value) + } + } + return unknown +} + +func sysctlGetInt64(unknown int, names ...string) int { + for _, name := range names { + value64, err := unix.SysctlUint64(name) + if err != nil { + continue + } + if int(value64) != unknown { + return int(value64) + } + } + return unknown +} + +func setFeature(c *CPUInfo, feature FeatureID, aliases ...string) { + for _, alias := range aliases { + set := sysctlGetBool(alias) + c.featureSet.setIf(set, feature) + if set { + break + } + } +} + +func tryToFillCPUInfoFomSysctl(c *CPUInfo) { + c.BrandName = sysctlGetString("machdep.cpu.brand_string") + + if len(c.BrandName) != 0 { + c.VendorString = strings.Fields(c.BrandName)[0] + } + + c.PhysicalCores = sysctlGetInt(runtime.NumCPU(), "hw.physicalcpu") + c.ThreadsPerCore = sysctlGetInt(1, "machdep.cpu.thread_count", "kern.num_threads") / + sysctlGetInt(1, "hw.physicalcpu") + c.LogicalCores = sysctlGetInt(runtime.NumCPU(), "machdep.cpu.core_count") + c.Family = sysctlGetInt(0, "machdep.cpu.family", "hw.cpufamily") + c.Model = sysctlGetInt(0, "machdep.cpu.model") + c.CacheLine = sysctlGetInt64(0, "hw.cachelinesize") + c.Cache.L1I = sysctlGetInt64(-1, "hw.l1icachesize") + c.Cache.L1D = sysctlGetInt64(-1, "hw.l1dcachesize") + c.Cache.L2 = sysctlGetInt64(-1, "hw.l2cachesize") + c.Cache.L3 = sysctlGetInt64(-1, "hw.l3cachesize") + + // ARM features: + // + // Note: On some Apple Silicon system, some feats have aliases. See: + // https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics + // When so, we look at all aliases and consider a feature available when at least one identifier matches. 
+ setFeature(c, AESARM, "hw.optional.arm.FEAT_AES") // AES instructions + setFeature(c, ASIMD, "hw.optional.arm.AdvSIMD", "hw.optional.neon") // Advanced SIMD + setFeature(c, ASIMDDP, "hw.optional.arm.FEAT_DotProd") // SIMD Dot Product + setFeature(c, ASIMDHP, "hw.optional.arm.AdvSIMD_HPFPCvt", "hw.optional.neon_hpfp") // Advanced SIMD half-precision floating point + setFeature(c, ASIMDRDM, "hw.optional.arm.FEAT_RDM") // Rounding Double Multiply Accumulate/Subtract + setFeature(c, ATOMICS, "hw.optional.arm.FEAT_LSE", "hw.optional.armv8_1_atomics") // Large System Extensions (LSE) + setFeature(c, CRC32, "hw.optional.arm.FEAT_CRC32", "hw.optional.armv8_crc32") // CRC32/CRC32C instructions + setFeature(c, DCPOP, "hw.optional.arm.FEAT_DPB") // Data cache clean to Point of Persistence (DC CVAP) + setFeature(c, EVTSTRM, "hw.optional.arm.FEAT_ECV") // Generic timer + setFeature(c, FCMA, "hw.optional.arm.FEAT_FCMA", "hw.optional.armv8_3_compnum") // Floating point complex number addition and multiplication + setFeature(c, FHM, "hw.optional.armv8_2_fhm", "hw.optional.arm.FEAT_FHM") // FMLAL and FMLSL instructions + setFeature(c, FP, "hw.optional.floatingpoint") // Single-precision and double-precision floating point + setFeature(c, FPHP, "hw.optional.arm.FEAT_FP16", "hw.optional.neon_fp16") // Half-precision floating point + setFeature(c, GPA, "hw.optional.arm.FEAT_PAuth") // Generic Pointer Authentication + setFeature(c, JSCVT, "hw.optional.arm.FEAT_JSCVT") // Javascript-style double->int convert (FJCVTZS) + setFeature(c, LRCPC, "hw.optional.arm.FEAT_LRCPC") // Weaker release consistency (LDAPR, etc) + setFeature(c, PMULL, "hw.optional.arm.FEAT_PMULL") // Polynomial Multiply instructions (PMULL/PMULL2) + setFeature(c, RNDR, "hw.optional.arm.FEAT_RNG") // Random Number instructions + setFeature(c, TLB, "hw.optional.arm.FEAT_TLBIOS", "hw.optional.arm.FEAT_TLBIRANGE") // Outer Shareable and TLB range maintenance instructions + setFeature(c, TS, "hw.optional.arm.FEAT_FlagM", "hw.optional.arm.FEAT_FlagM2") // Flag manipulation instructions + setFeature(c, SHA1, "hw.optional.arm.FEAT_SHA1") // SHA-1 instructions (SHA1C, etc) + setFeature(c, SHA2, "hw.optional.arm.FEAT_SHA256") // SHA-2 instructions (SHA256H, etc) + setFeature(c, SHA3, "hw.optional.arm.FEAT_SHA3") // SHA-3 instructions (EOR3, RAXI, XAR, BCAX) + setFeature(c, SHA512, "hw.optional.arm.FEAT_SHA512") // SHA512 instructions + setFeature(c, SM3, "hw.optional.arm.FEAT_SM3") // SM3 instructions + setFeature(c, SM4, "hw.optional.arm.FEAT_SM4") // SM4 instructions + setFeature(c, SVE, "hw.optional.arm.FEAT_SVE") // Scalable Vector Extension +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go new file mode 100644 index 00000000..d96d2443 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go @@ -0,0 +1,208 @@ +// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file located +// here https://github.com/golang/sys/blob/master/LICENSE + +package cpuid + +import ( + "encoding/binary" + "io/ioutil" + "runtime" +) + +// HWCAP bits. 
+const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 + hwcap_DIT = 1 << 24 + hwcap_USCAT = 1 << 25 + hwcap_ILRCPC = 1 << 26 + hwcap_FLAGM = 1 << 27 + hwcap_SSBS = 1 << 28 + hwcap_SB = 1 << 29 + hwcap_PACA = 1 << 30 + hwcap_PACG = 1 << 31 + hwcap_GCS = 1 << 32 + + hwcap2_DCPODP = 1 << 0 + hwcap2_SVE2 = 1 << 1 + hwcap2_SVEAES = 1 << 2 + hwcap2_SVEPMULL = 1 << 3 + hwcap2_SVEBITPERM = 1 << 4 + hwcap2_SVESHA3 = 1 << 5 + hwcap2_SVESM4 = 1 << 6 + hwcap2_FLAGM2 = 1 << 7 + hwcap2_FRINT = 1 << 8 + hwcap2_SVEI8MM = 1 << 9 + hwcap2_SVEF32MM = 1 << 10 + hwcap2_SVEF64MM = 1 << 11 + hwcap2_SVEBF16 = 1 << 12 + hwcap2_I8MM = 1 << 13 + hwcap2_BF16 = 1 << 14 + hwcap2_DGH = 1 << 15 + hwcap2_RNG = 1 << 16 + hwcap2_BTI = 1 << 17 + hwcap2_MTE = 1 << 18 + hwcap2_ECV = 1 << 19 + hwcap2_AFP = 1 << 20 + hwcap2_RPRES = 1 << 21 + hwcap2_MTE3 = 1 << 22 + hwcap2_SME = 1 << 23 + hwcap2_SME_I16I64 = 1 << 24 + hwcap2_SME_F64F64 = 1 << 25 + hwcap2_SME_I8I32 = 1 << 26 + hwcap2_SME_F16F32 = 1 << 27 + hwcap2_SME_B16F32 = 1 << 28 + hwcap2_SME_F32F32 = 1 << 29 + hwcap2_SME_FA64 = 1 << 30 + hwcap2_WFXT = 1 << 31 + hwcap2_EBF16 = 1 << 32 + hwcap2_SVE_EBF16 = 1 << 33 + hwcap2_CSSC = 1 << 34 + hwcap2_RPRFM = 1 << 35 + hwcap2_SVE2P1 = 1 << 36 + hwcap2_SME2 = 1 << 37 + hwcap2_SME2P1 = 1 << 38 + hwcap2_SME_I16I32 = 1 << 39 + hwcap2_SME_BI32I32 = 1 << 40 + hwcap2_SME_B16B16 = 1 << 41 + hwcap2_SME_F16F16 = 1 << 42 + hwcap2_MOPS = 1 << 43 + hwcap2_HBC = 1 << 44 + hwcap2_SVE_B16B16 = 1 << 45 + hwcap2_LRCPC3 = 1 << 46 + hwcap2_LSE128 = 1 << 47 + hwcap2_FPMR = 1 << 48 + hwcap2_LUT = 1 << 49 + hwcap2_FAMINMAX = 1 << 50 + hwcap2_F8CVT = 1 << 51 + hwcap2_F8FMA = 1 << 52 + hwcap2_F8DP4 = 1 << 53 + hwcap2_F8DP2 = 1 << 54 + hwcap2_F8E4M3 = 1 << 55 + hwcap2_F8E5M2 = 1 << 56 + hwcap2_SME_LUTV2 = 1 << 57 + hwcap2_SME_F8F16 = 1 << 58 + hwcap2_SME_F8F32 = 1 << 59 + hwcap2_SME_SF8FMA = 1 << 60 + hwcap2_SME_SF8DP4 = 1 << 61 + hwcap2_SME_SF8DP2 = 1 << 62 + hwcap2_POE = 1 << 63 +) + +func detectOS(c *CPUInfo) bool { + // For now assuming no hyperthreading is reasonable. + c.LogicalCores = runtime.NumCPU() + c.PhysicalCores = c.LogicalCores + c.ThreadsPerCore = 1 + if hwcap == 0 { + // We did not get values from the runtime. + // Try reading /proc/self/auxv + + // From https://github.com/golang/sys + const ( + _AT_HWCAP = 16 + _AT_HWCAP2 = 26 + + uintSize = int(32 << (^uint(0) >> 63)) + ) + + buf, err := ioutil.ReadFile("/proc/self/auxv") + if err != nil { + // e.g. on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false. On some + // architectures (e.g. arm64) doinit() implements a fallback + // readout and will set Initialized = true again. 
+ return false + } + bo := binary.LittleEndian + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwcap = val + case _AT_HWCAP2: + // Not used + } + } + if hwcap == 0 { + return false + } + } + + // HWCap was populated by the runtime from the auxiliary vector. + // Use HWCap information since reading aarch64 system registers + // is not supported in user space on older linux kernels. + c.featureSet.setIf(isSet(hwcap, hwcap_AES), AESARM) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMD), ASIMD) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDDP), ASIMDDP) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDHP), ASIMDHP) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDRDM), ASIMDRDM) + c.featureSet.setIf(isSet(hwcap, hwcap_CPUID), ARMCPUID) + c.featureSet.setIf(isSet(hwcap, hwcap_CRC32), CRC32) + c.featureSet.setIf(isSet(hwcap, hwcap_DCPOP), DCPOP) + c.featureSet.setIf(isSet(hwcap, hwcap_EVTSTRM), EVTSTRM) + c.featureSet.setIf(isSet(hwcap, hwcap_FCMA), FCMA) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDFHM), FHM) + c.featureSet.setIf(isSet(hwcap, hwcap_FP), FP) + c.featureSet.setIf(isSet(hwcap, hwcap_FPHP), FPHP) + c.featureSet.setIf(isSet(hwcap, hwcap_JSCVT), JSCVT) + c.featureSet.setIf(isSet(hwcap, hwcap_LRCPC), LRCPC) + c.featureSet.setIf(isSet(hwcap, hwcap_PMULL), PMULL) + c.featureSet.setIf(isSet(hwcap, hwcap2_RNG), RNDR) + // c.featureSet.setIf(isSet(hwcap, hwcap_), TLB) + // c.featureSet.setIf(isSet(hwcap, hwcap_), TS) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA1), SHA1) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA2), SHA2) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA3), SHA3) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA512), SHA512) + c.featureSet.setIf(isSet(hwcap, hwcap_SM3), SM3) + c.featureSet.setIf(isSet(hwcap, hwcap_SM4), SM4) + c.featureSet.setIf(isSet(hwcap, hwcap_SVE), SVE) + + // The Samsung S9+ kernel reports support for atomics, but not all cores + // actually support them, resulting in SIGILL. See issue #28431. + // TODO(elias.naur): Only disable the optimization on bad chipsets on android. + c.featureSet.setIf(isSet(hwcap, hwcap_ATOMICS) && runtime.GOOS != "android", ATOMICS) + + return true +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go new file mode 100644 index 00000000..8733ba34 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go @@ -0,0 +1,16 @@ +// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. + +//go:build arm64 && !linux && !darwin +// +build arm64,!linux,!darwin + +package cpuid + +import "runtime" + +func detectOS(c *CPUInfo) bool { + c.PhysicalCores = runtime.NumCPU() + // For now assuming 1 thread per core... + c.ThreadsPerCore = 1 + c.LogicalCores = c.PhysicalCores + return false +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go new file mode 100644 index 00000000..f8f201b5 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go @@ -0,0 +1,8 @@ +// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file. 
+ +//go:build nounsafe +// +build nounsafe + +package cpuid + +var hwcap uint diff --git a/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go new file mode 100644 index 00000000..92af622e --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go @@ -0,0 +1,11 @@ +// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file. + +//go:build !nounsafe +// +build !nounsafe + +package cpuid + +import _ "unsafe" // needed for go:linkname + +//go:linkname hwcap internal/cpu.HWCap +var hwcap uint diff --git a/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh b/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh new file mode 100644 index 00000000..471d986d --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +set -e + +go tool dist list | while IFS=/ read os arch; do + echo "Checking $os/$arch..." + echo " normal" + GOARCH=$arch GOOS=$os go build -o /dev/null . + echo " noasm" + GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null . + echo " appengine" + GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null . + echo " noasm,appengine" + GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null . +done diff --git a/vendor/github.com/lucasb-eyer/go-colorful/CHANGELOG.md b/vendor/github.com/lucasb-eyer/go-colorful/CHANGELOG.md index 84f9c7b2..a10d3fc8 100644 --- a/vendor/github.com/lucasb-eyer/go-colorful/CHANGELOG.md +++ b/vendor/github.com/lucasb-eyer/go-colorful/CHANGELOG.md @@ -6,8 +6,32 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm The format of this file is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), but only releases after v1.0.3 properly adhere to it. +## [Unreleased] + +## [1.3.0] - 2025-09-08 +### Added +- `BlendLinearRgb` (#50) +- `DistanceRiemersma` (#52) +- Introduce a function for sorting colors (#57) +- YAML marshal/unmarshal support (#63) +- Add support for OkLab and OkLch (#66) +- Functions that use randomness now support specifying a custom source (#73) +- Functions BlendOkLab and BlendOkLch (#70) + +## Changed +- `Hex()` parsing is much faster (#78) + +### Fixed +- Fix bug when doing HSV/HCL blending between a gray color and non-gray color (#60) +- Docs for HSV/HSL were updated to note that hue 360 is not allowed (#71) + +### Deprecated +- `DistanceLinearRGB` is deprecated for the name `DistanceLinearRgb` which is more in-line with the rest of the library + ## [1.2.0] - 2021-01-27 +This is the same as the v1.1.0 tag. + ### Added - HSLuv and HPLuv color spaces (#41, #51) - CIE LCh(uv) color space, called `LuvLCh` in code (#51) diff --git a/vendor/github.com/lucasb-eyer/go-colorful/README.md b/vendor/github.com/lucasb-eyer/go-colorful/README.md index 8b9bd499..b3bb545c 100644 --- a/vendor/github.com/lucasb-eyer/go-colorful/README.md +++ b/vendor/github.com/lucasb-eyer/go-colorful/README.md @@ -1,6 +1,7 @@ go-colorful =========== +[![Go Reference](https://pkg.go.dev/badge/github.com/lucasb-eyer/go-colorful.svg)](https://pkg.go.dev/github.com/lucasb-eyer/go-colorful) [![go reportcard](https://goreportcard.com/badge/github.com/lucasb-eyer/go-colorful)](https://goreportcard.com/report/github.com/lucasb-eyer/go-colorful) A library for playing with colors in Go. Supports Go 1.13 onwards. 
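A minimal sketch, not part of the patch, exercising two of the v1.3.0 additions listed in the changelog above (the faster `Hex` parser and `BlendLinearRgb`), assuming the signatures that appear later in this diff:

```go
// Illustrative only — uses the faster Hex parser and BlendLinearRgb from v1.3.0.
package main

import (
	"fmt"

	colorful "github.com/lucasb-eyer/go-colorful"
)

func main() {
	c1, err := colorful.Hex("#ff0000")
	if err != nil {
		panic(err)
	}
	c2, err := colorful.Hex("#0000ff")
	if err != nil {
		panic(err)
	}

	// Blend halfway in linear RGB; unlike BlendRgb this avoids the dark
	// band that naive sRGB interpolation produces around the midpoint.
	mid := c1.BlendLinearRgb(c2, 0.5)
	fmt.Println(mid.Hex())
}
```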
@@ -29,10 +30,10 @@ Go-Colorful stores colors in RGB and provides methods from converting these to v - **CIE-xyY:** encodes chromacity in x and y and luminance in Y, all in [0..1] - **CIE-L\*a\*b\*:** A *perceptually uniform* color space, i.e. distances are meaningful. L\* in [0..1] and a\*, b\* almost in [-1..1]. - **CIE-L\*u\*v\*:** Very similar to CIE-L\*a\*b\*, there is [no consensus](http://en.wikipedia.org/wiki/CIELUV#Historical_background) on which one is "better". -- **CIE-L\*C\*h° (HCL):** This is generally the [most useful](http://vis4.net/blog/posts/avoid-equidistant-hsv-colors/) one; CIE-L\*a\*b\* space in polar coordinates, i.e. a *better* HSV. H° is in [0..360], C\* almost in [-1..1] and L\* as in CIE-L\*a\*b\*. -- **CIE LCh(uv):** Called `LuvLCh` in code, this is a cylindrical transformation of the CIE-L\*u\*v\* color space. Like HCL above: H° is in [0..360], C\* almost in [-1..1] and L\* as in CIE-L\*u\*v\*. +- **CIE-L\*C\*h° (HCL):** This is generally the [most useful](http://vis4.net/blog/posts/avoid-equidistant-hsv-colors/) one; CIE-L\*a\*b\* space in polar coordinates, i.e. a *better* HSV. H° is in [0..360], C\* almost in [0..1] and L\* as in CIE-L\*a\*b\*. +- **CIE LCh(uv):** Called `LuvLCh` in code, this is a cylindrical transformation of the CIE-L\*u\*v\* color space. Like HCL above: H° is in [0..360], C\* almost in [0..1] and L\* as in CIE-L\*u\*v\*. - **HSLuv:** The better alternative to HSL, see [here](https://www.hsluv.org/) and [here](https://www.kuon.ch/post/2020-03-08-hsluv/). Hue in [0..360], Saturation and Luminance in [0..1]. -- **HPLuv:** A variant of HSLuv. The color space is smoother, but only pastel colors can be included. Because the valid colors are limited, it's easy to get invalid Saturation values way above 1.0, indicating the color can't be represented in HPLuv beccause it's not pastel. +- **HPLuv:** A variant of HSLuv. The color space is smoother, but only pastel colors can be included. Because the valid colors are limited, it's easy to get invalid Saturation values way above 1.0, indicating the color can't be represented in HPLuv because it's not pastel. For the colorspaces where it makes sense (XYZ, Lab, Luv, HCl), the [D65](http://en.wikipedia.org/wiki/Illuminant_D65) is used as reference white @@ -52,14 +53,6 @@ Nice, but what's it useful for? - Generating random colors under some constraints (e.g. colors of the same shade, or shades of one color.) - Generating gorgeous random palettes with distinct colors of a same temperature. -What not (yet)? -=============== -There are a few features which are currently missing and might be useful. -I just haven't implemented them yet because I didn't have the need for it. -Pull requests welcome. - -- Sorting colors (potentially using above mentioned distances) - So which colorspace should I use? ================================= It depends on what you want to do. I think the folks from *I want hue* are @@ -139,7 +132,7 @@ alpha colors, this means the RGB values are lost (set to 0) and it's impossible to recover them. In such a case `MakeColor` will return `false` as its second value. ### Comparing colors -In the RGB color space, the Euclidian distance between colors *doesn't* correspond +In the RGB color space, the Euclidean distance between colors *doesn't* correspond to visual/perceptual distance. This means that two pairs of colors which have the same distance in RGB space can look much further apart. This is fixed by the CIE-L\*a\*b\*, CIE-L\*u\*v\* and CIE-L\*C\*h° color spaces. 
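The paragraph above explains why Euclidean RGB distance is not perceptual; a small sketch, not part of the patch, compares the distance functions side by side, including the `DistanceRiemersma` added in this diff:

```go
// Illustrative only — compares RGB, CIE-Lab and Riemersma distances for the
// same pair of colors. Sample hex values are arbitrary pale shades.
package main

import (
	"fmt"

	colorful "github.com/lucasb-eyer/go-colorful"
)

func main() {
	a, _ := colorful.Hex("#fdffcd") // pale yellow
	b, _ := colorful.Hex("#ccfdff") // pale cyan

	fmt.Printf("RGB distance:       %.4f\n", a.DistanceRgb(b))
	fmt.Printf("CIE-Lab distance:   %.4f\n", a.DistanceLab(b))
	fmt.Printf("Riemersma distance: %.4f\n", a.DistanceRiemersma(b))
}
```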
@@ -208,7 +201,7 @@ What you see is that HSV is really bad: it adds some green, which is not present in the original colors at all! RGB is much better, but it stays light a little too long. LUV and LAB both hit the right lightness but LAB has a little more color. HCL works in the same vein as HSV (both cylindrical interpolations) but -it does it right in that there is no green appearing and the lighthness changes +it does it right in that there is no green appearing and the lightness changes in a linear manner. While this seems all good, you need to know one thing: When interpolating in any @@ -316,11 +309,11 @@ generating this picture in `doc/colorgens/colorgens.go`. ### Getting random palettes As soon as you need to generate more than one random color, you probably want -them to be distinguishible. Playing against an opponent which has almost the +them to be distinguishable. Playing against an opponent which has almost the same blue as I do is not fun. This is where random palettes can help. These palettes are generated using an algorithm which ensures that all colors -on the palette are as distinguishible as possible. Again, there is a `Fast` +on the palette are as distinguishable as possible. Again, there is a `Fast` method which works in HSV and is less perceptually uniform and a non-`Fast` method which works in CIE spaces. For more theory on `SoftPalette`, check out [I want hue](http://tools.medialab.sciences-po.fr/iwanthue/theory.php). Yet @@ -372,10 +365,18 @@ from top to bottom: `Warm`, `FastWarm`, `Happy`, `FastHappy`, `Soft`, Again, the code used for generating the above image is available as [doc/palettegens/palettegens.go](https://github.com/lucasb-eyer/go-colorful/blob/master/doc/palettegens/palettegens.go). ### Sorting colors -TODO: Sort using dist fn. + +Sorting colors is not a well-defined operation. For example, {dark blue, dark red, light blue, light red} is already sorted if darker colors should precede lighter colors but would need to be re-sorted as {dark red, light red, dark blue, light blue} if longer-wavelength colors should precede shorter-wavelength colors. + +Go-Colorful's `Sorted` function orders a list of colors so as to minimize the average distance between adjacent colors, including between the last and the first. (`Sorted` does not necessarily find the true minimum, only a reasonably close approximation.) The following picture, drawn by [doc/colorsort/colorsort.go](https://github.com/lucasb-eyer/go-colorful/blob/master/doc/colorsort/colorsort.go), illustrates `Sorted`'s behavior: + +![Sorting colors](doc/colorsort/colorsort.png) + +The first row represents the input: a slice of 512 randomly chosen colors. The second row shows the colors sorted in CIE-L\*C\*h° space, ordered first by lightness (L), then by hue angle (h), and finally by chroma (C). Note that distracting pinstripes permeate the colors. Sorting using *any* color space and *any* ordering of the channels yields a similar pinstriped pattern. The third row of the image was sorted using Go-Colorful's `Sorted` function. Although the colors do not appear to be in any particular order, the sequence at least appears smoother than the one sorted by channel. + ### Using linear RGB for computations -There are two methods for transforming RGB<->Linear RGB: a fast and almost precise one, +There are two methods for transforming RGB⟷Linear RGB: a fast and almost precise one, and a slow and precise one. ```go @@ -472,9 +473,9 @@ Who? 
==== This library was developed by Lucas Beyer with contributions from -Bastien Dejean (@baskerville), Phil Kulak (@pkulak) and Christian Muehlhaeuser (@muesli). +Bastien Dejean (@baskerville), Phil Kulak (@pkulak), Christian Muehlhaeuser (@muesli), and Scott Pakin (@spakin). -It is now maintained by makeworld (@makeworld-the-better-one). +It is now maintained by makeworld (@makew0rld). ## License diff --git a/vendor/github.com/lucasb-eyer/go-colorful/colorgens.go b/vendor/github.com/lucasb-eyer/go-colorful/colorgens.go index 2e2e49e1..ac697d65 100644 --- a/vendor/github.com/lucasb-eyer/go-colorful/colorgens.go +++ b/vendor/github.com/lucasb-eyer/go-colorful/colorgens.go @@ -2,28 +2,32 @@ package colorful -import ( - "math/rand" -) - // Creates a random dark, "warm" color through a restricted HSV space. -func FastWarmColor() Color { +func FastWarmColorWithRand(rand RandInterface) Color { return Hsv( rand.Float64()*360.0, 0.5+rand.Float64()*0.3, 0.3+rand.Float64()*0.3) } +func FastWarmColor() Color { + return FastWarmColorWithRand(getDefaultGlobalRand()) +} + // Creates a random dark, "warm" color through restricted HCL space. // This is slower than FastWarmColor but will likely give you colors which have // the same "warmness" if you run it many times. -func WarmColor() (c Color) { - for c = randomWarm(); !c.IsValid(); c = randomWarm() { +func WarmColorWithRand(rand RandInterface) (c Color) { + for c = randomWarmWithRand(rand); !c.IsValid(); c = randomWarmWithRand(rand) { } return } -func randomWarm() Color { +func WarmColor() (c Color) { + return WarmColorWithRand(getDefaultGlobalRand()) +} + +func randomWarmWithRand(rand RandInterface) Color { return Hcl( rand.Float64()*360.0, 0.1+rand.Float64()*0.3, @@ -31,23 +35,31 @@ func randomWarm() Color { } // Creates a random bright, "pimpy" color through a restricted HSV space. -func FastHappyColor() Color { +func FastHappyColorWithRand(rand RandInterface) Color { return Hsv( rand.Float64()*360.0, 0.7+rand.Float64()*0.3, 0.6+rand.Float64()*0.3) } +func FastHappyColor() Color { + return FastHappyColorWithRand(getDefaultGlobalRand()) +} + // Creates a random bright, "pimpy" color through restricted HCL space. // This is slower than FastHappyColor but will likely give you colors which // have the same "brightness" if you run it many times. -func HappyColor() (c Color) { - for c = randomPimp(); !c.IsValid(); c = randomPimp() { +func HappyColorWithRand(rand RandInterface) (c Color) { + for c = randomPimpWithRand(rand); !c.IsValid(); c = randomPimpWithRand(rand) { } return } -func randomPimp() Color { +func HappyColor() (c Color) { + return HappyColorWithRand(getDefaultGlobalRand()) +} + +func randomPimpWithRand(rand RandInterface) Color { return Hcl( rand.Float64()*360.0, 0.5+rand.Float64()*0.3, diff --git a/vendor/github.com/lucasb-eyer/go-colorful/colors.go b/vendor/github.com/lucasb-eyer/go-colorful/colors.go index 0d5bffe5..17441a8c 100644 --- a/vendor/github.com/lucasb-eyer/go-colorful/colors.go +++ b/vendor/github.com/lucasb-eyer/go-colorful/colors.go @@ -5,6 +5,7 @@ import ( "fmt" "image/color" "math" + "strconv" ) // A color is stored internally using sRGB (standard RGB) values in the range 0-1 @@ -94,15 +95,39 @@ func (c1 Color) DistanceRgb(c2 Color) float64 { return math.Sqrt(sq(c1.R-c2.R) + sq(c1.G-c2.G) + sq(c1.B-c2.B)) } -// DistanceLinearRGB computes the distance between two colors in linear RGB +// DistanceLinearRgb computes the distance between two colors in linear RGB // space. 
This is not useful for measuring how humans perceive color, but // might be useful for other things, like dithering. -func (c1 Color) DistanceLinearRGB(c2 Color) float64 { +func (c1 Color) DistanceLinearRgb(c2 Color) float64 { r1, g1, b1 := c1.LinearRgb() r2, g2, b2 := c2.LinearRgb() return math.Sqrt(sq(r1-r2) + sq(g1-g2) + sq(b1-b2)) } +// DistanceLinearRGB is deprecated in favour of DistanceLinearRgb. +// They do the exact same thing. +func (c1 Color) DistanceLinearRGB(c2 Color) float64 { + return c1.DistanceLinearRgb(c2) +} + +// DistanceRiemersma is a color distance algorithm developed by Thiadmer Riemersma. +// It uses RGB coordinates, but he claims it has similar results to CIELUV. +// This makes it both fast and accurate. +// +// Sources: +// +// https://www.compuphase.com/cmetric.htm +// https://github.com/lucasb-eyer/go-colorful/issues/52 +func (c1 Color) DistanceRiemersma(c2 Color) float64 { + rAvg := (c1.R + c2.R) / 2.0 + // Deltas + dR := c1.R - c2.R + dG := c1.G - c2.G + dB := c1.B - c2.B + + return math.Sqrt((2+rAvg)*dR*dR + 4*dG*dG + (2+(1-rAvg))*dB*dB) +} + // Check for equality between colors within the tolerance Delta (1/255). func (c1 Color) AlmostEqualRgb(c2 Color) bool { return math.Abs(c1.R-c2.R)+ @@ -112,9 +137,11 @@ func (c1 Color) AlmostEqualRgb(c2 Color) bool { // You don't really want to use this, do you? Go for BlendLab, BlendLuv or BlendHcl. func (c1 Color) BlendRgb(c2 Color, t float64) Color { - return Color{c1.R + t*(c2.R-c1.R), + return Color{ + c1.R + t*(c2.R-c1.R), c1.G + t*(c2.G-c1.G), - c1.B + t*(c2.B-c1.B)} + c1.B + t*(c2.B-c1.B), + } } // Utility used by Hxx color-spaces for interpolating between two angles in [0,360]. @@ -128,9 +155,9 @@ func interp_angle(a0, a1, t float64) float64 { /// HSV /// /////////// // From http://en.wikipedia.org/wiki/HSL_and_HSV -// Note that h is in [0..360] and s,v in [0..1] +// Note that h is in [0..359] and s,v in [0..1] -// Hsv returns the Hue [0..360], Saturation and Value [0..1] of the color. +// Hsv returns the Hue [0..359], Saturation and Value [0..1] of the color. func (col Color) Hsv() (h, s, v float64) { min := math.Min(math.Min(col.R, col.G), col.B) v = math.Max(math.Max(col.R, col.G), col.B) @@ -160,7 +187,7 @@ func (col Color) Hsv() (h, s, v float64) { return } -// Hsv creates a new Color given a Hue in [0..360], a Saturation and a Value in [0..1] +// Hsv creates a new Color given a Hue in [0..359], a Saturation and a Value in [0..1] func Hsv(H, S, V float64) Color { Hp := H / 60.0 C := V * S @@ -198,6 +225,13 @@ func (c1 Color) BlendHsv(c2 Color, t float64) Color { h1, s1, v1 := c1.Hsv() h2, s2, v2 := c2.Hsv() + // https://github.com/lucasb-eyer/go-colorful/pull/60 + if s1 == 0 && s2 != 0 { + h1 = h2 + } else if s2 == 0 && s1 != 0 { + h2 = h1 + } + // We know that h are both in [0..360] return Hsv(interp_angle(h1, h2, t), s1+t*(s2-s1), v1+t*(v2-v1)) } @@ -205,7 +239,7 @@ func (c1 Color) BlendHsv(c2 Color, t float64) Color { /// HSL /// /////////// -// Hsl returns the Hue [0..360], Saturation [0..1], and Luminance (lightness) [0..1] of the color. +// Hsl returns the Hue [0..359], Saturation [0..1], and Luminance (lightness) [0..1] of the color. 
func (col Color) Hsl() (h, s, l float64) { min := math.Min(math.Min(col.R, col.G), col.B) max := math.Max(math.Max(col.R, col.G), col.B) @@ -240,7 +274,7 @@ func (col Color) Hsl() (h, s, l float64) { return } -// Hsl creates a new Color given a Hue in [0..360], a Saturation [0..1], and a Luminance (lightness) in [0..1] +// Hsl creates a new Color given a Hue in [0..359], a Saturation [0..1], and a Luminance (lightness) in [0..1] func Hsl(h, s, l float64) Color { if s == 0 { return Color{l, l, l} @@ -331,23 +365,46 @@ func (col Color) Hex() string { // Hex parses a "html" hex color-string, either in the 3 "#f0c" or 6 "#ff1034" digits form. func Hex(scol string) (Color, error) { - format := "#%02x%02x%02x" - factor := 1.0 / 255.0 - if len(scol) == 4 { - format = "#%1x%1x%1x" - factor = 1.0 / 15.0 - } - - var r, g, b uint8 - n, err := fmt.Sscanf(scol, format, &r, &g, &b) - if err != nil { - return Color{}, err - } - if n != 3 { + if scol == "" || scol[0] != '#' { return Color{}, fmt.Errorf("color: %v is not a hex-color", scol) } + var c Color + var err error + switch len(scol) { + case 4: + c, err = parseHexColor(scol[1:2], scol[2:3], scol[3:4], 4, 1.0/15.0) + case 7: + c, err = parseHexColor(scol[1:3], scol[3:5], scol[5:7], 8, 1.0/255.0) + default: + return Color{}, fmt.Errorf("color: %v is not a hex-color", scol) + } + if err != nil { + return Color{}, fmt.Errorf("color: %v is not a hex-color: %w", scol, err) + } + return c, nil +} - return Color{float64(r) * factor, float64(g) * factor, float64(b) * factor}, nil +func parseHexColor(r, g, b string, bits int, factor float64) (Color, error) { + var c Color + var v uint64 + var err error + + if v, err = strconv.ParseUint(r, 16, bits); err != nil { + return Color{}, err + } + c.R = float64(v) * factor + + if v, err = strconv.ParseUint(g, 16, bits); err != nil { + return Color{}, err + } + c.G = float64(v) * factor + + if v, err = strconv.ParseUint(b, 16, bits); err != nil { + return Color{}, err + } + c.B = float64(v) * factor + + return c, err } /// Linear /// @@ -377,7 +434,7 @@ func linearize_fast(v float64) float64 { v2 := v1 * v1 v3 := v2 * v1 v4 := v2 * v2 - //v5 := v3*v2 + // v5 := v3*v2 return -0.248750514614486 + 0.925583310193438*v + 1.16740237321695*v2 + 0.280457026598666*v3 - 0.0757991963780179*v4 //+ 0.0437040411548932*v5 } @@ -450,6 +507,19 @@ func LinearRgbToXyz(r, g, b float64) (x, y, z float64) { return } +// BlendLinearRgb blends two colors in the Linear RGB color-space. +// Unlike BlendRgb, this will not produce dark color around the center. 
+// t == 0 results in c1, t == 1 results in c2 +func (c1 Color) BlendLinearRgb(c2 Color, t float64) Color { + r1, g1, b1 := c1.LinearRgb() + r2, g2, b2 := c2.LinearRgb() + return LinearRgb( + r1+t*(r2-r1), + g1+t*(g2-g1), + b1+t*(b2-b1), + ) +} + /// XYZ /// /////////// // http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/ @@ -784,7 +854,7 @@ func LuvToXyz(l, u, v float64) (x, y, z float64) { } func LuvToXyzWhiteRef(l, u, v float64, wref [3]float64) (x, y, z float64) { - //y = wref[1] * lab_finv((l + 0.16) / 1.16) + // y = wref[1] * lab_finv((l + 0.16) / 1.16) if l <= 0.08 { y = wref[1] * l * 100.0 * 3.0 / 29.0 * 3.0 / 29.0 * 3.0 / 29.0 } else { @@ -913,6 +983,13 @@ func (col1 Color) BlendHcl(col2 Color, t float64) Color { h1, c1, l1 := col1.Hcl() h2, c2, l2 := col2.Hcl() + // https://github.com/lucasb-eyer/go-colorful/pull/60 + if c1 <= 0.00015 && c2 >= 0.00015 { + h1 = h2 + } else if c2 <= 0.00015 && c1 >= 0.00015 { + h2 = h1 + } + // We know that h are both in [0..360] return Hcl(interp_angle(h1, h2, t), c1+t*(c2-c1), l1+t*(l2-l1)).Clamped() } @@ -977,3 +1054,103 @@ func (col1 Color) BlendLuvLCh(col2 Color, t float64) Color { // We know that h are both in [0..360] return LuvLCh(l1+t*(l2-l1), c1+t*(c2-c1), interp_angle(h1, h2, t)) } + +/// OkLab /// +/////////// + +func (col Color) OkLab() (l, a, b float64) { + return XyzToOkLab(col.Xyz()) +} + +func OkLab(l, a, b float64) Color { + return Xyz(OkLabToXyz(l, a, b)) +} + +func XyzToOkLab(x, y, z float64) (l, a, b float64) { + l_ := math.Cbrt(0.8189330101*x + 0.3618667424*y - 0.1288597137*z) + m_ := math.Cbrt(0.0329845436*x + 0.9293118715*y + 0.0361456387*z) + s_ := math.Cbrt(0.0482003018*x + 0.2643662691*y + 0.6338517070*z) + l = 0.2104542553*l_ + 0.7936177850*m_ - 0.0040720468*s_ + a = 1.9779984951*l_ - 2.4285922050*m_ + 0.4505937099*s_ + b = 0.0259040371*l_ + 0.7827717662*m_ - 0.8086757660*s_ + return +} + +func OkLabToXyz(l, a, b float64) (x, y, z float64) { + l_ := 0.9999999984505196*l + 0.39633779217376774*a + 0.2158037580607588*b + m_ := 1.0000000088817607*l - 0.10556134232365633*a - 0.0638541747717059*b + s_ := 1.0000000546724108*l - 0.08948418209496574*a - 1.2914855378640917*b + + ll := math.Pow(l_, 3) + m := math.Pow(m_, 3) + s := math.Pow(s_, 3) + + x = 1.2268798733741557*ll - 0.5578149965554813*m + 0.28139105017721594*s + y = -0.04057576262431372*ll + 1.1122868293970594*m - 0.07171106666151696*s + z = -0.07637294974672142*ll - 0.4214933239627916*m + 1.5869240244272422*s + + return +} + +// BlendOkLab blends two colors in the OkLab color-space, which should result in a better blend (even compared to BlendLab). 
+func (c1 Color) BlendOkLab(c2 Color, t float64) Color { + l1, a1, b1 := c1.OkLab() + l2, a2, b2 := c2.OkLab() + return OkLab(l1+t*(l2-l1), + a1+t*(a2-a1), + b1+t*(b2-b1)) +} + +/// OkLch /// +/////////// + +func (col Color) OkLch() (l, c, h float64) { + return OkLabToOkLch(col.OkLab()) +} + +func OkLch(l, c, h float64) Color { + return Xyz(OkLchToXyz(l, c, h)) +} + +func XyzToOkLch(x, y, z float64) (float64, float64, float64) { + l, c, h := OkLabToOkLch(XyzToOkLab(x, y, z)) + return l, c, h +} + +func OkLchToXyz(l, c, h float64) (float64, float64, float64) { + x, y, z := OkLabToXyz(OkLchToOkLab(l, c, h)) + return x, y, z +} + +func OkLabToOkLch(l, a, b float64) (float64, float64, float64) { + c := math.Sqrt((a * a) + (b * b)) + h := math.Atan2(b, a) + if h < 0 { + h += 2 * math.Pi + } + + return l, c, h * 180 / math.Pi +} + +func OkLchToOkLab(l, c, h float64) (float64, float64, float64) { + h *= math.Pi / 180 + a := c * math.Cos(h) + b := c * math.Sin(h) + return l, a, b +} + +// BlendOkLch blends two colors in the OkLch color-space, which should result in a better blend (even compared to BlendHcl). +func (col1 Color) BlendOkLch(col2 Color, t float64) Color { + l1, c1, h1 := col1.OkLch() + l2, c2, h2 := col2.OkLch() + + // https://github.com/lucasb-eyer/go-colorful/pull/60 + if c1 <= 0.00015 && c2 >= 0.00015 { + h1 = h2 + } else if c2 <= 0.00015 && c1 >= 0.00015 { + h2 = h1 + } + + // We know that h are both in [0..360] + return OkLch(l1+t*(l2-l1), c1+t*(c2-c1), interp_angle(h1, h2, t)).Clamped() +} diff --git a/vendor/github.com/lucasb-eyer/go-colorful/happy_palettegen.go b/vendor/github.com/lucasb-eyer/go-colorful/happy_palettegen.go index bb66dfa4..0cb9286c 100644 --- a/vendor/github.com/lucasb-eyer/go-colorful/happy_palettegen.go +++ b/vendor/github.com/lucasb-eyer/go-colorful/happy_palettegen.go @@ -1,13 +1,9 @@ package colorful -import ( - "math/rand" -) - // Uses the HSV color space to generate colors with similar S,V but distributed // evenly along their Hue. This is fast but not always pretty. // If you've got time to spare, use Lab (the non-fast below). 
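A minimal usage sketch for the OkLab and OkLch blend helpers introduced in the colors.go hunks above, together with the rewritten Hex parser; the hex values and blend factor are illustrative only:

package main

import (
	"fmt"

	colorful "github.com/lucasb-eyer/go-colorful"
)

func main() {
	// Parse one 3-digit and one 6-digit hex color via the new parseHexColor path.
	red, _ := colorful.Hex("#f00")
	blue, _ := colorful.Hex("#0000ff")

	// Blend halfway in the three spaces added or touched by this update.
	fmt.Println(red.BlendLinearRgb(blue, 0.5).Hex())
	fmt.Println(red.BlendOkLab(blue, 0.5).Hex())
	fmt.Println(red.BlendOkLch(blue, 0.5).Hex())
}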
-func FastHappyPalette(colorsCount int) (colors []Color) { +func FastHappyPaletteWithRand(colorsCount int, rand RandInterface) (colors []Color) { colors = make([]Color, colorsCount) for i := 0; i < colorsCount; i++ { @@ -16,10 +12,18 @@ func FastHappyPalette(colorsCount int) (colors []Color) { return } -func HappyPalette(colorsCount int) ([]Color, error) { +func FastHappyPalette(colorsCount int) (colors []Color) { + return FastHappyPaletteWithRand(colorsCount, getDefaultGlobalRand()) +} + +func HappyPaletteWithRand(colorsCount int, rand RandInterface) ([]Color, error) { pimpy := func(l, a, b float64) bool { _, c, _ := LabToHcl(l, a, b) return 0.3 <= c && 0.4 <= l && l <= 0.8 } - return SoftPaletteEx(colorsCount, SoftPaletteSettings{pimpy, 50, true}) + return SoftPaletteExWithRand(colorsCount, SoftPaletteSettings{pimpy, 50, true}, rand) +} + +func HappyPalette(colorsCount int) ([]Color, error) { + return HappyPaletteWithRand(colorsCount, getDefaultGlobalRand()) } diff --git a/vendor/github.com/lucasb-eyer/go-colorful/hexcolor.go b/vendor/github.com/lucasb-eyer/go-colorful/hexcolor.go index 76f31d8f..ad8b06cc 100644 --- a/vendor/github.com/lucasb-eyer/go-colorful/hexcolor.go +++ b/vendor/github.com/lucasb-eyer/go-colorful/hexcolor.go @@ -65,3 +65,23 @@ func (hc *HexColor) Decode(hexCode string) error { *hc = HexColor(col) return nil } + +func (hc HexColor) MarshalYAML() (interface{}, error) { + return Color(hc).Hex(), nil +} + +func (hc *HexColor) UnmarshalYAML(unmarshal func(interface{}) error) error { + var hexCode string + if err := unmarshal(&hexCode); err != nil { + return err + } + + var col, err = Hex(hexCode) + if err != nil { + return err + } + + *hc = HexColor(col) + + return nil +} diff --git a/vendor/github.com/lucasb-eyer/go-colorful/hsluv.go b/vendor/github.com/lucasb-eyer/go-colorful/hsluv.go index d19fb644..cc514882 100644 --- a/vendor/github.com/lucasb-eyer/go-colorful/hsluv.go +++ b/vendor/github.com/lucasb-eyer/go-colorful/hsluv.go @@ -11,7 +11,8 @@ import "math" // comparing to the test values, this modified white reference is used internally. // // See this GitHub thread for details on these values: -// https://github.com/hsluv/hsluv/issues/79 +// +// https://github.com/hsluv/hsluv/issues/79 var hSLuvD65 = [3]float64{0.95045592705167, 1.0, 1.089057750759878} func LuvLChToHSLuv(l, c, h float64) (float64, float64, float64) { @@ -115,7 +116,7 @@ func (col Color) HPLuv() (h, s, l float64) { return LuvLChToHPLuv(col.LuvLChWhiteRef(hSLuvD65)) } -// DistanceHSLuv calculates Euclidan distance in the HSLuv colorspace. No idea +// DistanceHSLuv calculates Euclidean distance in the HSLuv colorspace. No idea // how useful this is. 
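A sketch of the new YAML hooks on HexColor shown above, assuming a YAML package that still honors the legacy MarshalYAML/UnmarshalYAML(func(interface{}) error) interfaces (gopkg.in/yaml.v3 keeps backwards compatibility with them); the Theme struct and field name are invented for illustration:

package main

import (
	"fmt"

	colorful "github.com/lucasb-eyer/go-colorful"
	"gopkg.in/yaml.v3"
)

// Theme is a hypothetical config struct; only HexColor comes from the library.
type Theme struct {
	Accent colorful.HexColor `yaml:"accent"`
}

func main() {
	var t Theme
	// UnmarshalYAML parses the "#rrggbb" string through colorful.Hex.
	if err := yaml.Unmarshal([]byte(`accent: "#ff1034"`), &t); err != nil {
		panic(err)
	}

	// MarshalYAML writes the color back out as its hex string.
	out, err := yaml.Marshal(t)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}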
// // The Hue value is divided by 100 before the calculation, so that H, S, and L diff --git a/vendor/github.com/lucasb-eyer/go-colorful/rand.go b/vendor/github.com/lucasb-eyer/go-colorful/rand.go new file mode 100644 index 00000000..d3a2d5b5 --- /dev/null +++ b/vendor/github.com/lucasb-eyer/go-colorful/rand.go @@ -0,0 +1,22 @@ +package colorful + +import "math/rand" + +type RandInterface interface { + Float64() float64 + Intn(n int) int +} + +type defaultGlobalRand struct{} + +func (df defaultGlobalRand) Float64() float64 { + return rand.Float64() +} + +func (df defaultGlobalRand) Intn(n int) int { + return rand.Intn(n) +} + +func getDefaultGlobalRand() RandInterface { + return defaultGlobalRand{} +} diff --git a/vendor/github.com/lucasb-eyer/go-colorful/soft_palettegen.go b/vendor/github.com/lucasb-eyer/go-colorful/soft_palettegen.go index 9f7bf6f7..6d8aa137 100644 --- a/vendor/github.com/lucasb-eyer/go-colorful/soft_palettegen.go +++ b/vendor/github.com/lucasb-eyer/go-colorful/soft_palettegen.go @@ -6,7 +6,6 @@ package colorful import ( "fmt" "math" - "math/rand" ) // The algorithm works in L*a*b* color space and converts to RGB in the end. @@ -32,7 +31,7 @@ type SoftPaletteSettings struct { // as a new palette of distinctive colors. Falls back to K-medoid if the mean // happens to fall outside of the color-space, which can only happen if you // specify a CheckColor function. -func SoftPaletteEx(colorsCount int, settings SoftPaletteSettings) ([]Color, error) { +func SoftPaletteExWithRand(colorsCount int, settings SoftPaletteSettings, rand RandInterface) ([]Color, error) { // Checks whether it's a valid RGB and also fulfills the potentially provided constraint. check := func(col lab_t) bool { @@ -79,7 +78,7 @@ func SoftPaletteEx(colorsCount int, settings SoftPaletteSettings) ([]Color, erro // The actual k-means/medoid iterations for i := 0; i < settings.Iterations; i++ { - // Reassing the samples to clusters, i.e. to their closest mean. + // Reassigning the samples to clusters, i.e. to their closest mean. // By the way, also check if any sample is used as a medoid and if so, mark that. for isample, sample := range samples { samples_used[isample] = false @@ -100,7 +99,7 @@ func SoftPaletteEx(colorsCount int, settings SoftPaletteSettings) ([]Color, erro // Compute new means according to the samples. for imean := range means { - // The new mean is the average of all samples belonging to it.. + // The new mean is the average of all samples belonging to it. nsamples := 0 newmean := lab_t{0.0, 0.0, 0.0} for isample, sample := range samples { @@ -148,9 +147,17 @@ func SoftPaletteEx(colorsCount int, settings SoftPaletteSettings) ([]Color, erro return labs2cols(means), nil } +func SoftPaletteEx(colorsCount int, settings SoftPaletteSettings) ([]Color, error) { + return SoftPaletteExWithRand(colorsCount, settings, getDefaultGlobalRand()) +} + // A wrapper which uses common parameters. 
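The ...WithRand variants above take the RandInterface from the new rand.go, which a seeded *math/rand.Rand already satisfies (it provides Float64 and Intn); a small sketch of reproducible palette generation, with an arbitrary seed:

package main

import (
	"fmt"
	"math/rand"

	colorful "github.com/lucasb-eyer/go-colorful"
)

func main() {
	// A fixed seed makes the sampling, and thus the palette, reproducible.
	rng := rand.New(rand.NewSource(42))

	pal, err := colorful.HappyPaletteWithRand(5, rng)
	if err != nil {
		panic(err)
	}
	for _, c := range pal {
		fmt.Println(c.Hex())
	}
}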
+func SoftPaletteWithRand(colorsCount int, rand RandInterface) ([]Color, error) { + return SoftPaletteExWithRand(colorsCount, SoftPaletteSettings{nil, 50, false}, rand) +} + func SoftPalette(colorsCount int) ([]Color, error) { - return SoftPaletteEx(colorsCount, SoftPaletteSettings{nil, 50, false}) + return SoftPaletteWithRand(colorsCount, getDefaultGlobalRand()) } func in(haystack []lab_t, upto int, needle lab_t) bool { diff --git a/vendor/github.com/lucasb-eyer/go-colorful/sort.go b/vendor/github.com/lucasb-eyer/go-colorful/sort.go new file mode 100644 index 00000000..b1c1b681 --- /dev/null +++ b/vendor/github.com/lucasb-eyer/go-colorful/sort.go @@ -0,0 +1,191 @@ +// This file provides functions for sorting colors. + +package colorful + +import ( + "math" + "sort" +) + +// An element represents a single element of a set. It is used to +// implement a disjoint-set forest. +type element struct { + parent *element // Parent element + rank int // Rank (approximate depth) of the subtree with this element as root +} + +// newElement creates a singleton set and returns its sole element. +func newElement() *element { + s := &element{} + s.parent = s + return s +} + +// find returns an arbitrary element of a set when invoked on any element of +// the set, The important feature is that it returns the same value when +// invoked on any element of the set. Consequently, it can be used to test if +// two elements belong to the same set. +func (e *element) find() *element { + for e.parent != e { + e.parent = e.parent.parent + e = e.parent + } + return e +} + +// union establishes the union of two sets when given an element from each set. +// Afterwards, the original sets no longer exist as separate entities. +func union(e1, e2 *element) { + // Ensure the two elements aren't already part of the same union. + e1Root := e1.find() + e2Root := e2.find() + if e1Root == e2Root { + return + } + + // Create a union by making the shorter tree point to the root of the + // larger tree. + switch { + case e1Root.rank < e2Root.rank: + e1Root.parent = e2Root + case e1Root.rank > e2Root.rank: + e2Root.parent = e1Root + default: + e2Root.parent = e1Root + e1Root.rank++ + } +} + +// An edgeIdxs describes an edge in a graph or tree. The vertices in the edge +// are indexes into a list of Color values. +type edgeIdxs [2]int + +// An edgeDistance is a map from an edge (pair of indices) to a distance +// between the two vertices. +type edgeDistance map[edgeIdxs]float64 + +// allToAllDistancesCIEDE2000 computes the CIEDE2000 distance between each pair of +// colors. It returns a map from a pair of indices (u, v) with u < v to a +// distance. +func allToAllDistancesCIEDE2000(cs []Color) edgeDistance { + nc := len(cs) + m := make(edgeDistance, nc*nc) + for u := 0; u < nc-1; u++ { + for v := u + 1; v < nc; v++ { + m[edgeIdxs{u, v}] = cs[u].DistanceCIEDE2000(cs[v]) + } + } + return m +} + +// sortEdges sorts all edges in a distance map by increasing vertex distance. +func sortEdges(m edgeDistance) []edgeIdxs { + es := make([]edgeIdxs, 0, len(m)) + for uv := range m { + es = append(es, uv) + } + sort.Slice(es, func(i, j int) bool { + return m[es[i]] < m[es[j]] + }) + return es +} + +// minSpanTree computes a minimum spanning tree from a vertex count and a +// distance-sorted edge list. It returns the subset of edges that belong to +// the tree, including both (u, v) and (v, u) for each edge. +func minSpanTree(nc int, es []edgeIdxs) map[edgeIdxs]struct{} { + // Start with each vertex in its own set. 
+ elts := make([]*element, nc) + for i := range elts { + elts[i] = newElement() + } + + // Run Kruskal's algorithm to construct a minimal spanning tree. + mst := make(map[edgeIdxs]struct{}, nc) + for _, uv := range es { + u, v := uv[0], uv[1] + if elts[u].find() == elts[v].find() { + continue // Same set: edge would introduce a cycle. + } + mst[uv] = struct{}{} + mst[edgeIdxs{v, u}] = struct{}{} + union(elts[u], elts[v]) + } + return mst +} + +// traverseMST walks a minimum spanning tree in prefix order. +func traverseMST(mst map[edgeIdxs]struct{}, root int) []int { + // Compute a list of neighbors for each vertex. + neighs := make(map[int][]int, len(mst)) + for uv := range mst { + u, v := uv[0], uv[1] + neighs[u] = append(neighs[u], v) + } + for u, vs := range neighs { + sort.Ints(vs) + copy(neighs[u], vs) + } + + // Walk the tree from a given vertex. + order := make([]int, 0, len(neighs)) + visited := make(map[int]bool, len(neighs)) + var walkFrom func(int) + walkFrom = func(r int) { + // Visit the starting vertex. + order = append(order, r) + visited[r] = true + + // Recursively visit each child in turn. + for _, c := range neighs[r] { + if !visited[c] { + walkFrom(c) + } + } + } + walkFrom(root) + return order +} + +// Sorted sorts a list of Color values. Sorting is not a well-defined operation +// for colors so the intention here primarily is to order colors so that the +// transition from one to the next is fairly smooth. +func Sorted(cs []Color) []Color { + // Do nothing in trivial cases. + newCs := make([]Color, len(cs)) + if len(cs) < 2 { + copy(newCs, cs) + return newCs + } + + // Compute the distance from each color to every other color. + dists := allToAllDistancesCIEDE2000(cs) + + // Produce a list of edges in increasing order of the distance between + // their vertices. + edges := sortEdges(dists) + + // Construct a minimum spanning tree from the list of edges. + mst := minSpanTree(len(cs), edges) + + // Find the darkest color in the list. + var black Color + var dIdx int // Index of darkest color + light := math.MaxFloat64 // Lightness of darkest color (distance from black) + for i, c := range cs { + d := black.DistanceCIEDE2000(c) + if d < light { + dIdx = i + light = d + } + } + + // Traverse the tree starting from the darkest color. + idxs := traverseMST(mst, dIdx) + + // Convert the index list to a list of colors, overwriting the input. + for i, idx := range idxs { + newCs[i] = cs[idx] + } + return newCs +} diff --git a/vendor/github.com/lucasb-eyer/go-colorful/warm_palettegen.go b/vendor/github.com/lucasb-eyer/go-colorful/warm_palettegen.go index 00f42a5c..d294fb41 100644 --- a/vendor/github.com/lucasb-eyer/go-colorful/warm_palettegen.go +++ b/vendor/github.com/lucasb-eyer/go-colorful/warm_palettegen.go @@ -1,13 +1,9 @@ package colorful -import ( - "math/rand" -) - // Uses the HSV color space to generate colors with similar S,V but distributed // evenly along their Hue. This is fast but not always pretty. // If you've got time to spare, use Lab (the non-fast below). 
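The new sort.go above orders colors along a CIEDE2000 minimum-spanning-tree walk that starts at the darkest entry; a short sketch of feeding a generated palette through it (palette size is arbitrary):

package main

import (
	"fmt"

	colorful "github.com/lucasb-eyer/go-colorful"
)

func main() {
	pal, err := colorful.SoftPalette(6)
	if err != nil {
		panic(err)
	}

	// Sorted returns a new slice whose neighbouring colors are perceptually close.
	for _, c := range colorful.Sorted(pal) {
		fmt.Println(c.Hex())
	}
}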
-func FastWarmPalette(colorsCount int) (colors []Color) { +func FastWarmPaletteWithRand(colorsCount int, rand RandInterface) (colors []Color) { colors = make([]Color, colorsCount) for i := 0; i < colorsCount; i++ { @@ -16,10 +12,18 @@ func FastWarmPalette(colorsCount int) (colors []Color) { return } -func WarmPalette(colorsCount int) ([]Color, error) { +func FastWarmPalette(colorsCount int) (colors []Color) { + return FastWarmPaletteWithRand(colorsCount, getDefaultGlobalRand()) +} + +func WarmPaletteWithRand(colorsCount int, rand RandInterface) ([]Color, error) { warmy := func(l, a, b float64) bool { _, c, _ := LabToHcl(l, a, b) return 0.1 <= c && c <= 0.4 && 0.2 <= l && l <= 0.5 } - return SoftPaletteEx(colorsCount, SoftPaletteSettings{warmy, 50, true}) + return SoftPaletteExWithRand(colorsCount, SoftPaletteSettings{warmy, 50, true}, rand) +} + +func WarmPalette(colorsCount int) ([]Color, error) { + return WarmPaletteWithRand(colorsCount, getDefaultGlobalRand()) } diff --git a/vendor/github.com/mattn/go-runewidth/benchstat.txt b/vendor/github.com/mattn/go-runewidth/benchstat.txt new file mode 100644 index 00000000..a9efdbde --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/benchstat.txt @@ -0,0 +1,43 @@ +goos: darwin +goarch: arm64 +pkg: github.com/mattn/go-runewidth +cpu: Apple M2 + │ old.txt │ new.txt │ + │ sec/op │ sec/op vs base │ +String1WidthAll/regular-8 108.92m ± 0% 35.09m ± 3% -67.78% (p=0.002 n=6) +String1WidthAll/lut-8 93.97m ± 0% 18.70m ± 0% -80.10% (p=0.002 n=6) +String1Width768/regular-8 60.62µ ± 1% 11.54µ ± 0% -80.97% (p=0.002 n=6) +String1Width768/lut-8 60.66µ ± 1% 11.43µ ± 0% -81.16% (p=0.002 n=6) +String1WidthAllEastAsian/regular-8 115.13m ± 1% 40.79m ± 8% -64.57% (p=0.002 n=6) +String1WidthAllEastAsian/lut-8 93.65m ± 0% 18.70m ± 2% -80.03% (p=0.002 n=6) +String1Width768EastAsian/regular-8 75.32µ ± 0% 23.49µ ± 0% -68.82% (p=0.002 n=6) +String1Width768EastAsian/lut-8 60.76µ ± 0% 11.50µ ± 0% -81.07% (p=0.002 n=6) +geomean 2.562m 604.5µ -76.41% + + │ old.txt │ new.txt │ + │ B/op │ B/op vs base │ +String1WidthAll/regular-8 106.3Mi ± 0% 0.0Mi ± 0% -100.00% (p=0.002 n=6) +String1WidthAll/lut-8 106.3Mi ± 0% 0.0Mi ± 0% -100.00% (p=0.002 n=6) +String1Width768/regular-8 75.00Ki ± 0% 0.00Ki ± 0% -100.00% (p=0.002 n=6) +String1Width768/lut-8 75.00Ki ± 0% 0.00Ki ± 0% -100.00% (p=0.002 n=6) +String1WidthAllEastAsian/regular-8 106.3Mi ± 0% 0.0Mi ± 0% -100.00% (p=0.002 n=6) +String1WidthAllEastAsian/lut-8 106.3Mi ± 0% 0.0Mi ± 0% -100.00% (p=0.002 n=6) +String1Width768EastAsian/regular-8 75.00Ki ± 0% 0.00Ki ± 0% -100.00% (p=0.002 n=6) +String1Width768EastAsian/lut-8 75.00Ki ± 0% 0.00Ki ± 0% -100.00% (p=0.002 n=6) +geomean 2.790Mi ? ¹ ² +¹ summaries must be >0 to compute geomean +² ratios must be >0 to compute geomean + + │ old.txt │ new.txt │ + │ allocs/op │ allocs/op vs base │ +String1WidthAll/regular-8 3.342M ± 0% 0.000M ± 0% -100.00% (p=0.002 n=6) +String1WidthAll/lut-8 3.342M ± 0% 0.000M ± 0% -100.00% (p=0.002 n=6) +String1Width768/regular-8 2.304k ± 0% 0.000k ± 0% -100.00% (p=0.002 n=6) +String1Width768/lut-8 2.304k ± 0% 0.000k ± 0% -100.00% (p=0.002 n=6) +String1WidthAllEastAsian/regular-8 3.342M ± 0% 0.000M ± 0% -100.00% (p=0.002 n=6) +String1WidthAllEastAsian/lut-8 3.342M ± 0% 0.000M ± 0% -100.00% (p=0.002 n=6) +String1Width768EastAsian/regular-8 2.304k ± 0% 0.000k ± 0% -100.00% (p=0.002 n=6) +String1Width768EastAsian/lut-8 2.304k ± 0% 0.000k ± 0% -100.00% (p=0.002 n=6) +geomean 87.75k ? 
¹ ² +¹ summaries must be >0 to compute geomean +² ratios must be >0 to compute geomean diff --git a/vendor/github.com/mattn/go-runewidth/new.txt b/vendor/github.com/mattn/go-runewidth/new.txt new file mode 100644 index 00000000..88907125 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/new.txt @@ -0,0 +1,54 @@ +goos: darwin +goarch: arm64 +pkg: github.com/mattn/go-runewidth +cpu: Apple M2 +BenchmarkString1WidthAll/regular-8 33 35033923 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAll/regular-8 33 34965112 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAll/regular-8 33 36307234 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAll/regular-8 33 35007705 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAll/regular-8 33 35154182 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAll/regular-8 34 35155400 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAll/lut-8 63 18688500 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAll/lut-8 63 18712474 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAll/lut-8 63 18700211 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAll/lut-8 62 18694179 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAll/lut-8 62 18708392 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAll/lut-8 63 18770608 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768/regular-8 104137 11526 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768/regular-8 103986 11540 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768/regular-8 104079 11552 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768/regular-8 103963 11530 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768/regular-8 103714 11538 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768/regular-8 104181 11537 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768/lut-8 105150 11420 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768/lut-8 104778 11423 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768/lut-8 105069 11422 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768/lut-8 105127 11475 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768/lut-8 104742 11433 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768/lut-8 105163 11432 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAllEastAsian/regular-8 28 40723347 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAllEastAsian/regular-8 28 40790299 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAllEastAsian/regular-8 28 40801338 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAllEastAsian/regular-8 28 40798216 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAllEastAsian/regular-8 28 44135253 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAllEastAsian/regular-8 28 40779546 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAllEastAsian/lut-8 62 18694165 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAllEastAsian/lut-8 62 18685047 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAllEastAsian/lut-8 62 18689273 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAllEastAsian/lut-8 62 19150346 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAllEastAsian/lut-8 63 19126154 ns/op 0 B/op 0 allocs/op +BenchmarkString1WidthAllEastAsian/lut-8 62 18712619 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768EastAsian/regular-8 50775 23595 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768EastAsian/regular-8 51061 23563 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768EastAsian/regular-8 51057 23492 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768EastAsian/regular-8 51138 23445 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768EastAsian/regular-8 51195 23469 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768EastAsian/regular-8 51087 23482 ns/op 0 
B/op 0 allocs/op +BenchmarkString1Width768EastAsian/lut-8 104559 11549 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768EastAsian/lut-8 104508 11483 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768EastAsian/lut-8 104296 11503 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768EastAsian/lut-8 104606 11485 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768EastAsian/lut-8 104588 11495 ns/op 0 B/op 0 allocs/op +BenchmarkString1Width768EastAsian/lut-8 104602 11518 ns/op 0 B/op 0 allocs/op +PASS +ok github.com/mattn/go-runewidth 64.455s diff --git a/vendor/github.com/mattn/go-runewidth/old.txt b/vendor/github.com/mattn/go-runewidth/old.txt new file mode 100644 index 00000000..5b9ac164 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/old.txt @@ -0,0 +1,54 @@ +goos: darwin +goarch: arm64 +pkg: github.com/mattn/go-runewidth +cpu: Apple M2 +BenchmarkString1WidthAll/regular-8 10 108559258 ns/op 111412145 B/op 3342342 allocs/op +BenchmarkString1WidthAll/regular-8 10 108968079 ns/op 111412364 B/op 3342343 allocs/op +BenchmarkString1WidthAll/regular-8 10 108890338 ns/op 111412388 B/op 3342344 allocs/op +BenchmarkString1WidthAll/regular-8 10 108940704 ns/op 111412584 B/op 3342346 allocs/op +BenchmarkString1WidthAll/regular-8 10 108632796 ns/op 111412348 B/op 3342343 allocs/op +BenchmarkString1WidthAll/regular-8 10 109354546 ns/op 111412777 B/op 3342343 allocs/op +BenchmarkString1WidthAll/lut-8 12 93844406 ns/op 111412569 B/op 3342345 allocs/op +BenchmarkString1WidthAll/lut-8 12 93991080 ns/op 111412512 B/op 3342344 allocs/op +BenchmarkString1WidthAll/lut-8 12 93980632 ns/op 111412413 B/op 3342343 allocs/op +BenchmarkString1WidthAll/lut-8 12 94004083 ns/op 111412396 B/op 3342343 allocs/op +BenchmarkString1WidthAll/lut-8 12 93959795 ns/op 111412445 B/op 3342343 allocs/op +BenchmarkString1WidthAll/lut-8 12 93846198 ns/op 111412556 B/op 3342345 allocs/op +BenchmarkString1Width768/regular-8 19785 60696 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768/regular-8 19824 60520 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768/regular-8 19832 60547 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768/regular-8 19778 60543 ns/op 76800 B/op 2304 allocs/op +BenchmarkString1Width768/regular-8 19842 61142 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768/regular-8 19780 60696 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768/lut-8 19598 61161 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768/lut-8 19731 60707 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768/lut-8 19738 60626 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768/lut-8 19764 60670 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768/lut-8 19797 60642 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768/lut-8 19738 60608 ns/op 76800 B/op 2304 allocs/op +BenchmarkString1WidthAllEastAsian/regular-8 9 115080431 ns/op 111412458 B/op 3342345 allocs/op +BenchmarkString1WidthAllEastAsian/regular-8 9 114908880 ns/op 111412476 B/op 3342345 allocs/op +BenchmarkString1WidthAllEastAsian/regular-8 9 115077134 ns/op 111412540 B/op 3342345 allocs/op +BenchmarkString1WidthAllEastAsian/regular-8 9 115175292 ns/op 111412467 B/op 3342345 allocs/op +BenchmarkString1WidthAllEastAsian/regular-8 9 115792653 ns/op 111412362 B/op 3342344 allocs/op +BenchmarkString1WidthAllEastAsian/regular-8 9 115255417 ns/op 111412572 B/op 3342346 allocs/op +BenchmarkString1WidthAllEastAsian/lut-8 12 93761542 ns/op 111412538 B/op 3342345 allocs/op +BenchmarkString1WidthAllEastAsian/lut-8 12 94089990 ns/op 111412440 
B/op 3342343 allocs/op +BenchmarkString1WidthAllEastAsian/lut-8 12 93721410 ns/op 111412514 B/op 3342344 allocs/op +BenchmarkString1WidthAllEastAsian/lut-8 12 93572951 ns/op 111412329 B/op 3342342 allocs/op +BenchmarkString1WidthAllEastAsian/lut-8 12 93536052 ns/op 111412206 B/op 3342341 allocs/op +BenchmarkString1WidthAllEastAsian/lut-8 12 93532365 ns/op 111412412 B/op 3342343 allocs/op +BenchmarkString1Width768EastAsian/regular-8 15904 75401 ns/op 76800 B/op 2304 allocs/op +BenchmarkString1Width768EastAsian/regular-8 15932 75449 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768EastAsian/regular-8 15944 75181 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768EastAsian/regular-8 15963 75311 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768EastAsian/regular-8 15879 75292 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768EastAsian/regular-8 15955 75334 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768EastAsian/lut-8 19692 60692 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768EastAsian/lut-8 19712 60699 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768EastAsian/lut-8 19741 60819 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768EastAsian/lut-8 19771 60653 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768EastAsian/lut-8 19737 61027 ns/op 76801 B/op 2304 allocs/op +BenchmarkString1Width768EastAsian/lut-8 19657 60820 ns/op 76801 B/op 2304 allocs/op +PASS +ok github.com/mattn/go-runewidth 76.165s diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go index 7dfbb3be..0edabac3 100644 --- a/vendor/github.com/mattn/go-runewidth/runewidth.go +++ b/vendor/github.com/mattn/go-runewidth/runewidth.go @@ -4,7 +4,7 @@ import ( "os" "strings" - "github.com/rivo/uniseg" + "github.com/clipperhouse/uax29/v2/graphemes" ) //go:generate go run script/generate.go @@ -64,6 +64,9 @@ func inTable(r rune, t table) bool { if r < t[0].first { return false } + if r > t[len(t)-1].last { + return false + } bot := 0 top := len(t) - 1 @@ -175,10 +178,10 @@ func (c *Condition) CreateLUT() { // StringWidth return width as you can see func (c *Condition) StringWidth(s string) (width int) { - g := uniseg.NewGraphemes(s) + g := graphemes.FromString(s) for g.Next() { var chWidth int - for _, r := range g.Runes() { + for _, r := range g.Value() { chWidth = c.RuneWidth(r) if chWidth > 0 { break // Our best guess at this point is to use the width of the first non-zero-width rune. @@ -197,17 +200,17 @@ func (c *Condition) Truncate(s string, w int, tail string) string { w -= c.StringWidth(tail) var width int pos := len(s) - g := uniseg.NewGraphemes(s) + g := graphemes.FromString(s) for g.Next() { var chWidth int - for _, r := range g.Runes() { + for _, r := range g.Value() { chWidth = c.RuneWidth(r) if chWidth > 0 { break // See StringWidth() for details. } } if width+chWidth > w { - pos, _ = g.Positions() + pos = g.Start() break } width += chWidth @@ -224,10 +227,10 @@ func (c *Condition) TruncateLeft(s string, w int, prefix string) string { var width int pos := len(s) - g := uniseg.NewGraphemes(s) + g := graphemes.FromString(s) for g.Next() { var chWidth int - for _, r := range g.Runes() { + for _, r := range g.Value() { chWidth = c.RuneWidth(r) if chWidth > 0 { break // See StringWidth() for details. 
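The go-runewidth hunks above swap the grapheme iterator but keep the public API; a minimal sketch of the package-level helpers that build on it (the sample string and widths are illustrative):

package main

import (
	"fmt"

	"github.com/mattn/go-runewidth"
)

func main() {
	s := "コンニチハ, world!"

	// StringWidth counts terminal cells per grapheme cluster, so double-cell
	// runes such as the katakana above are counted as width 2.
	fmt.Println(runewidth.StringWidth(s))

	// Truncate cuts at a printable width and appends the tail when it had to cut.
	fmt.Println(runewidth.Truncate(s, 10, "..."))
}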
@@ -236,10 +239,10 @@ func (c *Condition) TruncateLeft(s string, w int, prefix string) string { if width+chWidth > w { if width < w { - _, pos = g.Positions() + pos = g.End() prefix += strings.Repeat(" ", width+chWidth-w) } else { - pos, _ = g.Positions() + pos = g.Start() } break diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go index 5f987a31..951500a2 100644 --- a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go +++ b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go @@ -4,6 +4,7 @@ package runewidth import ( + "os" "syscall" ) @@ -14,6 +15,11 @@ var ( // IsEastAsian return true if the current locale is CJK func IsEastAsian() bool { + if os.Getenv("WT_SESSION") != "" { + // Windows Terminal always not use East Asian Ambiguous Width(s). + return false + } + r1, _, _ := procGetConsoleOutputCP.Call() if r1 == 0 { return false diff --git a/vendor/github.com/muesli/reflow/LICENSE b/vendor/github.com/muesli/reflow/LICENSE new file mode 100644 index 00000000..8532c45c --- /dev/null +++ b/vendor/github.com/muesli/reflow/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Christian Muehlhaeuser + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/muesli/reflow/ansi/ansi.go b/vendor/github.com/muesli/reflow/ansi/ansi.go new file mode 100644 index 00000000..f3d0700a --- /dev/null +++ b/vendor/github.com/muesli/reflow/ansi/ansi.go @@ -0,0 +1,7 @@ +package ansi + +const Marker = '\x1B' + +func IsTerminator(c rune) bool { + return (c >= 0x40 && c <= 0x5a) || (c >= 0x61 && c <= 0x7a) +} diff --git a/vendor/github.com/muesli/reflow/ansi/buffer.go b/vendor/github.com/muesli/reflow/ansi/buffer.go new file mode 100644 index 00000000..471bcaf7 --- /dev/null +++ b/vendor/github.com/muesli/reflow/ansi/buffer.go @@ -0,0 +1,40 @@ +package ansi + +import ( + "bytes" + + "github.com/mattn/go-runewidth" +) + +// Buffer is a buffer aware of ANSI escape sequences. +type Buffer struct { + bytes.Buffer +} + +// PrintableRuneWidth returns the cell width of all printable runes in the +// buffer. +func (w Buffer) PrintableRuneWidth() int { + return PrintableRuneWidth(w.String()) +} + +// PrintableRuneWidth returns the cell width of the given string. 
+func PrintableRuneWidth(s string) int { + var n int + var ansi bool + + for _, c := range s { + if c == Marker { + // ANSI escape sequence + ansi = true + } else if ansi { + if IsTerminator(c) { + // ANSI sequence terminated + ansi = false + } + } else { + n += runewidth.RuneWidth(c) + } + } + + return n +} diff --git a/vendor/github.com/muesli/reflow/ansi/writer.go b/vendor/github.com/muesli/reflow/ansi/writer.go new file mode 100644 index 00000000..a6aaa1ec --- /dev/null +++ b/vendor/github.com/muesli/reflow/ansi/writer.go @@ -0,0 +1,76 @@ +package ansi + +import ( + "bytes" + "io" + "unicode/utf8" +) + +type Writer struct { + Forward io.Writer + + ansi bool + ansiseq bytes.Buffer + lastseq bytes.Buffer + seqchanged bool + runeBuf []byte +} + +// Write is used to write content to the ANSI buffer. +func (w *Writer) Write(b []byte) (int, error) { + for _, c := range string(b) { + if c == Marker { + // ANSI escape sequence + w.ansi = true + w.seqchanged = true + _, _ = w.ansiseq.WriteRune(c) + } else if w.ansi { + _, _ = w.ansiseq.WriteRune(c) + if IsTerminator(c) { + // ANSI sequence terminated + w.ansi = false + + if bytes.HasSuffix(w.ansiseq.Bytes(), []byte("[0m")) { + // reset sequence + w.lastseq.Reset() + w.seqchanged = false + } else if c == 'm' { + // color code + _, _ = w.lastseq.Write(w.ansiseq.Bytes()) + } + + _, _ = w.ansiseq.WriteTo(w.Forward) + } + } else { + _, err := w.writeRune(c) + if err != nil { + return 0, err + } + } + } + + return len(b), nil +} + +func (w *Writer) writeRune(r rune) (int, error) { + if w.runeBuf == nil { + w.runeBuf = make([]byte, utf8.UTFMax) + } + n := utf8.EncodeRune(w.runeBuf, r) + return w.Forward.Write(w.runeBuf[:n]) +} + +func (w *Writer) LastSequence() string { + return w.lastseq.String() +} + +func (w *Writer) ResetAnsi() { + if !w.seqchanged { + return + } + _, _ = w.Forward.Write([]byte("\x1b[0m")) +} + +func (w *Writer) RestoreAnsi() { + _, _ = w.Forward.Write(w.lastseq.Bytes()) +} diff --git a/vendor/github.com/muesli/reflow/truncate/truncate.go b/vendor/github.com/muesli/reflow/truncate/truncate.go new file mode 100644 index 00000000..5aab5f89 --- /dev/null +++ b/vendor/github.com/muesli/reflow/truncate/truncate.go @@ -0,0 +1,120 @@ +package truncate + +import ( + "bytes" + "io" + + "github.com/mattn/go-runewidth" + + "github.com/muesli/reflow/ansi" +) + +type Writer struct { + width uint + tail string + + ansiWriter *ansi.Writer + buf bytes.Buffer + ansi bool +} + +func NewWriter(width uint, tail string) *Writer { + w := &Writer{ + width: width, + tail: tail, + } + w.ansiWriter = &ansi.Writer{ + Forward: &w.buf, + } + return w +} + +func NewWriterPipe(forward io.Writer, width uint, tail string) *Writer { + return &Writer{ + width: width, + tail: tail, + ansiWriter: &ansi.Writer{ + Forward: forward, + }, + } +} + +// Bytes is shorthand for declaring a new default truncate-writer instance, +// used to immediately truncate a byte slice. +func Bytes(b []byte, width uint) []byte { + return BytesWithTail(b, width, []byte("")) +} + +// Bytes is shorthand for declaring a new default truncate-writer instance, +// used to immediately truncate a byte slice. A tail is then added to the +// end of the byte slice. +func BytesWithTail(b []byte, width uint, tail []byte) []byte { + f := NewWriter(width, string(tail)) + _, _ = f.Write(b) + + return f.Bytes() +} + +// String is shorthand for declaring a new default truncate-writer instance, +// used to immediately truncate a string. 
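A usage sketch for the vendored reflow packages added above, combining ansi.PrintableRuneWidth with the truncate writer shorthands; the colored sample string is illustrative:

package main

import (
	"fmt"

	"github.com/muesli/reflow/ansi"
	"github.com/muesli/reflow/truncate"
)

func main() {
	colored := "\x1b[31mhello, world\x1b[0m"

	// PrintableRuneWidth skips the escape sequences and counts only visible cells.
	fmt.Println(ansi.PrintableRuneWidth(colored)) // 12

	// StringWithTail truncates at a printable width, appends the tail, and
	// leaves the escape sequences intact.
	fmt.Println(truncate.StringWithTail(colored, 8, "…"))
}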
+func String(s string, width uint) string { + return StringWithTail(s, width, "") +} + +// StringWithTail is shorthand for declaring a new default truncate-writer instance, +// used to immediately truncate a string. A tail is then added to the end of the +// string. +func StringWithTail(s string, width uint, tail string) string { + return string(BytesWithTail([]byte(s), width, []byte(tail))) +} + +// Write truncates content at the given printable cell width, leaving any +// ansi sequences intact. +func (w *Writer) Write(b []byte) (int, error) { + tw := ansi.PrintableRuneWidth(w.tail) + if w.width < uint(tw) { + return w.buf.WriteString(w.tail) + } + + w.width -= uint(tw) + var curWidth uint + + for _, c := range string(b) { + if c == ansi.Marker { + // ANSI escape sequence + w.ansi = true + } else if w.ansi { + if ansi.IsTerminator(c) { + // ANSI sequence terminated + w.ansi = false + } + } else { + curWidth += uint(runewidth.RuneWidth(c)) + } + + if curWidth > w.width { + n, err := w.buf.WriteString(w.tail) + if w.ansiWriter.LastSequence() != "" { + w.ansiWriter.ResetAnsi() + } + return n, err + } + + _, err := w.ansiWriter.Write([]byte(string(c))) + if err != nil { + return 0, err + } + } + + return len(b), nil +} + +// Bytes returns the truncated result as a byte slice. +func (w *Writer) Bytes() []byte { + return w.buf.Bytes() +} + +// String returns the truncated result as a string. +func (w *Writer) String() string { + return w.buf.String() +} diff --git a/vendor/github.com/muesli/reflow/wordwrap/wordwrap.go b/vendor/github.com/muesli/reflow/wordwrap/wordwrap.go new file mode 100644 index 00000000..488fb210 --- /dev/null +++ b/vendor/github.com/muesli/reflow/wordwrap/wordwrap.go @@ -0,0 +1,167 @@ +package wordwrap + +import ( + "bytes" + "strings" + "unicode" + + "github.com/muesli/reflow/ansi" +) + +var ( + defaultBreakpoints = []rune{'-'} + defaultNewline = []rune{'\n'} +) + +// WordWrap contains settings and state for customisable text reflowing with +// support for ANSI escape sequences. This means you can style your terminal +// output without affecting the word wrapping algorithm. +type WordWrap struct { + Limit int + Breakpoints []rune + Newline []rune + KeepNewlines bool + + buf bytes.Buffer + space bytes.Buffer + word ansi.Buffer + + lineLen int + ansi bool +} + +// NewWriter returns a new instance of a word-wrapping writer, initialized with +// default settings. +func NewWriter(limit int) *WordWrap { + return &WordWrap{ + Limit: limit, + Breakpoints: defaultBreakpoints, + Newline: defaultNewline, + KeepNewlines: true, + } +} + +// Bytes is shorthand for declaring a new default WordWrap instance, +// used to immediately word-wrap a byte slice. +func Bytes(b []byte, limit int) []byte { + f := NewWriter(limit) + _, _ = f.Write(b) + _ = f.Close() + + return f.Bytes() +} + +// String is shorthand for declaring a new default WordWrap instance, +// used to immediately word-wrap a string. 
+func String(s string, limit int) string { + return string(Bytes([]byte(s), limit)) +} + +func (w *WordWrap) addSpace() { + w.lineLen += w.space.Len() + _, _ = w.buf.Write(w.space.Bytes()) + w.space.Reset() +} + +func (w *WordWrap) addWord() { + if w.word.Len() > 0 { + w.addSpace() + w.lineLen += w.word.PrintableRuneWidth() + _, _ = w.buf.Write(w.word.Bytes()) + w.word.Reset() + } +} + +func (w *WordWrap) addNewLine() { + _, _ = w.buf.WriteRune('\n') + w.lineLen = 0 + w.space.Reset() +} + +func inGroup(a []rune, c rune) bool { + for _, v := range a { + if v == c { + return true + } + } + return false +} + +// Write is used to write more content to the word-wrap buffer. +func (w *WordWrap) Write(b []byte) (int, error) { + if w.Limit == 0 { + return w.buf.Write(b) + } + + s := string(b) + if !w.KeepNewlines { + s = strings.Replace(strings.TrimSpace(s), "\n", " ", -1) + } + + for _, c := range s { + if c == '\x1B' { + // ANSI escape sequence + _, _ = w.word.WriteRune(c) + w.ansi = true + } else if w.ansi { + _, _ = w.word.WriteRune(c) + if (c >= 0x40 && c <= 0x5a) || (c >= 0x61 && c <= 0x7a) { + // ANSI sequence terminated + w.ansi = false + } + } else if inGroup(w.Newline, c) { + // end of current line + // see if we can add the content of the space buffer to the current line + if w.word.Len() == 0 { + if w.lineLen+w.space.Len() > w.Limit { + w.lineLen = 0 + } else { + // preserve whitespace + _, _ = w.buf.Write(w.space.Bytes()) + } + w.space.Reset() + } + + w.addWord() + w.addNewLine() + } else if unicode.IsSpace(c) { + // end of current word + w.addWord() + _, _ = w.space.WriteRune(c) + } else if inGroup(w.Breakpoints, c) { + // valid breakpoint + w.addSpace() + w.addWord() + _, _ = w.buf.WriteRune(c) + } else { + // any other character + _, _ = w.word.WriteRune(c) + + // add a line break if the current word would exceed the line's + // character limit + if w.lineLen+w.space.Len()+w.word.PrintableRuneWidth() > w.Limit && + w.word.PrintableRuneWidth() < w.Limit { + w.addNewLine() + } + } + } + + return len(b), nil +} + +// Close will finish the word-wrap operation. Always call it before trying to +// retrieve the final result. +func (w *WordWrap) Close() error { + w.addWord() + return nil +} + +// Bytes returns the word-wrapped result as a byte slice. +func (w *WordWrap) Bytes() []byte { + return w.buf.Bytes() +} + +// String returns the word-wrapped result as a string. 
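The wordwrap package above exposes the same shorthand pattern; a minimal sketch (the limit is chosen arbitrarily):

package main

import (
	"fmt"

	"github.com/muesli/reflow/wordwrap"
)

func main() {
	// Wrap at 16 printable cells; ANSI sequences in the input would not count
	// towards the limit.
	fmt.Println(wordwrap.String("The quick brown fox jumped over the lazy dog.", 16))
}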
+func (w *WordWrap) String() string { + return w.buf.String() +} diff --git a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm index f1199315..cbbb007e 100644 --- a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm +++ b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm @@ -1,4 +1,4 @@ -FROM golang:1.24@sha256:20a022e5112a144aa7b7aeb3f22ebf2cdaefcc4aac0d64e8deeee8cdc18b9c0f +FROM golang:1.24@sha256:14fd8a55e59a560704e5fc44970b301d00d344e45d6b914dda228e09f359a088 ENV GOOS=linux ENV GOARCH=arm diff --git a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 index fa63a029..93b2bb1e 100644 --- a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 +++ b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 @@ -1,4 +1,4 @@ -FROM golang:1.24@sha256:20a022e5112a144aa7b7aeb3f22ebf2cdaefcc4aac0d64e8deeee8cdc18b9c0f +FROM golang:1.24@sha256:14fd8a55e59a560704e5fc44970b301d00d344e45d6b914dda228e09f359a088 ENV GOOS=linux ENV GOARCH=arm64 diff --git a/vendor/github.com/pjbgf/sha1cd/Makefile b/vendor/github.com/pjbgf/sha1cd/Makefile index a9a738b9..e746d62a 100644 --- a/vendor/github.com/pjbgf/sha1cd/Makefile +++ b/vendor/github.com/pjbgf/sha1cd/Makefile @@ -31,9 +31,6 @@ build-nocgo: # Run cross-compilation to assure supported architectures. cross-build: build-arm build-arm64 build-nocgo -generate: - go generate -x ./... - -verify: generate +verify: git diff --exit-code go vet ./... diff --git a/vendor/github.com/pjbgf/sha1cd/README.md b/vendor/github.com/pjbgf/sha1cd/README.md index 378cf78c..f3ae7329 100644 --- a/vendor/github.com/pjbgf/sha1cd/README.md +++ b/vendor/github.com/pjbgf/sha1cd/README.md @@ -6,8 +6,7 @@ collision attacks. The `cgo/lib` code is a carbon copy of the [original code], based on the award winning [white paper] by Marc Stevens. -The Go implementation is largely based off Go's generic sha1. -At present no SIMD optimisations have been implemented. +The Go and native implementations are largely based off upstream Go. ## Usage diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cd.go b/vendor/github.com/pjbgf/sha1cd/sha1cd.go index 509569f6..b8d2890a 100644 --- a/vendor/github.com/pjbgf/sha1cd/sha1cd.go +++ b/vendor/github.com/pjbgf/sha1cd/sha1cd.go @@ -20,8 +20,6 @@ import ( shared "github.com/pjbgf/sha1cd/internal" ) -//go:generate go run -C asm . -out ../sha1cdblock_amd64.s -pkg $GOPACKAGE - func init() { crypto.RegisterHash(crypto.SHA1, New) } @@ -40,8 +38,7 @@ type digest struct { len uint64 // col defines whether a collision has been found. - col bool - blockFunc func(dig *digest, p []byte) + col bool } func (d *digest) MarshalBinary() ([]byte, error) { @@ -101,7 +98,7 @@ func (d *digest) UnmarshalBinary(b []byte) error { func consumeUint64(b []byte) ([]byte, uint64) { _ = b[7] - x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[shared.WordBuffers])<<16 | uint64(b[4])<<24 | + x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 return b[8:], x } @@ -128,21 +125,9 @@ func (d *digest) Reset() { // implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to // marshal and unmarshal the internal state of the hash. func New() hash.Hash { - d := new(digest) - - d.blockFunc = block + var d digest d.Reset() - return d -} - -// NewGeneric is equivalent to New but uses the Go generic implementation, -// avoiding any processor-specific optimizations. 
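The sha1cd changes above drop the pluggable block function but keep the public surface; a sketch of the two entry points as declared in sha1cd.go (the input bytes are arbitrary):

package main

import (
	"fmt"

	"github.com/pjbgf/sha1cd"
)

func main() {
	// Sum hashes the input and also reports whether a SHA-1 collision pattern
	// was detected while hashing.
	sum, collision := sha1cd.Sum([]byte("hello world"))
	fmt.Printf("%x collision=%v\n", sum, collision)

	// New returns a standard hash.Hash, so it can replace crypto/sha1 directly.
	h := sha1cd.New()
	h.Write([]byte("hello world"))
	fmt.Printf("%x\n", h.Sum(nil))
}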
-func NewGeneric() hash.Hash { - d := new(digest) - - d.blockFunc = blockGeneric - d.Reset() - return d + return &d } func (d *digest) Size() int { return Size } @@ -160,14 +145,14 @@ func (d *digest) Write(p []byte) (nn int, err error) { n := copy(d.x[d.nx:], p) d.nx += n if d.nx == shared.Chunk { - d.blockFunc(d, d.x[:]) + block(d, d.x[:]) d.nx = 0 } p = p[n:] } if len(p) >= shared.Chunk { n := len(p) &^ (shared.Chunk - 1) - d.blockFunc(d, p[:n]) + block(d, p[:n]) p = p[n:] } if len(p) > 0 { @@ -186,18 +171,20 @@ func (d *digest) Sum(in []byte) []byte { func (d *digest) checkSum() [Size]byte { len := d.len // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. - var tmp [64]byte + var tmp [64 + 8]byte tmp[0] = 0x80 + var t uint64 if len%64 < 56 { - d.Write(tmp[0 : 56-len%64]) + t = 56 - len%64 } else { - d.Write(tmp[0 : 64+56-len%64]) + t = 64 + 56 - len%64 } // Length in bits. len <<= 3 - binary.BigEndian.PutUint64(tmp[:], len) - d.Write(tmp[0:8]) + padlen := tmp[:t+8] + binary.BigEndian.PutUint64(tmp[t:], len) + d.Write(padlen) if d.nx != 0 { panic("d.nx != 0") @@ -216,7 +203,8 @@ func (d *digest) checkSum() [Size]byte { // Sum returns the SHA-1 checksum of the data. func Sum(data []byte) ([Size]byte, bool) { - d := New().(*digest) + var d digest + d.Reset() d.Write(data) return d.checkSum(), d.col } diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.go b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.go index 7ee0f20a..7b3ad25b 100644 --- a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.go +++ b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.go @@ -4,39 +4,47 @@ package sha1cd import ( - "math" - "unsafe" + "runtime" + "github.com/klauspost/cpuid/v2" shared "github.com/pjbgf/sha1cd/internal" ) -// blockAMD64 hashes the message p into the current state in dig. +var hasSHANI = (runtime.GOARCH == "amd64" && + cpuid.CPU.Supports(cpuid.AVX) && + cpuid.CPU.Supports(cpuid.SHA) && + cpuid.CPU.Supports(cpuid.SSE3) && + cpuid.CPU.Supports(cpuid.SSE4)) + +// blockAMD64 hashes the message p into the current state in h. // Both m1 and cs are used to store intermediate results which are used by the collision detection logic. // //go:noescape -func blockAMD64(dig *digest, p sliceHeader, m1 []uint32, cs [][5]uint32) +func blockAMD64(h []uint32, p []byte, m1 []uint32, cs [][5]uint32) func block(dig *digest, p []byte) { + if forceGeneric || !hasSHANI { + blockGeneric(dig, p) + return + } + m1 := [shared.Rounds]uint32{} cs := [shared.PreStepState][shared.WordBuffers]uint32{} for len(p) >= shared.Chunk { - // Only send a block to be processed, as the collission detection - // works on a block by block basis. - ips := sliceHeader{ - base: uintptr(unsafe.Pointer(&p[0])), - len: int(math.Min(float64(len(p)), float64(shared.Chunk))), - cap: shared.Chunk, - } + // The assembly code only supports processing a block at a time, + // so adjust the chunk accordingly. 
+ chunk := p[:shared.Chunk] - blockAMD64(dig, ips, m1[:], cs[:]) + blockAMD64(dig.h[:], chunk, m1[:], cs[:]) + rectifyCompressionState(m1, &cs) col := checkCollision(m1, cs, dig.h) if col { dig.col = true - blockAMD64(dig, ips, m1[:], cs[:]) - blockAMD64(dig, ips, m1[:], cs[:]) + blockAMD64(dig.h[:], chunk, m1[:], cs[:]) + blockAMD64(dig.h[:], chunk, m1[:], cs[:]) } p = p[shared.Chunk:] diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s index fa5022be..e2725f3c 100644 --- a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s +++ b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s @@ -1,2272 +1,274 @@ -// Code generated by command: go run asm.go -out ../sha1cdblock_amd64.s -pkg sha1cd. DO NOT EDIT. - //go:build !noasm && gc && amd64 && !arm64 #include "textflag.h" -// func blockAMD64(dig *digest, p []byte, m1 []uint32, cs [][5]uint32) -TEXT ·blockAMD64(SB), NOSPLIT, $64-80 - MOVQ dig+0(FP), R8 - MOVQ p_base+8(FP), SI - MOVQ p_len+16(FP), DX - SHRQ $+6, DX - SHLQ $+6, DX - LEAQ (SI)(DX*1), DI +// License information for the original SHA1 arm64 implemention: +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found at: +// - https://github.com/golang/go/blob/master/LICENSE +// +// Reference implementations: +// - https://github.com/golang/go/blob/master/src/crypto/sha1/sha1block_amd64.s - // Load h0, h1, h2, h3, h4. - MOVL (R8), AX - MOVL 4(R8), BX - MOVL 8(R8), CX - MOVL 12(R8), DX - MOVL 16(R8), BP +#define LOADCS(abcd, e, index, target) \ + VPEXTRD $3, abcd, ((index*20)+0)(target); \ + VPEXTRD $2, abcd, ((index*20)+4)(target); \ + VPEXTRD $1, abcd, ((index*20)+8)(target); \ + VPEXTRD $0, abcd, ((index*20)+12)(target); \ + MOVL e, ((index*20)+16)(target); - // len(p) >= chunk - CMPQ DI, SI - JBE end +#define LOADM1(m1, index, target) \ + VPSHUFD $0x1B, m1, X8; \ + VMOVDQU X8, ((index*16)+0)(target); + +// func blockAMD64(h []uint32, p []byte, m1 []uint32, cs [][5]uint32) +// Requires: AVX, SHA, SSE2, SSE4.1, SSSE3 +TEXT ·blockAMD64(SB), NOSPLIT, $80-96 + MOVQ h_base+0(FP), DI + MOVQ p_base+24(FP), SI + MOVQ p_len+32(FP), DX + MOVQ m1_base+48(FP), R13 + MOVQ cs_base+72(FP), R15 + CMPQ DX, $0x00 + JEQ done + ADDQ SI, DX + + // Allocate space on the stack for saving ABCD and E0, and align it to 16 bytes + LEAQ 15(SP), AX + MOVQ $0x000000000000000f, CX + NOTQ CX + ANDQ CX, AX + + // Load initial hash state + PINSRD $0x03, 16(DI), X5 + VMOVDQU (DI), X0 + PAND upper_mask<>+0(SB), X5 + PSHUFD $0x1b, X0, X0 + VMOVDQA shuffle_mask<>+0(SB), X7 loop: - // Initialize registers a, b, c, d, e. 
- MOVL AX, R9 - MOVL BX, R10 - MOVL CX, R11 - MOVL DX, R12 - MOVL BP, R13 - - // ROUND1 (steps 0-15) - // Load cs - MOVQ cs_base+56(FP), R8 - MOVL R9, (R8) - MOVL R10, 4(R8) - MOVL R11, 8(R8) - MOVL R12, 12(R8) - MOVL R13, 16(R8) - - // ROUND1(0) - // LOAD - MOVL (SI), DI - BSWAPL DI - MOVL DI, (SP) - - // FUNC1 - MOVL R12, R14 - XORL R11, R14 - ANDL R10, R14 - XORL R12, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 - MOVL R9, R8 - ROLL $+5, R8 - LEAL 1518500249(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL (SP), DI - MOVL DI, (R8) - - // ROUND1(1) - // LOAD - MOVL 4(SI), DI - BSWAPL DI - MOVL DI, 4(SP) - - // FUNC1 - MOVL R11, R14 - XORL R10, R14 - ANDL R9, R14 - XORL R11, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 1518500249(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 4(SP), DI - MOVL DI, 4(R8) - - // ROUND1(2) - // LOAD - MOVL 8(SI), DI - BSWAPL DI - MOVL DI, 8(SP) - - // FUNC1 - MOVL R10, R14 - XORL R9, R14 - ANDL R13, R14 - XORL R10, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 1518500249(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 8(SP), DI - MOVL DI, 8(R8) - - // ROUND1(3) - // LOAD - MOVL 12(SI), DI - BSWAPL DI - MOVL DI, 12(SP) - - // FUNC1 - MOVL R9, R14 - XORL R13, R14 - ANDL R12, R14 - XORL R9, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 1518500249(R10)(DI*1), R10 - ADDL R8, R10 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 12(SP), DI - MOVL DI, 12(R8) - - // ROUND1(4) - // LOAD - MOVL 16(SI), DI - BSWAPL DI - MOVL DI, 16(SP) - - // FUNC1 - MOVL R13, R14 - XORL R12, R14 - ANDL R11, R14 - XORL R13, R14 - - // MIX - ROLL $+30, R11 - ADDL R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 1518500249(R9)(DI*1), R9 - ADDL R8, R9 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 16(SP), DI - MOVL DI, 16(R8) - - // ROUND1(5) - // LOAD - MOVL 20(SI), DI - BSWAPL DI - MOVL DI, 20(SP) - - // FUNC1 - MOVL R12, R14 - XORL R11, R14 - ANDL R10, R14 - XORL R12, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 - MOVL R9, R8 - ROLL $+5, R8 - LEAL 1518500249(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 20(SP), DI - MOVL DI, 20(R8) - - // ROUND1(6) - // LOAD - MOVL 24(SI), DI - BSWAPL DI - MOVL DI, 24(SP) - - // FUNC1 - MOVL R11, R14 - XORL R10, R14 - ANDL R9, R14 - XORL R11, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 1518500249(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 24(SP), DI - MOVL DI, 24(R8) - - // ROUND1(7) - // LOAD - MOVL 28(SI), DI - BSWAPL DI - MOVL DI, 28(SP) - - // FUNC1 - MOVL R10, R14 - XORL R9, R14 - ANDL R13, R14 - XORL R10, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 1518500249(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 28(SP), DI - MOVL DI, 28(R8) - - // ROUND1(8) - // LOAD - MOVL 32(SI), DI - BSWAPL DI - MOVL DI, 32(SP) - - // FUNC1 - MOVL R9, R14 - XORL R13, R14 - ANDL R12, R14 - XORL R9, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 1518500249(R10)(DI*1), R10 - ADDL R8, R10 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 32(SP), DI - MOVL DI, 32(R8) - - // ROUND1(9) - // LOAD - MOVL 36(SI), DI - BSWAPL DI - MOVL DI, 36(SP) - - // FUNC1 - MOVL R13, R14 - XORL R12, R14 - ANDL R11, R14 - XORL R13, R14 - - // MIX - ROLL $+30, R11 - ADDL 
R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 1518500249(R9)(DI*1), R9 - ADDL R8, R9 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 36(SP), DI - MOVL DI, 36(R8) - - // ROUND1(10) - // LOAD - MOVL 40(SI), DI - BSWAPL DI - MOVL DI, 40(SP) - - // FUNC1 - MOVL R12, R14 - XORL R11, R14 - ANDL R10, R14 - XORL R12, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 - MOVL R9, R8 - ROLL $+5, R8 - LEAL 1518500249(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 40(SP), DI - MOVL DI, 40(R8) - - // ROUND1(11) - // LOAD - MOVL 44(SI), DI - BSWAPL DI - MOVL DI, 44(SP) - - // FUNC1 - MOVL R11, R14 - XORL R10, R14 - ANDL R9, R14 - XORL R11, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 1518500249(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 44(SP), DI - MOVL DI, 44(R8) - - // ROUND1(12) - // LOAD - MOVL 48(SI), DI - BSWAPL DI - MOVL DI, 48(SP) - - // FUNC1 - MOVL R10, R14 - XORL R9, R14 - ANDL R13, R14 - XORL R10, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 1518500249(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 48(SP), DI - MOVL DI, 48(R8) - - // ROUND1(13) - // LOAD - MOVL 52(SI), DI - BSWAPL DI - MOVL DI, 52(SP) - - // FUNC1 - MOVL R9, R14 - XORL R13, R14 - ANDL R12, R14 - XORL R9, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 1518500249(R10)(DI*1), R10 - ADDL R8, R10 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 52(SP), DI - MOVL DI, 52(R8) - - // ROUND1(14) - // LOAD - MOVL 56(SI), DI - BSWAPL DI - MOVL DI, 56(SP) - - // FUNC1 - MOVL R13, R14 - XORL R12, R14 - ANDL R11, R14 - XORL R13, R14 - - // MIX - ROLL $+30, R11 - ADDL R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 1518500249(R9)(DI*1), R9 - ADDL R8, R9 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 56(SP), DI - MOVL DI, 56(R8) - - // ROUND1(15) - // LOAD - MOVL 60(SI), DI - BSWAPL DI - MOVL DI, 60(SP) - - // FUNC1 - MOVL R12, R14 - XORL R11, R14 - ANDL R10, R14 - XORL R12, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 - MOVL R9, R8 - ROLL $+5, R8 - LEAL 1518500249(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 60(SP), DI - MOVL DI, 60(R8) - - // ROUND1x (steps 16-19) - same as ROUND1 but with no data load. 
- // ROUND1x(16) - // SHUFFLE - MOVL (SP), DI - XORL 52(SP), DI - XORL 32(SP), DI - XORL 8(SP), DI - ROLL $+1, DI - MOVL DI, (SP) - - // FUNC1 - MOVL R11, R14 - XORL R10, R14 - ANDL R9, R14 - XORL R11, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 1518500249(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL (SP), DI - MOVL DI, 64(R8) - - // ROUND1x(17) - // SHUFFLE - MOVL 4(SP), DI - XORL 56(SP), DI - XORL 36(SP), DI - XORL 12(SP), DI - ROLL $+1, DI - MOVL DI, 4(SP) - - // FUNC1 - MOVL R10, R14 - XORL R9, R14 - ANDL R13, R14 - XORL R10, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 1518500249(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 4(SP), DI - MOVL DI, 68(R8) - - // ROUND1x(18) - // SHUFFLE - MOVL 8(SP), DI - XORL 60(SP), DI - XORL 40(SP), DI - XORL 16(SP), DI - ROLL $+1, DI - MOVL DI, 8(SP) - - // FUNC1 - MOVL R9, R14 - XORL R13, R14 - ANDL R12, R14 - XORL R9, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 1518500249(R10)(DI*1), R10 - ADDL R8, R10 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 8(SP), DI - MOVL DI, 72(R8) - - // ROUND1x(19) - // SHUFFLE - MOVL 12(SP), DI - XORL (SP), DI - XORL 44(SP), DI - XORL 20(SP), DI - ROLL $+1, DI - MOVL DI, 12(SP) - - // FUNC1 - MOVL R13, R14 - XORL R12, R14 - ANDL R11, R14 - XORL R13, R14 - - // MIX - ROLL $+30, R11 - ADDL R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 1518500249(R9)(DI*1), R9 - ADDL R8, R9 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 12(SP), DI - MOVL DI, 76(R8) - - // ROUND2 (steps 20-39) - // ROUND2(20) - // SHUFFLE - MOVL 16(SP), DI - XORL 4(SP), DI - XORL 48(SP), DI - XORL 24(SP), DI - ROLL $+1, DI - MOVL DI, 16(SP) - - // FUNC2 - MOVL R10, R14 - XORL R11, R14 - XORL R12, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 - MOVL R9, R8 - ROLL $+5, R8 - LEAL 1859775393(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 16(SP), DI - MOVL DI, 80(R8) - - // ROUND2(21) - // SHUFFLE - MOVL 20(SP), DI - XORL 8(SP), DI - XORL 52(SP), DI - XORL 28(SP), DI - ROLL $+1, DI - MOVL DI, 20(SP) - - // FUNC2 - MOVL R9, R14 - XORL R10, R14 - XORL R11, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 1859775393(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 20(SP), DI - MOVL DI, 84(R8) - - // ROUND2(22) - // SHUFFLE - MOVL 24(SP), DI - XORL 12(SP), DI - XORL 56(SP), DI - XORL 32(SP), DI - ROLL $+1, DI - MOVL DI, 24(SP) - - // FUNC2 - MOVL R13, R14 - XORL R9, R14 - XORL R10, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 1859775393(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 24(SP), DI - MOVL DI, 88(R8) - - // ROUND2(23) - // SHUFFLE - MOVL 28(SP), DI - XORL 16(SP), DI - XORL 60(SP), DI - XORL 36(SP), DI - ROLL $+1, DI - MOVL DI, 28(SP) - - // FUNC2 - MOVL R12, R14 - XORL R13, R14 - XORL R9, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 1859775393(R10)(DI*1), R10 - ADDL R8, R10 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 28(SP), DI - MOVL DI, 92(R8) - - // ROUND2(24) - // SHUFFLE - MOVL 32(SP), DI - XORL 20(SP), DI - XORL (SP), DI - XORL 40(SP), DI - ROLL $+1, DI - MOVL DI, 32(SP) - - // FUNC2 - MOVL R11, R14 - XORL R12, R14 - XORL R13, R14 - - // MIX - ROLL $+30, R11 - ADDL R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 1859775393(R9)(DI*1), R9 - ADDL R8, R9 - - 
// Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 32(SP), DI - MOVL DI, 96(R8) - - // ROUND2(25) - // SHUFFLE - MOVL 36(SP), DI - XORL 24(SP), DI - XORL 4(SP), DI - XORL 44(SP), DI - ROLL $+1, DI - MOVL DI, 36(SP) - - // FUNC2 - MOVL R10, R14 - XORL R11, R14 - XORL R12, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 - MOVL R9, R8 - ROLL $+5, R8 - LEAL 1859775393(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 36(SP), DI - MOVL DI, 100(R8) - - // ROUND2(26) - // SHUFFLE - MOVL 40(SP), DI - XORL 28(SP), DI - XORL 8(SP), DI - XORL 48(SP), DI - ROLL $+1, DI - MOVL DI, 40(SP) - - // FUNC2 - MOVL R9, R14 - XORL R10, R14 - XORL R11, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 1859775393(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 40(SP), DI - MOVL DI, 104(R8) - - // ROUND2(27) - // SHUFFLE - MOVL 44(SP), DI - XORL 32(SP), DI - XORL 12(SP), DI - XORL 52(SP), DI - ROLL $+1, DI - MOVL DI, 44(SP) - - // FUNC2 - MOVL R13, R14 - XORL R9, R14 - XORL R10, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 1859775393(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 44(SP), DI - MOVL DI, 108(R8) - - // ROUND2(28) - // SHUFFLE - MOVL 48(SP), DI - XORL 36(SP), DI - XORL 16(SP), DI - XORL 56(SP), DI - ROLL $+1, DI - MOVL DI, 48(SP) - - // FUNC2 - MOVL R12, R14 - XORL R13, R14 - XORL R9, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 1859775393(R10)(DI*1), R10 - ADDL R8, R10 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 48(SP), DI - MOVL DI, 112(R8) - - // ROUND2(29) - // SHUFFLE - MOVL 52(SP), DI - XORL 40(SP), DI - XORL 20(SP), DI - XORL 60(SP), DI - ROLL $+1, DI - MOVL DI, 52(SP) - - // FUNC2 - MOVL R11, R14 - XORL R12, R14 - XORL R13, R14 - - // MIX - ROLL $+30, R11 - ADDL R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 1859775393(R9)(DI*1), R9 - ADDL R8, R9 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 52(SP), DI - MOVL DI, 116(R8) - - // ROUND2(30) - // SHUFFLE - MOVL 56(SP), DI - XORL 44(SP), DI - XORL 24(SP), DI - XORL (SP), DI - ROLL $+1, DI - MOVL DI, 56(SP) - - // FUNC2 - MOVL R10, R14 - XORL R11, R14 - XORL R12, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 - MOVL R9, R8 - ROLL $+5, R8 - LEAL 1859775393(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 56(SP), DI - MOVL DI, 120(R8) - - // ROUND2(31) - // SHUFFLE - MOVL 60(SP), DI - XORL 48(SP), DI - XORL 28(SP), DI - XORL 4(SP), DI - ROLL $+1, DI - MOVL DI, 60(SP) - - // FUNC2 - MOVL R9, R14 - XORL R10, R14 - XORL R11, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 1859775393(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 60(SP), DI - MOVL DI, 124(R8) - - // ROUND2(32) - // SHUFFLE - MOVL (SP), DI - XORL 52(SP), DI - XORL 32(SP), DI - XORL 8(SP), DI - ROLL $+1, DI - MOVL DI, (SP) - - // FUNC2 - MOVL R13, R14 - XORL R9, R14 - XORL R10, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 1859775393(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL (SP), DI - MOVL DI, 128(R8) - - // ROUND2(33) - // SHUFFLE - MOVL 4(SP), DI - XORL 56(SP), DI - XORL 36(SP), DI - XORL 12(SP), DI - ROLL $+1, DI - MOVL DI, 4(SP) - - // FUNC2 - MOVL R12, R14 - XORL R13, R14 - XORL R9, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 1859775393(R10)(DI*1), R10 - ADDL R8, R10 - - 
// Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 4(SP), DI - MOVL DI, 132(R8) - - // ROUND2(34) - // SHUFFLE - MOVL 8(SP), DI - XORL 60(SP), DI - XORL 40(SP), DI - XORL 16(SP), DI - ROLL $+1, DI - MOVL DI, 8(SP) - - // FUNC2 - MOVL R11, R14 - XORL R12, R14 - XORL R13, R14 - - // MIX - ROLL $+30, R11 - ADDL R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 1859775393(R9)(DI*1), R9 - ADDL R8, R9 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 8(SP), DI - MOVL DI, 136(R8) - - // ROUND2(35) - // SHUFFLE - MOVL 12(SP), DI - XORL (SP), DI - XORL 44(SP), DI - XORL 20(SP), DI - ROLL $+1, DI - MOVL DI, 12(SP) - - // FUNC2 - MOVL R10, R14 - XORL R11, R14 - XORL R12, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 - MOVL R9, R8 - ROLL $+5, R8 - LEAL 1859775393(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 12(SP), DI - MOVL DI, 140(R8) - - // ROUND2(36) - // SHUFFLE - MOVL 16(SP), DI - XORL 4(SP), DI - XORL 48(SP), DI - XORL 24(SP), DI - ROLL $+1, DI - MOVL DI, 16(SP) - - // FUNC2 - MOVL R9, R14 - XORL R10, R14 - XORL R11, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 1859775393(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 16(SP), DI - MOVL DI, 144(R8) - - // ROUND2(37) - // SHUFFLE - MOVL 20(SP), DI - XORL 8(SP), DI - XORL 52(SP), DI - XORL 28(SP), DI - ROLL $+1, DI - MOVL DI, 20(SP) - - // FUNC2 - MOVL R13, R14 - XORL R9, R14 - XORL R10, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 1859775393(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 20(SP), DI - MOVL DI, 148(R8) - - // ROUND2(38) - // SHUFFLE - MOVL 24(SP), DI - XORL 12(SP), DI - XORL 56(SP), DI - XORL 32(SP), DI - ROLL $+1, DI - MOVL DI, 24(SP) - - // FUNC2 - MOVL R12, R14 - XORL R13, R14 - XORL R9, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 1859775393(R10)(DI*1), R10 - ADDL R8, R10 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 24(SP), DI - MOVL DI, 152(R8) - - // ROUND2(39) - // SHUFFLE - MOVL 28(SP), DI - XORL 16(SP), DI - XORL 60(SP), DI - XORL 36(SP), DI - ROLL $+1, DI - MOVL DI, 28(SP) - - // FUNC2 - MOVL R11, R14 - XORL R12, R14 - XORL R13, R14 - - // MIX - ROLL $+30, R11 - ADDL R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 1859775393(R9)(DI*1), R9 - ADDL R8, R9 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 28(SP), DI - MOVL DI, 156(R8) - - // ROUND3 (steps 40-59) - // ROUND3(40) - // SHUFFLE - MOVL 32(SP), DI - XORL 20(SP), DI - XORL (SP), DI - XORL 40(SP), DI - ROLL $+1, DI - MOVL DI, 32(SP) - - // FUNC3 - MOVL R10, R8 - ORL R11, R8 - ANDL R12, R8 - MOVL R10, R14 - ANDL R11, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 - MOVL R9, R8 - ROLL $+5, R8 - LEAL 2400959708(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 32(SP), DI - MOVL DI, 160(R8) - - // ROUND3(41) - // SHUFFLE - MOVL 36(SP), DI - XORL 24(SP), DI - XORL 4(SP), DI - XORL 44(SP), DI - ROLL $+1, DI - MOVL DI, 36(SP) - - // FUNC3 - MOVL R9, R8 - ORL R10, R8 - ANDL R11, R8 - MOVL R9, R14 - ANDL R10, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 2400959708(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 36(SP), DI - MOVL DI, 164(R8) - - // ROUND3(42) - // SHUFFLE - MOVL 40(SP), DI - XORL 28(SP), DI - XORL 8(SP), DI - XORL 48(SP), DI - ROLL $+1, DI - MOVL DI, 40(SP) - - // FUNC3 - MOVL R13, R8 - ORL R9, R8 - ANDL R10, R8 - MOVL R13, R14 - ANDL 
R9, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 2400959708(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 40(SP), DI - MOVL DI, 168(R8) - - // ROUND3(43) - // SHUFFLE - MOVL 44(SP), DI - XORL 32(SP), DI - XORL 12(SP), DI - XORL 52(SP), DI - ROLL $+1, DI - MOVL DI, 44(SP) - - // FUNC3 - MOVL R12, R8 - ORL R13, R8 - ANDL R9, R8 - MOVL R12, R14 - ANDL R13, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 2400959708(R10)(DI*1), R10 - ADDL R8, R10 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 44(SP), DI - MOVL DI, 172(R8) - - // ROUND3(44) - // SHUFFLE - MOVL 48(SP), DI - XORL 36(SP), DI - XORL 16(SP), DI - XORL 56(SP), DI - ROLL $+1, DI - MOVL DI, 48(SP) - - // FUNC3 - MOVL R11, R8 - ORL R12, R8 - ANDL R13, R8 - MOVL R11, R14 - ANDL R12, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R11 - ADDL R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 2400959708(R9)(DI*1), R9 - ADDL R8, R9 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 48(SP), DI - MOVL DI, 176(R8) - - // ROUND3(45) - // SHUFFLE - MOVL 52(SP), DI - XORL 40(SP), DI - XORL 20(SP), DI - XORL 60(SP), DI - ROLL $+1, DI - MOVL DI, 52(SP) - - // FUNC3 - MOVL R10, R8 - ORL R11, R8 - ANDL R12, R8 - MOVL R10, R14 - ANDL R11, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 - MOVL R9, R8 - ROLL $+5, R8 - LEAL 2400959708(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 52(SP), DI - MOVL DI, 180(R8) - - // ROUND3(46) - // SHUFFLE - MOVL 56(SP), DI - XORL 44(SP), DI - XORL 24(SP), DI - XORL (SP), DI - ROLL $+1, DI - MOVL DI, 56(SP) - - // FUNC3 - MOVL R9, R8 - ORL R10, R8 - ANDL R11, R8 - MOVL R9, R14 - ANDL R10, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 2400959708(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 56(SP), DI - MOVL DI, 184(R8) - - // ROUND3(47) - // SHUFFLE - MOVL 60(SP), DI - XORL 48(SP), DI - XORL 28(SP), DI - XORL 4(SP), DI - ROLL $+1, DI - MOVL DI, 60(SP) - - // FUNC3 - MOVL R13, R8 - ORL R9, R8 - ANDL R10, R8 - MOVL R13, R14 - ANDL R9, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 2400959708(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 60(SP), DI - MOVL DI, 188(R8) - - // ROUND3(48) - // SHUFFLE - MOVL (SP), DI - XORL 52(SP), DI - XORL 32(SP), DI - XORL 8(SP), DI - ROLL $+1, DI - MOVL DI, (SP) - - // FUNC3 - MOVL R12, R8 - ORL R13, R8 - ANDL R9, R8 - MOVL R12, R14 - ANDL R13, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 2400959708(R10)(DI*1), R10 - ADDL R8, R10 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL (SP), DI - MOVL DI, 192(R8) - - // ROUND3(49) - // SHUFFLE - MOVL 4(SP), DI - XORL 56(SP), DI - XORL 36(SP), DI - XORL 12(SP), DI - ROLL $+1, DI - MOVL DI, 4(SP) - - // FUNC3 - MOVL R11, R8 - ORL R12, R8 - ANDL R13, R8 - MOVL R11, R14 - ANDL R12, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R11 - ADDL R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 2400959708(R9)(DI*1), R9 - ADDL R8, R9 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 4(SP), DI - MOVL DI, 196(R8) - - // ROUND3(50) - // SHUFFLE - MOVL 8(SP), DI - XORL 60(SP), DI - XORL 40(SP), DI - XORL 16(SP), DI - ROLL $+1, DI - MOVL DI, 8(SP) - - // FUNC3 - MOVL R10, R8 - ORL R11, R8 - ANDL R12, R8 - MOVL R10, R14 - ANDL R11, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 
- MOVL R9, R8 - ROLL $+5, R8 - LEAL 2400959708(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 8(SP), DI - MOVL DI, 200(R8) - - // ROUND3(51) - // SHUFFLE - MOVL 12(SP), DI - XORL (SP), DI - XORL 44(SP), DI - XORL 20(SP), DI - ROLL $+1, DI - MOVL DI, 12(SP) - - // FUNC3 - MOVL R9, R8 - ORL R10, R8 - ANDL R11, R8 - MOVL R9, R14 - ANDL R10, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 2400959708(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 12(SP), DI - MOVL DI, 204(R8) - - // ROUND3(52) - // SHUFFLE - MOVL 16(SP), DI - XORL 4(SP), DI - XORL 48(SP), DI - XORL 24(SP), DI - ROLL $+1, DI - MOVL DI, 16(SP) - - // FUNC3 - MOVL R13, R8 - ORL R9, R8 - ANDL R10, R8 - MOVL R13, R14 - ANDL R9, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 2400959708(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 16(SP), DI - MOVL DI, 208(R8) - - // ROUND3(53) - // SHUFFLE - MOVL 20(SP), DI - XORL 8(SP), DI - XORL 52(SP), DI - XORL 28(SP), DI - ROLL $+1, DI - MOVL DI, 20(SP) - - // FUNC3 - MOVL R12, R8 - ORL R13, R8 - ANDL R9, R8 - MOVL R12, R14 - ANDL R13, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 2400959708(R10)(DI*1), R10 - ADDL R8, R10 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 20(SP), DI - MOVL DI, 212(R8) - - // ROUND3(54) - // SHUFFLE - MOVL 24(SP), DI - XORL 12(SP), DI - XORL 56(SP), DI - XORL 32(SP), DI - ROLL $+1, DI - MOVL DI, 24(SP) - - // FUNC3 - MOVL R11, R8 - ORL R12, R8 - ANDL R13, R8 - MOVL R11, R14 - ANDL R12, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R11 - ADDL R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 2400959708(R9)(DI*1), R9 - ADDL R8, R9 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 24(SP), DI - MOVL DI, 216(R8) - - // ROUND3(55) - // SHUFFLE - MOVL 28(SP), DI - XORL 16(SP), DI - XORL 60(SP), DI - XORL 36(SP), DI - ROLL $+1, DI - MOVL DI, 28(SP) - - // FUNC3 - MOVL R10, R8 - ORL R11, R8 - ANDL R12, R8 - MOVL R10, R14 - ANDL R11, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 - MOVL R9, R8 - ROLL $+5, R8 - LEAL 2400959708(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 28(SP), DI - MOVL DI, 220(R8) - - // ROUND3(56) - // SHUFFLE - MOVL 32(SP), DI - XORL 20(SP), DI - XORL (SP), DI - XORL 40(SP), DI - ROLL $+1, DI - MOVL DI, 32(SP) - - // FUNC3 - MOVL R9, R8 - ORL R10, R8 - ANDL R11, R8 - MOVL R9, R14 - ANDL R10, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 2400959708(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 32(SP), DI - MOVL DI, 224(R8) - - // ROUND3(57) - // SHUFFLE - MOVL 36(SP), DI - XORL 24(SP), DI - XORL 4(SP), DI - XORL 44(SP), DI - ROLL $+1, DI - MOVL DI, 36(SP) - - // FUNC3 - MOVL R13, R8 - ORL R9, R8 - ANDL R10, R8 - MOVL R13, R14 - ANDL R9, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 2400959708(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 36(SP), DI - MOVL DI, 228(R8) - - // Load cs - MOVQ cs_base+56(FP), R8 - MOVL R11, 20(R8) - MOVL R12, 24(R8) - MOVL R13, 28(R8) - MOVL R9, 32(R8) - MOVL R10, 36(R8) - - // ROUND3(58) - // SHUFFLE - MOVL 40(SP), DI - XORL 28(SP), DI - XORL 8(SP), DI - XORL 48(SP), DI - ROLL $+1, DI - MOVL DI, 40(SP) - - // FUNC3 - MOVL R12, R8 - ORL R13, R8 - ANDL R9, R8 - MOVL R12, R14 
- ANDL R13, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 2400959708(R10)(DI*1), R10 - ADDL R8, R10 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 40(SP), DI - MOVL DI, 232(R8) - - // ROUND3(59) - // SHUFFLE - MOVL 44(SP), DI - XORL 32(SP), DI - XORL 12(SP), DI - XORL 52(SP), DI - ROLL $+1, DI - MOVL DI, 44(SP) - - // FUNC3 - MOVL R11, R8 - ORL R12, R8 - ANDL R13, R8 - MOVL R11, R14 - ANDL R12, R14 - ORL R8, R14 - - // MIX - ROLL $+30, R11 - ADDL R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 2400959708(R9)(DI*1), R9 - ADDL R8, R9 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 44(SP), DI - MOVL DI, 236(R8) - - // ROUND4 (steps 60-79) - // ROUND4(60) - // SHUFFLE - MOVL 48(SP), DI - XORL 36(SP), DI - XORL 16(SP), DI - XORL 56(SP), DI - ROLL $+1, DI - MOVL DI, 48(SP) - - // FUNC2 - MOVL R10, R14 - XORL R11, R14 - XORL R12, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 - MOVL R9, R8 - ROLL $+5, R8 - LEAL 3395469782(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 48(SP), DI - MOVL DI, 240(R8) - - // ROUND4(61) - // SHUFFLE - MOVL 52(SP), DI - XORL 40(SP), DI - XORL 20(SP), DI - XORL 60(SP), DI - ROLL $+1, DI - MOVL DI, 52(SP) - - // FUNC2 - MOVL R9, R14 - XORL R10, R14 - XORL R11, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 3395469782(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 52(SP), DI - MOVL DI, 244(R8) - - // ROUND4(62) - // SHUFFLE - MOVL 56(SP), DI - XORL 44(SP), DI - XORL 24(SP), DI - XORL (SP), DI - ROLL $+1, DI - MOVL DI, 56(SP) - - // FUNC2 - MOVL R13, R14 - XORL R9, R14 - XORL R10, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 3395469782(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 56(SP), DI - MOVL DI, 248(R8) - - // ROUND4(63) - // SHUFFLE - MOVL 60(SP), DI - XORL 48(SP), DI - XORL 28(SP), DI - XORL 4(SP), DI - ROLL $+1, DI - MOVL DI, 60(SP) - - // FUNC2 - MOVL R12, R14 - XORL R13, R14 - XORL R9, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 3395469782(R10)(DI*1), R10 - ADDL R8, R10 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 60(SP), DI - MOVL DI, 252(R8) - - // ROUND4(64) - // SHUFFLE - MOVL (SP), DI - XORL 52(SP), DI - XORL 32(SP), DI - XORL 8(SP), DI - ROLL $+1, DI - MOVL DI, (SP) - - // FUNC2 - MOVL R11, R14 - XORL R12, R14 - XORL R13, R14 - - // MIX - ROLL $+30, R11 - ADDL R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 3395469782(R9)(DI*1), R9 - ADDL R8, R9 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL (SP), DI - MOVL DI, 256(R8) - - // Load cs - MOVQ cs_base+56(FP), R8 - MOVL R9, 40(R8) - MOVL R10, 44(R8) - MOVL R11, 48(R8) - MOVL R12, 52(R8) - MOVL R13, 56(R8) - - // ROUND4(65) - // SHUFFLE - MOVL 4(SP), DI - XORL 56(SP), DI - XORL 36(SP), DI - XORL 12(SP), DI - ROLL $+1, DI - MOVL DI, 4(SP) - - // FUNC2 - MOVL R10, R14 - XORL R11, R14 - XORL R12, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 - MOVL R9, R8 - ROLL $+5, R8 - LEAL 3395469782(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 4(SP), DI - MOVL DI, 260(R8) - - // ROUND4(66) - // SHUFFLE - MOVL 8(SP), DI - XORL 60(SP), DI - XORL 40(SP), DI - XORL 16(SP), DI - ROLL $+1, DI - MOVL DI, 8(SP) - - // FUNC2 - MOVL R9, R14 - XORL R10, R14 - XORL R11, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 3395469782(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - 
MOVL 8(SP), DI - MOVL DI, 264(R8) - - // ROUND4(67) - // SHUFFLE - MOVL 12(SP), DI - XORL (SP), DI - XORL 44(SP), DI - XORL 20(SP), DI - ROLL $+1, DI - MOVL DI, 12(SP) - - // FUNC2 - MOVL R13, R14 - XORL R9, R14 - XORL R10, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 3395469782(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 12(SP), DI - MOVL DI, 268(R8) - - // ROUND4(68) - // SHUFFLE - MOVL 16(SP), DI - XORL 4(SP), DI - XORL 48(SP), DI - XORL 24(SP), DI - ROLL $+1, DI - MOVL DI, 16(SP) - - // FUNC2 - MOVL R12, R14 - XORL R13, R14 - XORL R9, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 3395469782(R10)(DI*1), R10 - ADDL R8, R10 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 16(SP), DI - MOVL DI, 272(R8) - - // ROUND4(69) - // SHUFFLE - MOVL 20(SP), DI - XORL 8(SP), DI - XORL 52(SP), DI - XORL 28(SP), DI - ROLL $+1, DI - MOVL DI, 20(SP) - - // FUNC2 - MOVL R11, R14 - XORL R12, R14 - XORL R13, R14 - - // MIX - ROLL $+30, R11 - ADDL R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 3395469782(R9)(DI*1), R9 - ADDL R8, R9 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 20(SP), DI - MOVL DI, 276(R8) - - // ROUND4(70) - // SHUFFLE - MOVL 24(SP), DI - XORL 12(SP), DI - XORL 56(SP), DI - XORL 32(SP), DI - ROLL $+1, DI - MOVL DI, 24(SP) - - // FUNC2 - MOVL R10, R14 - XORL R11, R14 - XORL R12, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 - MOVL R9, R8 - ROLL $+5, R8 - LEAL 3395469782(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 24(SP), DI - MOVL DI, 280(R8) - - // ROUND4(71) - // SHUFFLE - MOVL 28(SP), DI - XORL 16(SP), DI - XORL 60(SP), DI - XORL 36(SP), DI - ROLL $+1, DI - MOVL DI, 28(SP) - - // FUNC2 - MOVL R9, R14 - XORL R10, R14 - XORL R11, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 3395469782(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 28(SP), DI - MOVL DI, 284(R8) - - // ROUND4(72) - // SHUFFLE - MOVL 32(SP), DI - XORL 20(SP), DI - XORL (SP), DI - XORL 40(SP), DI - ROLL $+1, DI - MOVL DI, 32(SP) - - // FUNC2 - MOVL R13, R14 - XORL R9, R14 - XORL R10, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 3395469782(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 32(SP), DI - MOVL DI, 288(R8) - - // ROUND4(73) - // SHUFFLE - MOVL 36(SP), DI - XORL 24(SP), DI - XORL 4(SP), DI - XORL 44(SP), DI - ROLL $+1, DI - MOVL DI, 36(SP) - - // FUNC2 - MOVL R12, R14 - XORL R13, R14 - XORL R9, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 3395469782(R10)(DI*1), R10 - ADDL R8, R10 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 36(SP), DI - MOVL DI, 292(R8) - - // ROUND4(74) - // SHUFFLE - MOVL 40(SP), DI - XORL 28(SP), DI - XORL 8(SP), DI - XORL 48(SP), DI - ROLL $+1, DI - MOVL DI, 40(SP) - - // FUNC2 - MOVL R11, R14 - XORL R12, R14 - XORL R13, R14 - - // MIX - ROLL $+30, R11 - ADDL R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 3395469782(R9)(DI*1), R9 - ADDL R8, R9 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 40(SP), DI - MOVL DI, 296(R8) - - // ROUND4(75) - // SHUFFLE - MOVL 44(SP), DI - XORL 32(SP), DI - XORL 12(SP), DI - XORL 52(SP), DI - ROLL $+1, DI - MOVL DI, 44(SP) - - // FUNC2 - MOVL R10, R14 - XORL R11, R14 - XORL R12, R14 - - // MIX - ROLL $+30, R10 - ADDL R14, R13 - MOVL R9, R8 - ROLL $+5, R8 - LEAL 3395469782(R13)(DI*1), R13 - ADDL R8, R13 - - // Load m1 - MOVQ m1_base+32(FP), R8 
- MOVL 44(SP), DI - MOVL DI, 300(R8) - - // ROUND4(76) - // SHUFFLE - MOVL 48(SP), DI - XORL 36(SP), DI - XORL 16(SP), DI - XORL 56(SP), DI - ROLL $+1, DI - MOVL DI, 48(SP) - - // FUNC2 - MOVL R9, R14 - XORL R10, R14 - XORL R11, R14 - - // MIX - ROLL $+30, R9 - ADDL R14, R12 - MOVL R13, R8 - ROLL $+5, R8 - LEAL 3395469782(R12)(DI*1), R12 - ADDL R8, R12 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 48(SP), DI - MOVL DI, 304(R8) - - // ROUND4(77) - // SHUFFLE - MOVL 52(SP), DI - XORL 40(SP), DI - XORL 20(SP), DI - XORL 60(SP), DI - ROLL $+1, DI - MOVL DI, 52(SP) - - // FUNC2 - MOVL R13, R14 - XORL R9, R14 - XORL R10, R14 - - // MIX - ROLL $+30, R13 - ADDL R14, R11 - MOVL R12, R8 - ROLL $+5, R8 - LEAL 3395469782(R11)(DI*1), R11 - ADDL R8, R11 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 52(SP), DI - MOVL DI, 308(R8) - - // ROUND4(78) - // SHUFFLE - MOVL 56(SP), DI - XORL 44(SP), DI - XORL 24(SP), DI - XORL (SP), DI - ROLL $+1, DI - MOVL DI, 56(SP) - - // FUNC2 - MOVL R12, R14 - XORL R13, R14 - XORL R9, R14 - - // MIX - ROLL $+30, R12 - ADDL R14, R10 - MOVL R11, R8 - ROLL $+5, R8 - LEAL 3395469782(R10)(DI*1), R10 - ADDL R8, R10 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 56(SP), DI - MOVL DI, 312(R8) - - // ROUND4(79) - // SHUFFLE - MOVL 60(SP), DI - XORL 48(SP), DI - XORL 28(SP), DI - XORL 4(SP), DI - ROLL $+1, DI - MOVL DI, 60(SP) - - // FUNC2 - MOVL R11, R14 - XORL R12, R14 - XORL R13, R14 - - // MIX - ROLL $+30, R11 - ADDL R14, R9 - MOVL R10, R8 - ROLL $+5, R8 - LEAL 3395469782(R9)(DI*1), R9 - ADDL R8, R9 - - // Load m1 - MOVQ m1_base+32(FP), R8 - MOVL 60(SP), DI - MOVL DI, 316(R8) - - // Add registers to temp hash. - ADDL R9, AX - ADDL R10, BX - ADDL R11, CX - ADDL R12, DX - ADDL R13, BP - ADDQ $+64, SI - JB loop - -end: - MOVQ dig+0(FP), DI - MOVL AX, (DI) - MOVL BX, 4(DI) - MOVL CX, 8(DI) - MOVL DX, 12(DI) - MOVL BP, 16(DI) + // Save ABCD and E working values + VMOVDQA X5, (AX) + VMOVDQA X0, 16(AX) + + // LOAD CS 0 + VPEXTRD $3, X5, R12 + LOADCS(X0, R12, 0, R15) + + // Rounds 0-3 + VMOVDQU (SI), X1 + PSHUFB X7, X1 + PADDD X1, X5 + VMOVDQA X0, X6 + SHA1RNDS4 $0x00, X5, X0 + LOADM1(X1, 0, R13) + + // Rounds 4-7 + VMOVDQU 16(SI), X2 + PSHUFB X7, X2 + SHA1NEXTE X2, X6 + VMOVDQA X0, X5 + SHA1RNDS4 $0x00, X6, X0 + SHA1MSG1 X2, X1 + LOADM1(X2, 1, R13) + + // Rounds 8-11 + VMOVDQU 32(SI), X3 + PSHUFB X7, X3 + SHA1NEXTE X3, X5 + VMOVDQA X0, X6 + SHA1RNDS4 $0x00, X5, X0 + SHA1MSG1 X3, X2 + PXOR X3, X1 + LOADM1(X3, 2, R13) + + // Rounds 12-15 + VMOVDQU 48(SI), X4 + PSHUFB X7, X4 + SHA1NEXTE X4, X6 + VMOVDQA X0, X5 + SHA1MSG2 X4, X1 + SHA1RNDS4 $0x00, X6, X0 + SHA1MSG1 X4, X3 + PXOR X4, X2 + LOADM1(X4, 3, R13) + + // Rounds 16-19 + SHA1NEXTE X1, X5 + VMOVDQA X0, X6 + SHA1MSG2 X1, X2 + SHA1RNDS4 $0x00, X5, X0 + SHA1MSG1 X1, X4 + PXOR X1, X3 + LOADM1(X1, 4, R13) + + // Rounds 20-23 + SHA1NEXTE X2, X6 + VMOVDQA X0, X5 + SHA1MSG2 X2, X3 + SHA1RNDS4 $0x01, X6, X0 + SHA1MSG1 X2, X1 + PXOR X2, X4 + LOADM1(X2, 5, R13) + + // Rounds 24-27 + SHA1NEXTE X3, X5 + VMOVDQA X0, X6 + SHA1MSG2 X3, X4 + SHA1RNDS4 $0x01, X5, X0 + SHA1MSG1 X3, X2 + PXOR X3, X1 + LOADM1(X3, 6, R13) + + // Rounds 28-31 + SHA1NEXTE X4, X6 + VMOVDQA X0, X5 + SHA1MSG2 X4, X1 + SHA1RNDS4 $0x01, X6, X0 + SHA1MSG1 X4, X3 + PXOR X4, X2 + LOADM1(X4, 7, R13) + + // Rounds 32-35 + SHA1NEXTE X1, X5 + VMOVDQA X0, X6 + SHA1MSG2 X1, X2 + SHA1RNDS4 $0x01, X5, X0 + SHA1MSG1 X1, X4 + PXOR X1, X3 + LOADM1(X1, 8, R13) + + // Rounds 36-39 + SHA1NEXTE X2, X6 + VMOVDQA X0, X5 + SHA1MSG2 X2, X3 + SHA1RNDS4 $0x01, X6, X0 + SHA1MSG1 X2, X1 + PXOR X2, X4 
+ LOADM1(X2, 9, R13) + + // Rounds 40-43 + SHA1NEXTE X3, X5 + VMOVDQA X0, X6 + SHA1MSG2 X3, X4 + SHA1RNDS4 $0x02, X5, X0 + SHA1MSG1 X3, X2 + PXOR X3, X1 + LOADM1(X3, 10, R13) + + // Rounds 44-47 + SHA1NEXTE X4, X6 + VMOVDQA X0, X5 + SHA1MSG2 X4, X1 + SHA1RNDS4 $0x02, X6, X0 + SHA1MSG1 X4, X3 + PXOR X4, X2 + LOADM1(X4, 11, R13) + + // Rounds 48-51 + SHA1NEXTE X1, X5 + VMOVDQA X0, X6 + SHA1MSG2 X1, X2 + SHA1RNDS4 $0x02, X5, X0 + VPEXTRD $0, X5, R12 + SHA1MSG1 X1, X4 + PXOR X1, X3 + LOADM1(X1, 12, R13) + + // derive pre-round 56's E out of round 51's A. + VPEXTRD $3, X0, R12 + ROLL $30, R12 + + // Rounds 52-55 + SHA1NEXTE X2, X6 + VMOVDQA X0, X5 + SHA1MSG2 X2, X3 + SHA1RNDS4 $0x02, X6, X0 + SHA1MSG1 X2, X1 + PXOR X2, X4 + LOADM1(X2, 13, R13) + + // LOAD CS 58 (gathers 56 which will be rectified in Go) + LOADCS(X0, R12, 1, R15) + + // Rounds 56-59 + SHA1NEXTE X3, X5 + VMOVDQA X0, X6 + SHA1MSG2 X3, X4 + SHA1RNDS4 $0x02, X5, X0 + VPEXTRD $0, X5, R12 + SHA1MSG1 X3, X2 + PXOR X3, X1 + LOADM1(X3, 14, R13) + + // derive pre-round 64's E out of round 59's A. + VPEXTRD $3, X0, R12 + ROLL $30, R12 + + // Rounds 60-63 + SHA1NEXTE X4, X6 + VMOVDQA X0, X5 + SHA1MSG2 X4, X1 + SHA1RNDS4 $0x03, X6, X0 + SHA1MSG1 X4, X3 + PXOR X4, X2 + LOADM1(X4, 15, R13) + + // LOAD CS 65 (gathers 64 which will be rectified in Go) + LOADCS(X0, R12, 2, R15) + + // Rounds 64-67 + SHA1NEXTE X1, X5 + VMOVDQA X0, X6 + SHA1MSG2 X1, X2 + SHA1RNDS4 $0x03, X5, X0 + SHA1MSG1 X1, X4 + PXOR X1, X3 + LOADM1(X1, 16, R13) + + // Rounds 68-71 + SHA1NEXTE X2, X6 + VMOVDQA X0, X5 + SHA1MSG2 X2, X3 + SHA1RNDS4 $0x03, X6, X0 + PXOR X2, X4 + LOADM1(X2, 17, R13) + + // Rounds 72-75 + SHA1NEXTE X3, X5 + VMOVDQA X0, X6 + SHA1MSG2 X3, X4 + SHA1RNDS4 $0x03, X5, X0 + LOADM1(X3, 18, R13) + + // Rounds 76-79 + SHA1NEXTE X4, X6 + VMOVDQA X0, X5 + SHA1RNDS4 $0x03, X6, X0 + LOADM1(X4, 19, R13) + + // Add saved E and ABCD + SHA1NEXTE (AX), X5 + PADDD 16(AX), X0 + + // Check if we are done, if not return to the loop + ADDQ $0x40, SI + CMPQ SI, DX + JNE loop + + // Write the hash state back to digest + PSHUFD $0x1b, X0, X0 + VMOVDQU X0, (DI) + PEXTRD $0x03, X5, 16(DI) + +done: RET + +DATA upper_mask<>+0(SB)/8, $0x0000000000000000 +DATA upper_mask<>+8(SB)/8, $0xffffffff00000000 +GLOBL upper_mask<>(SB), RODATA, $16 + +DATA shuffle_mask<>+0(SB)/8, $0x08090a0b0c0d0e0f +DATA shuffle_mask<>+8(SB)/8, $0x0001020304050607 +GLOBL shuffle_mask<>(SB), RODATA, $16 diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_arm64.go b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_arm64.go index f1edca0b..e641c958 100644 --- a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_arm64.go +++ b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_arm64.go @@ -4,39 +4,43 @@ package sha1cd import ( - "math" - "unsafe" + "runtime" + "github.com/klauspost/cpuid/v2" shared "github.com/pjbgf/sha1cd/internal" ) -// blockARM64 hashes the message p into the current state in dig. +var hasSHA1 = (runtime.GOARCH == "arm64" && cpuid.CPU.Supports(cpuid.SHA1)) + +// blockARM64 hashes the message p into the current state in h. // Both m1 and cs are used to store intermediate results which are used by the collision detection logic. 
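For illustration only (not part of the patch): a minimal caller sketch, assuming the package's crypto/sha1-compatible New constructor. The amd64/arm64/generic dispatch in block below is transparent to such callers; every 64-byte chunk still passes through the collision check.

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/pjbgf/sha1cd"
)

func main() {
	// sha1cd.New is assumed to return a hash.Hash, mirroring crypto/sha1.
	// Each 64-byte block written here is hashed via block(), which selects
	// the hardware or generic implementation and runs collision detection.
	h := sha1cd.New()
	h.Write([]byte("hello world"))
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}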
// //go:noescape -func blockARM64(dig *digest, p sliceHeader, m1 []uint32, cs [][5]uint32) +func blockARM64(h []uint32, p []byte, m1 []uint32, cs [][5]uint32) func block(dig *digest, p []byte) { + if forceGeneric || !hasSHA1 { + blockGeneric(dig, p) + return + } + m1 := [shared.Rounds]uint32{} cs := [shared.PreStepState][shared.WordBuffers]uint32{} for len(p) >= shared.Chunk { - // Only send a block to be processed, as the collission detection - // works on a block by block basis. - ips := sliceHeader{ - base: uintptr(unsafe.Pointer(&p[0])), - len: int(math.Min(float64(len(p)), float64(shared.Chunk))), - cap: shared.Chunk, - } + // The assembly code only supports processing a block at a time, + // so adjust the chunk accordingly. + chunk := p[:shared.Chunk] - blockARM64(dig, ips, m1[:], cs[:]) + blockARM64(dig.h[:], chunk, m1[:], cs[:]) + rectifyCompressionState(m1, &cs) col := checkCollision(m1, cs, dig.h) if col { dig.col = true - blockARM64(dig, ips, m1[:], cs[:]) - blockARM64(dig, ips, m1[:], cs[:]) + blockARM64(dig.h[:], chunk, m1[:], cs[:]) + blockARM64(dig.h[:], chunk, m1[:], cs[:]) } p = p[shared.Chunk:] diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_arm64.s b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_arm64.s index 680d46df..63762049 100644 --- a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_arm64.s +++ b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_arm64.s @@ -2,243 +2,248 @@ #include "textflag.h" -#define RoundConst0 $1518500249 // 0x5A827999 -#define RoundConst1 $1859775393 // 0x6ED9EBA1 -#define RoundConst2 $2400959708 // 0x8F1BBCDC -#define RoundConst3 $3395469782 // 0xCA62C1D6 +// License information for the original SHA1 arm64 implemention: +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found at: +// - https://github.com/golang/go/blob/master/LICENSE +// +// Reference implementations: +// - https://github.com/noloader/SHA-Intrinsics/blob/master/sha1-arm.c +// - https://github.com/golang/go/blob/master/src/crypto/sha1/sha1block_arm64.s -// FUNC1 f = (b & c) | ((~b) & d) -#define FUNC1(b, c, d) \ - MOVW d, R15; \ - EORW c, R15; \ - ANDW b, R15; \ - EORW d, R15 +#define HASHUPDATECHOOSE \ + SHA1C V16.S4, V1, V2 \ + SHA1H V3, V1 \ + VMOV V2.B16, V3.B16 -// FUNC2 f = b ^ c ^ d -#define FUNC2(b, c, d) \ - MOVW b, R15; \ - EORW c, R15; \ - EORW d, R15 +#define HASHUPDATEPARITY \ + SHA1P V16.S4, V1, V2 \ + SHA1H V3, V1 \ + VMOV V2.B16, V3.B16 -// FUNC3 f = (b & c) | (b & d) | (c & d) -#define FUNC3(b, c, d) \ - MOVW b, R27; \ - ORR c, R27, R27; \ - ANDW d, R27, R27; \ - MOVW b, R15; \ - ANDW c, R15, R15; \ - ORR R27, R15, R15 +#define HASHUPDATEMAJ \ + SHA1M V16.S4, V1, V2 \ + SHA1H V3, V1 \ + VMOV V2.B16, V3.B16 -#define FUNC4(b, c, d) FUNC2(b, c, d) - -#define MIX(a, b, c, d, e, k) \ - RORW $2, b, b; \ - ADDW R15, e, e; \ - MOVW a, R27; \ - RORW $27, R27, R27; \ - MOVW k, R19; \ - ADDW R19, e, e; \ - ADDW R9, e, e; \ - ADDW R27, e, e +// func blockARM64(h []uint32, p []byte, m1 []uint32, cs [][5]uint32) +TEXT ·blockARM64(SB), NOSPLIT, $80-96 + MOVD h_base+0(FP), R0 + MOVD p_base+24(FP), R1 + MOVD p_len+32(FP), R2 + MOVD m1_base+48(FP), R3 + MOVD cs_base+72(FP), R4 -#define LOAD(index) \ - MOVWU (index*4)(R16), R9; \ - REVW R9, R9; \ - MOVW R9, (index*4)(RSP) + LSR $6, R2, R2 + LSL $6, R2, R2 + ADD R16, R2, R21 -#define LOADCS(a, b, c, d, e, index) \ - MOVD cs_base+56(FP), R27; \ - MOVW a, ((index*20))(R27); \ - MOVW b, ((index*20)+4)(R27); \ - MOVW c, ((index*20)+8)(R27); \ - MOVW d, 
((index*20)+12)(R27); \ - MOVW e, ((index*20)+16)(R27) - -#define SHUFFLE(index) \ - MOVW ((index&0xf)*4)(RSP), R9; \ - MOVW (((index-3)&0xf)*4)(RSP), R20; \ - EORW R20, R9; \ - MOVW (((index-8)&0xf)*4)(RSP), R20; \ - EORW R20, R9; \ - MOVW (((index-14)&0xf)*4)(RSP), R20; \ - EORW R20, R9; \ - RORW $31, R9, R9; \ - MOVW R9, ((index&0xf)*4)(RSP) - -// LOADM1 stores message word to m1 array. -#define LOADM1(index) \ - MOVD m1_base+32(FP), R27; \ - MOVW ((index&0xf)*4)(RSP), R9; \ - MOVW R9, (index*4)(R27) - -#define ROUND1(a, b, c, d, e, index) \ - LOAD(index); \ - FUNC1(b, c, d); \ - MIX(a, b, c, d, e, RoundConst0); \ - LOADM1(index) - -#define ROUND1x(a, b, c, d, e, index) \ - SHUFFLE(index); \ - FUNC1(b, c, d); \ - MIX(a, b, c, d, e, RoundConst0); \ - LOADM1(index) - -#define ROUND2(a, b, c, d, e, index) \ - SHUFFLE(index); \ - FUNC2(b, c, d); \ - MIX(a, b, c, d, e, RoundConst1); \ - LOADM1(index) - -#define ROUND3(a, b, c, d, e, index) \ - SHUFFLE(index); \ - FUNC3(b, c, d); \ - MIX(a, b, c, d, e, RoundConst2); \ - LOADM1(index) - -#define ROUND4(a, b, c, d, e, index) \ - SHUFFLE(index); \ - FUNC4(b, c, d); \ - MIX(a, b, c, d, e, RoundConst3); \ - LOADM1(index) - -// func blockARM64(dig *digest, p []byte, m1 []uint32, cs [][5]uint32) -TEXT ·blockARM64(SB), NOSPLIT, $64-80 - MOVD dig+0(FP), R8 - MOVD p_base+8(FP), R16 - MOVD p_len+16(FP), R10 - - LSR $6, R10, R10 - LSL $6, R10, R10 - ADD R16, R10, R21 - - // Load h0-h4 into R1–R5. - MOVW (R8), R1 // R1 = h0 - MOVW 4(R8), R2 // R2 = h1 - MOVW 8(R8), R3 // R3 = h2 - MOVW 12(R8), R4 // R4 = h3 - MOVW 16(R8), R5 // R5 = h4 + VLD1.P 16(R0), [V0.S4] + FMOVS (R0), F20 + SUB $16, R0, R0 loop: - // len(p) >= chunk - CMP R16, R21 - BLS end + CMP R16, R21 + BLS end - // Initialize registers a, b, c, d, e. - MOVW R1, R10 - MOVW R2, R11 - MOVW R3, R12 - MOVW R4, R13 - MOVW R5, R14 + // Load block (p) into 16-bytes vectors. + VLD1.P 16(R1), [V4.B16] + VLD1.P 16(R1), [V5.B16] + VLD1.P 16(R1), [V6.B16] + VLD1.P 16(R1), [V7.B16] + + // Load K constants to V19 + MOVD $·sha1Ks(SB), R22 + VLD1 (R22), [V19.S4] + + VMOV V0.B16, V2.B16 + VMOV V20.S[0], V1 + VMOV V2.B16, V3.B16 + VDUP V19.S[0], V17.S4 + + // Little Endian + VREV32 V4.B16, V4.B16 + VREV32 V5.B16, V5.B16 + VREV32 V6.B16, V6.B16 + VREV32 V7.B16, V7.B16 + + // LOAD M1 rounds 0-15 + VST1.P [V4.S4], (R3) + VST1.P [V5.S4], (R3) + VST1.P [V6.S4], (R3) + VST1.P [V7.S4], (R3) - // ROUND1 (steps 0-15) - LOADCS(R10, R11, R12, R13, R14, 0) - ROUND1(R10, R11, R12, R13, R14, 0) - ROUND1(R14, R10, R11, R12, R13, 1) - ROUND1(R13, R14, R10, R11, R12, 2) - ROUND1(R12, R13, R14, R10, R11, 3) - ROUND1(R11, R12, R13, R14, R10, 4) - ROUND1(R10, R11, R12, R13, R14, 5) - ROUND1(R14, R10, R11, R12, R13, 6) - ROUND1(R13, R14, R10, R11, R12, 7) - ROUND1(R12, R13, R14, R10, R11, 8) - ROUND1(R11, R12, R13, R14, R10, 9) - ROUND1(R10, R11, R12, R13, R14, 10) - ROUND1(R14, R10, R11, R12, R13, 11) - ROUND1(R13, R14, R10, R11, R12, 12) - ROUND1(R12, R13, R14, R10, R11, 13) - ROUND1(R11, R12, R13, R14, R10, 14) - ROUND1(R10, R11, R12, R13, R14, 15) + // LOAD CS 0 + VST1.P [V0.S4], (R4) // ABCD pre-round 0 + VST1.P V1.S[0], 4(R4) // E pre-round 0 - // ROUND1x (steps 16-19) - same as ROUND1 but with no data load. 
- ROUND1x(R14, R10, R11, R12, R13, 16) - ROUND1x(R13, R14, R10, R11, R12, 17) - ROUND1x(R12, R13, R14, R10, R11, 18) - ROUND1x(R11, R12, R13, R14, R10, 19) + // Rounds 0-3 + VDUP V19.S[1], V18.S4 + VADD V17.S4, V4.S4, V16.S4 + SHA1SU0 V6.S4, V5.S4, V4.S4 + HASHUPDATECHOOSE + SHA1SU1 V7.S4, V4.S4 - // ROUND2 (steps 20-39) - ROUND2(R10, R11, R12, R13, R14, 20) - ROUND2(R14, R10, R11, R12, R13, 21) - ROUND2(R13, R14, R10, R11, R12, 22) - ROUND2(R12, R13, R14, R10, R11, 23) - ROUND2(R11, R12, R13, R14, R10, 24) - ROUND2(R10, R11, R12, R13, R14, 25) - ROUND2(R14, R10, R11, R12, R13, 26) - ROUND2(R13, R14, R10, R11, R12, 27) - ROUND2(R12, R13, R14, R10, R11, 28) - ROUND2(R11, R12, R13, R14, R10, 29) - ROUND2(R10, R11, R12, R13, R14, 30) - ROUND2(R14, R10, R11, R12, R13, 31) - ROUND2(R13, R14, R10, R11, R12, 32) - ROUND2(R12, R13, R14, R10, R11, 33) - ROUND2(R11, R12, R13, R14, R10, 34) - ROUND2(R10, R11, R12, R13, R14, 35) - ROUND2(R14, R10, R11, R12, R13, 36) - ROUND2(R13, R14, R10, R11, R12, 37) - ROUND2(R12, R13, R14, R10, R11, 38) - ROUND2(R11, R12, R13, R14, R10, 39) + // Rounds 4-7 + VADD V17.S4, V5.S4, V16.S4 + SHA1SU0 V7.S4, V6.S4, V5.S4 + HASHUPDATECHOOSE + SHA1SU1 V4.S4, V5.S4 + // LOAD M1 rounds 16-19 + VST1.P [V4.S4], (R3) - // ROUND3 (steps 40-59) - ROUND3(R10, R11, R12, R13, R14, 40) - ROUND3(R14, R10, R11, R12, R13, 41) - ROUND3(R13, R14, R10, R11, R12, 42) - ROUND3(R12, R13, R14, R10, R11, 43) - ROUND3(R11, R12, R13, R14, R10, 44) - ROUND3(R10, R11, R12, R13, R14, 45) - ROUND3(R14, R10, R11, R12, R13, 46) - ROUND3(R13, R14, R10, R11, R12, 47) - ROUND3(R12, R13, R14, R10, R11, 48) - ROUND3(R11, R12, R13, R14, R10, 49) - ROUND3(R10, R11, R12, R13, R14, 50) - ROUND3(R14, R10, R11, R12, R13, 51) - ROUND3(R13, R14, R10, R11, R12, 52) - ROUND3(R12, R13, R14, R10, R11, 53) - ROUND3(R11, R12, R13, R14, R10, 54) - ROUND3(R10, R11, R12, R13, R14, 55) - ROUND3(R14, R10, R11, R12, R13, 56) - ROUND3(R13, R14, R10, R11, R12, 57) + // Rounds 8-11 + VADD V17.S4, V6.S4, V16.S4 + SHA1SU0 V4.S4, V7.S4, V6.S4 + HASHUPDATECHOOSE + SHA1SU1 V5.S4, V6.S4 + // LOAD M1 rounds 20-23 + VST1.P [V5.S4], (R3) - LOADCS(R12, R13, R14, R10, R11, 1) - ROUND3(R12, R13, R14, R10, R11, 58) - ROUND3(R11, R12, R13, R14, R10, 59) + // Rounds 12-15 + VADD V17.S4, V7.S4, V16.S4 + SHA1SU0 V5.S4, V4.S4, V7.S4 + HASHUPDATECHOOSE + SHA1SU1 V6.S4, V7.S4 + // LOAD M1 rounds 24-27 + VST1.P [V6.S4], (R3) - // ROUND4 (steps 60-79) - ROUND4(R10, R11, R12, R13, R14, 60) - ROUND4(R14, R10, R11, R12, R13, 61) - ROUND4(R13, R14, R10, R11, R12, 62) - ROUND4(R12, R13, R14, R10, R11, 63) - ROUND4(R11, R12, R13, R14, R10, 64) + // Rounds 16-19 + VADD V17.S4, V4.S4, V16.S4 + SHA1SU0 V6.S4, V5.S4, V4.S4 + HASHUPDATECHOOSE + SHA1SU1 V7.S4, V4.S4 + // LOAD M1 rounds 28-31 + VST1.P [V7.S4], (R3) - LOADCS(R10, R11, R12, R13, R14, 2) - ROUND4(R10, R11, R12, R13, R14, 65) - ROUND4(R14, R10, R11, R12, R13, 66) - ROUND4(R13, R14, R10, R11, R12, 67) - ROUND4(R12, R13, R14, R10, R11, 68) - ROUND4(R11, R12, R13, R14, R10, 69) - ROUND4(R10, R11, R12, R13, R14, 70) - ROUND4(R14, R10, R11, R12, R13, 71) - ROUND4(R13, R14, R10, R11, R12, 72) - ROUND4(R12, R13, R14, R10, R11, 73) - ROUND4(R11, R12, R13, R14, R10, 74) - ROUND4(R10, R11, R12, R13, R14, 75) - ROUND4(R14, R10, R11, R12, R13, 76) - ROUND4(R13, R14, R10, R11, R12, 77) - ROUND4(R12, R13, R14, R10, R11, 78) - ROUND4(R11, R12, R13, R14, R10, 79) + // Rounds 20-23 + VDUP V19.S[2], V17.S4 + VADD V18.S4, V5.S4, V16.S4 + SHA1SU0 V7.S4, V6.S4, V5.S4 + HASHUPDATEPARITY + SHA1SU1 V4.S4, V5.S4 + // LOAD M1 
rounds 32-35 + VST1.P [V4.S4], (R3) - // Add registers to temp hash. - ADDW R10, R1, R1 - ADDW R11, R2, R2 - ADDW R12, R3, R3 - ADDW R13, R4, R4 - ADDW R14, R5, R5 + // Rounds 24-27 + VADD V18.S4, V6.S4, V16.S4 + SHA1SU0 V4.S4, V7.S4, V6.S4 + HASHUPDATEPARITY + SHA1SU1 V5.S4, V6.S4 + // LOAD M1 rounds 36-39 + VST1.P [V5.S4], (R3) - ADD $64, R16, R16 - B loop + // Rounds 28-31 + VADD V18.S4, V7.S4, V16.S4 + SHA1SU0 V5.S4, V4.S4, V7.S4 + HASHUPDATEPARITY + SHA1SU1 V6.S4, V7.S4 + // LOAD M1 rounds 40-43 + VST1.P [V6.S4], (R3) + + // Rounds 32-35 + VADD V18.S4, V4.S4, V16.S4 + SHA1SU0 V6.S4, V5.S4, V4.S4 + HASHUPDATEPARITY + SHA1SU1 V7.S4, V4.S4 + // LOAD M1 rounds 44-47 + VST1.P [V7.S4], (R3) + + // Rounds 36-39 + VADD V18.S4, V5.S4, V16.S4 + SHA1SU0 V7.S4, V6.S4, V5.S4 + HASHUPDATEPARITY + SHA1SU1 V4.S4, V5.S4 + // LOAD M1 rounds 48-51 + VST1.P [V4.S4], (R3) + + // Rounds 44-47 + VDUP V19.S[3], V18.S4 + VADD V17.S4, V6.S4, V16.S4 + SHA1SU0 V4.S4, V7.S4, V6.S4 + HASHUPDATEMAJ + SHA1SU1 V5.S4, V6.S4 + // LOAD M1 rounds 52-55 + VST1.P [V5.S4], (R3) + + // Rounds 44-47 + VADD V17.S4, V7.S4, V16.S4 + SHA1SU0 V5.S4, V4.S4, V7.S4 + HASHUPDATEMAJ + SHA1SU1 V6.S4, V7.S4 + // LOAD M1 rounds 56-59 + VST1.P [V6.S4], (R3) + + // Rounds 48-51 + VADD V17.S4, V4.S4, V16.S4 + SHA1SU0 V6.S4, V5.S4, V4.S4 + HASHUPDATEMAJ + SHA1SU1 V7.S4, V4.S4 + // LOAD M1 rounds 60-63 + VST1.P [V7.S4], (R3) + + // Rounds 52-55 + VADD V17.S4, V5.S4, V16.S4 + SHA1SU0 V7.S4, V6.S4, V5.S4 + HASHUPDATEMAJ + SHA1SU1 V4.S4, V5.S4 + + // LOAD CS 58 + VST1.P [V3.S4], (R4) // ABCD pre-round 56 + VST1.P V1.S[0], 4(R4) // E pre-round 56 + + // Rounds 56-59 + VADD V17.S4, V6.S4, V16.S4 + SHA1SU0 V4.S4, V7.S4, V6.S4 + HASHUPDATEMAJ + SHA1SU1 V5.S4, V6.S4 + + // Rounds 60-63 + VADD V18.S4, V7.S4, V16.S4 + SHA1SU0 V5.S4, V4.S4, V7.S4 + HASHUPDATEPARITY + SHA1SU1 V6.S4, V7.S4 + + // LOAD CS 65 + VST1.P [V3.S4], (R4) // ABCD pre-round 64 + VST1.P V1.S[0], 4(R4) // E pre-round 64 + + // Rounds 64-67 + VADD V18.S4, V4.S4, V16.S4 + HASHUPDATEPARITY + + // LOAD M1 rounds 68-79 + VST1.P [V4.S4], (R3) + VST1.P [V5.S4], (R3) + VST1.P [V6.S4], (R3) + VST1.P [V7.S4], (R3) + + // Rounds 68-71 + VADD V18.S4, V5.S4, V16.S4 + HASHUPDATEPARITY + + // Rounds 72-75 + VADD V18.S4, V6.S4, V16.S4 + HASHUPDATEPARITY + + // Rounds 76-79 + VADD V18.S4, V7.S4, V16.S4 + HASHUPDATEPARITY + + // Add working registers to hash state. + VADD V2.S4, V0.S4, V0.S4 + VADD V1.S4, V20.S4, V20.S4 end: - MOVW R1, (R8) - MOVW R2, 4(R8) - MOVW R3, 8(R8) - MOVW R4, 12(R8) - MOVW R5, 16(R8) + // Update h with final hash values. 
+ VST1.P [V0.S4], (R0) + FMOVS F20, (R0) + RET + +DATA ·sha1Ks+0(SB)/4, $0x5A827999 // K0 +DATA ·sha1Ks+4(SB)/4, $0x6ED9EBA1 // K1 +DATA ·sha1Ks+8(SB)/4, $0x8F1BBCDC // K2 +DATA ·sha1Ks+12(SB)/4, $0xCA62C1D6 // K3 +GLOBL ·sha1Ks(SB), RODATA, $16 diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_asm.go b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_asm.go deleted file mode 100644 index a4c7c062..00000000 --- a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_asm.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build (!amd64 || !arm64) && !noasm -// +build !amd64 !arm64 -// +build !noasm - -package sha1cd - -type sliceHeader struct { - base uintptr - len int - cap int -} diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_generic.go b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_generic.go index ba8b96e8..0569a1f6 100644 --- a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_generic.go +++ b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_generic.go @@ -15,6 +15,8 @@ import ( "github.com/pjbgf/sha1cd/ubc" ) +var forceGeneric bool + // blockGeneric is a portable, pure Go version of the SHA-1 block step. // It's used by sha1block_generic.go and tests. func blockGeneric(dig *digest, p []byte) { @@ -139,11 +141,12 @@ func blockGeneric(dig *digest, p []byte) { dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4] = h0, h1, h2, h3, h4 } +//go:noinline func checkCollision( m1 [shared.Rounds]uint32, cs [shared.PreStepState][shared.WordBuffers]uint32, - state [shared.WordBuffers]uint32) bool { - + h [shared.WordBuffers]uint32, +) bool { if mask := ubc.CalculateDvMask(m1); mask != 0 { dvs := ubc.SHA1_dvs() @@ -167,7 +170,7 @@ func checkCollision( // ubc's DM prior to the SHA recompression step. m1, dvs[i].Dm, csState, - state) + h) if col { return true @@ -178,6 +181,7 @@ func checkCollision( return false } +//go:nosplit func hasCollided(step uint32, m1, dm [shared.Rounds]uint32, state [shared.WordBuffers]uint32, h [shared.WordBuffers]uint32) bool { // Intermediary Hash Value. @@ -266,3 +270,42 @@ func hasCollided(step uint32, m1, dm [shared.Rounds]uint32, return false } + +// rectifyCompressionState fixes the compression state when using the +// SIMD implementations. +// +// Due to the way that hardware acceleration works, the rounds +// are executed 4 at a time. Therefore, the state for cs58 and cs65 +// are not available directly through the assembly logic. The states +// returned are for cs56 and cs64. This function updates indexes 1 and 2 +// of cs to contain the respective CS values for rounds 58 and 65. 
+// +//go:nosplit +func rectifyCompressionState( + m1 [shared.Rounds]uint32, + cs *[shared.PreStepState][shared.WordBuffers]uint32, +) { + if cs == nil { + return + } + + func3 := func(state [shared.WordBuffers]uint32, i int) [shared.WordBuffers]uint32 { + a, b, c, d, e := state[0], state[1], state[2], state[3], state[4] + + f := ((b | c) & d) | (b & c) + t := bits.RotateLeft32(a, 5) + f + e + m1[i] + shared.K2 + a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d + return [shared.WordBuffers]uint32{a, b, c, d, e} + } + func4 := func(state [shared.WordBuffers]uint32, i int) [shared.WordBuffers]uint32 { + a, b, c, d, e := state[0], state[1], state[2], state[3], state[4] + f := b ^ c ^ d + t := bits.RotateLeft32(a, 5) + f + e + m1[i] + shared.K3 + a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d + return [shared.WordBuffers]uint32{a, b, c, d, e} + } + + cs57 := func3(cs[1], 56) + cs[1] = func3(cs57, 57) + cs[2] = func4(cs[2], 64) +} diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_noasm.go b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_noasm.go index 98c14ef4..4444c753 100644 --- a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_noasm.go +++ b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_noasm.go @@ -1,5 +1,4 @@ -//go:build (!amd64 && !arm64) || noasm || !gc -// +build !amd64,!arm64 noasm !gc +//go:build (!amd64 && !arm64) || noasm package sha1cd diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/ubc.go b/vendor/github.com/pjbgf/sha1cd/ubc/ubc.go index b0b4d76e..0da55c07 100644 --- a/vendor/github.com/pjbgf/sha1cd/ubc/ubc.go +++ b/vendor/github.com/pjbgf/sha1cd/ubc/ubc.go @@ -2,4 +2,372 @@ // Unavoidable Bit Conditions that arise from crypto analysis attacks. package ubc -//go:generate go run -C asm . -out ../ubc_amd64.s -pkg $GOPACKAGE +// Based on the C implementation from Marc Stevens and Dan Shumow. +// https://github.com/cr-marcstevens/sha1collisiondetection + +type DvInfo struct { + // DvType, DvK and DvB define the DV: I(K,B) or II(K,B) (see the paper). + // https://marc-stevens.nl/research/papers/C13-S.pdf + DvType uint32 + DvK uint32 + DvB uint32 + + // TestT is the step to do the recompression from for collision detection. + TestT uint32 + + // MaskI and MaskB define the bit to check for each DV in the dvmask returned by ubc_check. + MaskI uint32 + MaskB uint32 + + // Dm is the expanded message block XOR-difference defined by the DV. + Dm [80]uint32 +} + +// CalculateDvMask takes as input an expanded message block and +// verifies the unavoidable bitconditions for all listed DVs. It returns +// a dvmask where each bit belonging to a DV is set if all unavoidable +// bitconditions for that DV have been met. 
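Context for the mask arithmetic that follows: expressions of the form (((W[i]^W[j])>>s)&1) - 1 evaluate to 0xFFFFFFFF when the tested bit is clear and to 0 when it is set, so AND-ing (that value | ^dvBits) into mask clears a group of DV bits exactly when their unavoidable bit condition fails, without branching. A standalone sketch of the idiom in Go, with hypothetical names that are not part of the package:

package main

import "fmt"

// clearIfBitSet clears dvBits in mask when bit `shift` of w is 1 and leaves
// mask untouched when it is 0 -- the branchless pattern used throughout
// CalculateDvMask.
func clearIfBitSet(mask, w uint32, shift uint, dvBits uint32) uint32 {
	keep := ((w >> shift) & 1) - 1 // 0xFFFFFFFF if the bit is clear, 0 if set
	return mask & (keep | ^dvBits)
}

func main() {
	const dvGroup = uint32(0x00000006) // two hypothetical DV bits
	mask := uint32(0xFFFFFFFF)
	fmt.Printf("%#x\n", clearIfBitSet(mask, 0x00000000, 29, dvGroup)) // 0xffffffff: bit clear, mask kept
	fmt.Printf("%#x\n", clearIfBitSet(mask, 0x20000000, 29, dvGroup)) // 0xfffffff9: bit set, DV bits cleared
}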
+// +//go:nosplit +func CalculateDvMask(W [80]uint32) uint32 { + mask := uint32(0xFFFFFFFF) + mask &= (((((W[44] ^ W[45]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_I_51_0_bit | DV_I_52_0_bit | DV_II_45_0_bit | DV_II_46_0_bit | DV_II_50_0_bit | DV_II_51_0_bit)) + mask &= (((((W[49] ^ W[50]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_II_45_0_bit | DV_II_50_0_bit | DV_II_51_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) + mask &= (((((W[48] ^ W[49]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_52_0_bit | DV_II_49_0_bit | DV_II_50_0_bit | DV_II_54_0_bit | DV_II_55_0_bit)) + mask &= ((((W[47] ^ (W[50] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) + mask &= (((((W[47] ^ W[48]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_51_0_bit | DV_II_48_0_bit | DV_II_49_0_bit | DV_II_53_0_bit | DV_II_54_0_bit)) + mask &= (((((W[46] >> 4) ^ (W[49] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_50_0_bit | DV_II_55_0_bit)) + mask &= (((((W[46] ^ W[47]) >> 29) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_50_0_bit | DV_II_47_0_bit | DV_II_48_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) + mask &= (((((W[45] >> 4) ^ (W[48] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_49_0_bit | DV_II_54_0_bit)) + mask &= (((((W[45] ^ W[46]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_51_0_bit | DV_II_52_0_bit)) + mask &= (((((W[44] >> 4) ^ (W[47] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_53_0_bit)) + mask &= (((((W[43] >> 4) ^ (W[46] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_52_0_bit)) + mask &= (((((W[43] ^ W[44]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_I_50_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_49_0_bit | DV_II_50_0_bit)) + mask &= (((((W[42] >> 4) ^ (W[45] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_51_0_bit)) + mask &= (((((W[41] >> 4) ^ (W[44] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_50_0_bit)) + mask &= (((((W[40] ^ W[41]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_47_0_bit | DV_I_48_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_56_0_bit)) + mask &= (((((W[54] ^ W[55]) >> 29) & 1) - 1) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_50_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) + mask &= (((((W[53] ^ W[54]) >> 29) & 1) - 1) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_49_0_bit | DV_II_54_0_bit | DV_II_55_0_bit)) + mask &= (((((W[52] ^ W[53]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit | DV_II_53_0_bit | DV_II_54_0_bit)) + mask &= ((((W[50] ^ (W[53] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_48_0_bit | DV_II_54_0_bit)) + mask &= (((((W[50] ^ W[51]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_II_46_0_bit | DV_II_51_0_bit | DV_II_52_0_bit | DV_II_56_0_bit)) + mask &= ((((W[49] ^ (W[52] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_47_0_bit | DV_II_53_0_bit)) + mask &= ((((W[48] ^ (W[51] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_52_0_bit)) + mask &= (((((W[42] ^ W[43]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) + mask &= (((((W[41] ^ 
W[42]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_48_0_bit)) + mask &= (((((W[40] >> 4) ^ (W[43] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_50_0_bit | DV_II_49_0_bit | DV_II_56_0_bit)) + mask &= (((((W[39] >> 4) ^ (W[42] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_49_0_bit | DV_II_48_0_bit | DV_II_55_0_bit)) + + if (mask & (DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 { + mask &= (((((W[38] >> 4) ^ (W[41] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) + } + mask &= (((((W[37] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_47_0_bit | DV_II_46_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) + if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) != 0 { + mask &= (((((W[55] ^ W[56]) >> 29) & 1) - 1) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) + } + if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) != 0 { + mask &= ((((W[52] ^ (W[55] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) + } + if (mask & (DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) != 0 { + mask &= ((((W[51] ^ (W[54] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) + } + if (mask & (DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) != 0 { + mask &= (((((W[51] ^ W[52]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) + } + if (mask & (DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) != 0 { + mask &= (((((W[36] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) + } + if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) != 0 { + mask &= ((0 - (((W[53] ^ W[56]) >> 29) & 1)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) + } + if (mask & (DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) != 0 { + mask &= ((0 - (((W[51] ^ W[54]) >> 29) & 1)) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) + } + if (mask & (DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) != 0 { + mask &= ((0 - (((W[50] ^ W[52]) >> 29) & 1)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) + } + if (mask & (DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) != 0 { + mask &= ((0 - (((W[49] ^ W[51]) >> 29) & 1)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) + } + if (mask & (DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) != 0 { + mask &= ((0 - (((W[48] ^ W[50]) >> 29) & 1)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) + } + if (mask & (DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) != 0 { + mask &= ((0 - (((W[47] ^ W[49]) >> 29) & 1)) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) + } + if (mask & (DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) != 0 { + mask &= ((0 - (((W[46] ^ W[48]) >> 29) & 1)) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) + } + mask &= ((((W[45] ^ W[47]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit | DV_I_51_2_bit)) + if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) != 0 { + mask &= ((0 - (((W[45] ^ W[47]) >> 29) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) + } + mask &= (((((W[44] ^ W[46]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit | DV_I_50_2_bit)) + if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) != 0 { + mask &= ((0 - (((W[44] ^ 
W[46]) >> 29) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) + } + mask &= ((0 - ((W[41] ^ (W[42] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_II_46_2_bit | DV_II_51_2_bit)) + mask &= ((0 - ((W[40] ^ (W[41] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_51_2_bit | DV_II_50_2_bit)) + if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) != 0 { + mask &= ((0 - (((W[40] ^ W[42]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) + } + mask &= ((0 - ((W[39] ^ (W[40] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_50_2_bit | DV_II_49_2_bit)) + if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) != 0 { + mask &= ((0 - (((W[39] ^ W[41]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) + } + if (mask & (DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 { + mask &= ((0 - (((W[38] ^ W[40]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) + } + if (mask & (DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) != 0 { + mask &= ((0 - (((W[37] ^ W[39]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) + } + mask &= ((0 - ((W[36] ^ (W[37] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_50_2_bit | DV_II_46_2_bit)) + if (mask & (DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) != 0 { + mask &= (((((W[35] >> 4) ^ (W[39] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) + } + if (mask & (DV_I_48_0_bit | DV_II_48_0_bit)) != 0 { + mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 0))) | ^(DV_I_48_0_bit | DV_II_48_0_bit)) + } + if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 { + mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 1))) | ^(DV_I_45_0_bit | DV_II_45_0_bit)) + } + if (mask & (DV_I_47_0_bit | DV_II_47_0_bit)) != 0 { + mask &= ((0 - ((W[62] ^ (W[63] >> 5)) & (1 << 0))) | ^(DV_I_47_0_bit | DV_II_47_0_bit)) + } + if (mask & (DV_I_46_0_bit | DV_II_46_0_bit)) != 0 { + mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 0))) | ^(DV_I_46_0_bit | DV_II_46_0_bit)) + } + mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 2))) | ^(DV_I_46_2_bit | DV_II_46_2_bit)) + if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 { + mask &= ((0 - ((W[60] ^ (W[61] >> 5)) & (1 << 0))) | ^(DV_I_45_0_bit | DV_II_45_0_bit)) + } + if (mask & (DV_II_51_0_bit | DV_II_54_0_bit)) != 0 { + mask &= (((((W[58] ^ W[59]) >> 29) & 1) - 1) | ^(DV_II_51_0_bit | DV_II_54_0_bit)) + } + if (mask & (DV_II_50_0_bit | DV_II_53_0_bit)) != 0 { + mask &= (((((W[57] ^ W[58]) >> 29) & 1) - 1) | ^(DV_II_50_0_bit | DV_II_53_0_bit)) + } + if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 { + mask &= ((((W[56] ^ (W[59] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_52_0_bit | DV_II_54_0_bit)) + } + if (mask & (DV_II_51_0_bit | DV_II_52_0_bit)) != 0 { + mask &= ((0 - (((W[56] ^ W[59]) >> 29) & 1)) | ^(DV_II_51_0_bit | DV_II_52_0_bit)) + } + if (mask & (DV_II_49_0_bit | DV_II_52_0_bit)) != 0 { + mask &= (((((W[56] ^ W[57]) >> 29) & 1) - 1) | ^(DV_II_49_0_bit | DV_II_52_0_bit)) + } + if (mask & (DV_II_51_0_bit | DV_II_53_0_bit)) != 0 { + mask &= ((((W[55] ^ (W[58] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_51_0_bit | DV_II_53_0_bit)) + } + if (mask & (DV_II_50_0_bit | DV_II_52_0_bit)) != 0 { + mask &= ((((W[54] ^ (W[57] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_50_0_bit | DV_II_52_0_bit)) + } + if (mask & (DV_II_49_0_bit | DV_II_51_0_bit)) != 0 { + mask &= ((((W[53] ^ (W[56] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_49_0_bit | DV_II_51_0_bit)) + } + mask &= ((((W[51] ^ (W[50] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_46_2_bit)) + mask &= ((((W[48] ^ W[50]) & (1 << 6)) 
- (1 << 6)) | ^(DV_I_50_2_bit | DV_II_46_2_bit)) + if (mask & (DV_I_51_0_bit | DV_I_52_0_bit)) != 0 { + mask &= ((0 - (((W[48] ^ W[55]) >> 29) & 1)) | ^(DV_I_51_0_bit | DV_I_52_0_bit)) + } + mask &= ((((W[47] ^ W[49]) & (1 << 6)) - (1 << 6)) | ^(DV_I_49_2_bit | DV_I_51_2_bit)) + mask &= ((((W[48] ^ (W[47] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_47_2_bit | DV_II_51_2_bit)) + mask &= ((((W[46] ^ W[48]) & (1 << 6)) - (1 << 6)) | ^(DV_I_48_2_bit | DV_I_50_2_bit)) + mask &= ((((W[47] ^ (W[46] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_46_2_bit | DV_II_50_2_bit)) + mask &= ((0 - ((W[44] ^ (W[45] >> 5)) & (1 << 1))) | ^(DV_I_51_2_bit | DV_II_49_2_bit)) + mask &= ((((W[43] ^ W[45]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit)) + mask &= (((((W[42] ^ W[44]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit)) + mask &= ((((W[43] ^ (W[42] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_II_46_2_bit | DV_II_51_2_bit)) + mask &= ((((W[42] ^ (W[41] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_51_2_bit | DV_II_50_2_bit)) + mask &= ((((W[41] ^ (W[40] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_49_2_bit)) + if (mask & (DV_I_52_0_bit | DV_II_51_0_bit)) != 0 { + mask &= ((((W[39] ^ (W[43] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_51_0_bit)) + } + if (mask & (DV_I_51_0_bit | DV_II_50_0_bit)) != 0 { + mask &= ((((W[38] ^ (W[42] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_50_0_bit)) + } + if (mask & (DV_I_48_2_bit | DV_I_51_2_bit)) != 0 { + mask &= ((0 - ((W[37] ^ (W[38] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_I_51_2_bit)) + } + if (mask & (DV_I_50_0_bit | DV_II_49_0_bit)) != 0 { + mask &= ((((W[37] ^ (W[41] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_II_49_0_bit)) + } + if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 { + mask &= ((0 - ((W[36] ^ W[38]) & (1 << 4))) | ^(DV_II_52_0_bit | DV_II_54_0_bit)) + } + mask &= ((0 - ((W[35] ^ (W[36] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_49_2_bit)) + if (mask & (DV_I_51_0_bit | DV_II_47_0_bit)) != 0 { + mask &= ((((W[35] ^ (W[39] >> 25)) & (1 << 3)) - (1 << 3)) | ^(DV_I_51_0_bit | DV_II_47_0_bit)) + } + + if mask != 0 { + if (mask & DV_I_43_0_bit) != 0 { + if not((W[61]^(W[62]>>5))&(1<<1)) != 0 || + not(not((W[59]^(W[63]>>25))&(1<<5))) != 0 || + not((W[58]^(W[63]>>30))&(1<<0)) != 0 { + mask &= ^DV_I_43_0_bit + } + } + if (mask & DV_I_44_0_bit) != 0 { + if not((W[62]^(W[63]>>5))&(1<<1)) != 0 || + not(not((W[60]^(W[64]>>25))&(1<<5))) != 0 || + not((W[59]^(W[64]>>30))&(1<<0)) != 0 { + mask &= ^DV_I_44_0_bit + } + } + if (mask & DV_I_46_2_bit) != 0 { + mask &= ((^((W[40] ^ W[42]) >> 2)) | ^DV_I_46_2_bit) + } + if (mask & DV_I_47_2_bit) != 0 { + if not((W[62]^(W[63]>>5))&(1<<2)) != 0 || + not(not((W[41]^W[43])&(1<<6))) != 0 { + mask &= ^DV_I_47_2_bit + } + } + if (mask & DV_I_48_2_bit) != 0 { + if not((W[63]^(W[64]>>5))&(1<<2)) != 0 || + not(not((W[48]^(W[49]<<5))&(1<<6))) != 0 { + mask &= ^DV_I_48_2_bit + } + } + if (mask & DV_I_49_2_bit) != 0 { + if not(not((W[49]^(W[50]<<5))&(1<<6))) != 0 || + not((W[42]^W[50])&(1<<1)) != 0 || + not(not((W[39]^(W[40]<<5))&(1<<6))) != 0 || + not((W[38]^W[40])&(1<<1)) != 0 { + mask &= ^DV_I_49_2_bit + } + } + if (mask & DV_I_50_0_bit) != 0 { + mask &= (((W[36] ^ W[37]) << 7) | ^DV_I_50_0_bit) + } + if (mask & DV_I_50_2_bit) != 0 { + mask &= (((W[43] ^ W[51]) << 11) | ^DV_I_50_2_bit) + } + if (mask & DV_I_51_0_bit) != 0 { + mask &= (((W[37] ^ W[38]) << 9) | ^DV_I_51_0_bit) + } + if (mask & DV_I_51_2_bit) != 0 { + if not(not((W[51]^(W[52]<<5))&(1<<6))) != 0 || + 
not(not((W[49]^W[51])&(1<<6))) != 0 || + not(not((W[37]^(W[37]>>5))&(1<<1))) != 0 || + not(not((W[35]^(W[39]>>25))&(1<<5))) != 0 { + mask &= ^DV_I_51_2_bit + } + } + if (mask & DV_I_52_0_bit) != 0 { + mask &= (((W[38] ^ W[39]) << 11) | ^DV_I_52_0_bit) + } + if (mask & DV_II_46_2_bit) != 0 { + mask &= (((W[47] ^ W[51]) << 17) | ^DV_II_46_2_bit) + } + if (mask & DV_II_48_0_bit) != 0 { + if not(not((W[36]^(W[40]>>25))&(1<<3))) != 0 || + not((W[35]^(W[40]<<2))&(1<<30)) != 0 { + mask &= ^DV_II_48_0_bit + } + } + if (mask & DV_II_49_0_bit) != 0 { + if not(not((W[37]^(W[41]>>25))&(1<<3))) != 0 || + not((W[36]^(W[41]<<2))&(1<<30)) != 0 { + mask &= ^DV_II_49_0_bit + } + } + if (mask & DV_II_49_2_bit) != 0 { + if not(not((W[53]^(W[54]<<5))&(1<<6))) != 0 || + not(not((W[51]^W[53])&(1<<6))) != 0 || + not((W[50]^W[54])&(1<<1)) != 0 || + not(not((W[45]^(W[46]<<5))&(1<<6))) != 0 || + not(not((W[37]^(W[41]>>25))&(1<<5))) != 0 || + not((W[36]^(W[41]>>30))&(1<<0)) != 0 { + mask &= ^DV_II_49_2_bit + } + } + if (mask & DV_II_50_0_bit) != 0 { + if not((W[55]^W[58])&(1<<29)) != 0 || + not(not((W[38]^(W[42]>>25))&(1<<3))) != 0 || + not((W[37]^(W[42]<<2))&(1<<30)) != 0 { + mask &= ^DV_II_50_0_bit + } + } + if (mask & DV_II_50_2_bit) != 0 { + if not(not((W[54]^(W[55]<<5))&(1<<6))) != 0 || + not(not((W[52]^W[54])&(1<<6))) != 0 || + not((W[51]^W[55])&(1<<1)) != 0 || + not((W[45]^W[47])&(1<<1)) != 0 || + not(not((W[38]^(W[42]>>25))&(1<<5))) != 0 || + not((W[37]^(W[42]>>30))&(1<<0)) != 0 { + mask &= ^DV_II_50_2_bit + } + } + if (mask & DV_II_51_0_bit) != 0 { + if not(not((W[39]^(W[43]>>25))&(1<<3))) != 0 || + not((W[38]^(W[43]<<2))&(1<<30)) != 0 { + mask &= ^DV_II_51_0_bit + } + } + if (mask & DV_II_51_2_bit) != 0 { + if not(not((W[55]^(W[56]<<5))&(1<<6))) != 0 || + not(not((W[53]^W[55])&(1<<6))) != 0 || + not((W[52]^W[56])&(1<<1)) != 0 || + not((W[46]^W[48])&(1<<1)) != 0 || + not(not((W[39]^(W[43]>>25))&(1<<5))) != 0 || + not((W[38]^(W[43]>>30))&(1<<0)) != 0 { + mask &= ^DV_II_51_2_bit + } + } + if (mask & DV_II_52_0_bit) != 0 { + if not(not((W[59]^W[60])&(1<<29))) != 0 || + not(not((W[40]^(W[44]>>25))&(1<<3))) != 0 || + not(not((W[40]^(W[44]>>25))&(1<<4))) != 0 || + not((W[39]^(W[44]<<2))&(1<<30)) != 0 { + mask &= ^DV_II_52_0_bit + } + } + if (mask & DV_II_53_0_bit) != 0 { + if not((W[58]^W[61])&(1<<29)) != 0 || + not(not((W[57]^(W[61]>>25))&(1<<4))) != 0 || + not(not((W[41]^(W[45]>>25))&(1<<3))) != 0 || + not(not((W[41]^(W[45]>>25))&(1<<4))) != 0 { + mask &= ^DV_II_53_0_bit + } + } + if (mask & DV_II_54_0_bit) != 0 { + if not(not((W[58]^(W[62]>>25))&(1<<4))) != 0 || + not(not((W[42]^(W[46]>>25))&(1<<3))) != 0 || + not(not((W[42]^(W[46]>>25))&(1<<4))) != 0 { + mask &= ^DV_II_54_0_bit + } + } + if (mask & DV_II_55_0_bit) != 0 { + if not(not((W[59]^(W[63]>>25))&(1<<4))) != 0 || + not(not((W[57]^(W[59]>>25))&(1<<4))) != 0 || + not(not((W[43]^(W[47]>>25))&(1<<3))) != 0 || + not(not((W[43]^(W[47]>>25))&(1<<4))) != 0 { + mask &= ^DV_II_55_0_bit + } + } + if (mask & DV_II_56_0_bit) != 0 { + if not(not((W[60]^(W[64]>>25))&(1<<4))) != 0 || + not(not((W[44]^(W[48]>>25))&(1<<3))) != 0 || + not(not((W[44]^(W[48]>>25))&(1<<4))) != 0 { + mask &= ^DV_II_56_0_bit + } + } + } + + return mask +} + +func not(x uint32) uint32 { + if x == 0 { + return 1 + } + + return 0 +} + +//go:nosplit +func SHA1_dvs() []DvInfo { + return sha1_dvs +} diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.go b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.go deleted file mode 100644 index 2e088979..00000000 --- 
a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !noasm && gc && amd64 && !arm64 -// +build !noasm,gc,amd64,!arm64 - -package ubc - -func CalculateDvMaskAMD64(W [80]uint32) uint32 - -// Check takes as input an expanded message block and verifies the unavoidable bitconditions -// for all listed DVs. It returns a dvmask where each bit belonging to a DV is set if all -// unavoidable bitconditions for that DV have been met. -// Thus, one needs to do the recompression check for each DV that has its bit set. -func CalculateDvMask(W [80]uint32) uint32 { - return CalculateDvMaskAMD64(W) -} diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.s b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.s deleted file mode 100644 index f5260735..00000000 --- a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.s +++ /dev/null @@ -1,1897 +0,0 @@ -// Code generated by command: go run asm.go -out ../ubc_amd64.s -pkg ubc. DO NOT EDIT. - -//go:build !noasm && gc && amd64 && !arm64 - -#include "textflag.h" - -// func CalculateDvMaskAMD64(W [80]uint32) uint32 -TEXT ·CalculateDvMaskAMD64(SB), NOSPLIT, $0-324 - MOVL $0xffffffff, AX - - // (((((W[44] ^ W[45]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_I_51_0_bit | DV_I_52_0_bit | DV_II_45_0_bit | DV_II_46_0_bit | DV_II_50_0_bit | DV_II_51_0_bit)) - MOVL W_44+176(FP), CX - MOVL W_45+180(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xfd7c5f7f, CX - ANDL CX, AX - - // mask &= (((((W[49] ^ W[50]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_II_45_0_bit | DV_II_50_0_bit | DV_II_51_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) - MOVL W_49+196(FP), CX - MOVL W_50+200(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0x3d7efff7, CX - ANDL CX, AX - - // mask &= (((((W[48] ^ W[49]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_52_0_bit | DV_II_49_0_bit | DV_II_50_0_bit | DV_II_54_0_bit | DV_II_55_0_bit)) - MOVL W_48+192(FP), CX - MOVL W_49+196(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0x9f5f7ffb, CX - ANDL CX, AX - - // mask &= ((((W[47] ^ (W[50] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) - MOVL W_47+188(FP), CX - MOVL W_50+200(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - SUBL $0x00000010, CX - ORL $0x7dfedddf, CX - ANDL CX, AX - - // mask &= (((((W[47] ^ W[48]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_51_0_bit | DV_II_48_0_bit | DV_II_49_0_bit | DV_II_53_0_bit | DV_II_54_0_bit)) - MOVL W_47+188(FP), CX - MOVL W_48+192(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xcfcfdffd, CX - ANDL CX, AX - - // mask &= (((((W[46] >> 4) ^ (W[49] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_50_0_bit | DV_II_55_0_bit)) - MOVL W_46+184(FP), CX - SHRL $0x04, CX - MOVL W_49+196(FP), DX - SHRL $0x1d, DX - XORL DX, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xbf7f7777, CX - ANDL CX, AX - - // mask &= (((((W[46] ^ W[47]) >> 29) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_50_0_bit | DV_II_47_0_bit | DV_II_48_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) - MOVL W_46+184(FP), CX - MOVL W_47+188(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xe7e7f7fe, CX - ANDL CX, AX - - // mask &= (((((W[45] >> 4) ^ (W[48] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_49_0_bit | DV_II_54_0_bit)) - MOVL W_45+180(FP), CX - SHRL $0x04, CX - 
MOVL W_48+192(FP), DX - SHRL $0x1d, DX - XORL DX, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xdfdfdddb, CX - ANDL CX, AX - - // mask &= (((((W[45] ^ W[46]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_51_0_bit | DV_II_52_0_bit)) - MOVL W_45+180(FP), CX - MOVL W_46+184(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xf5f57dff, CX - ANDL CX, AX - - // mask &= (((((W[44] >> 4) ^ (W[47] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_53_0_bit)) - MOVL W_44+176(FP), CX - SHRL $0x04, CX - MOVL W_47+188(FP), DX - SHRL $0x1d, DX - XORL DX, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xefeff775, CX - ANDL CX, AX - - // mask &= (((((W[43] >> 4) ^ (W[46] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_52_0_bit)) - MOVL W_43+172(FP), CX - SHRL $0x04, CX - MOVL W_46+184(FP), DX - SHRL $0x1d, DX - XORL DX, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xf7f7fdda, CX - ANDL CX, AX - - // mask &= (((((W[43] ^ W[44]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_I_50_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_49_0_bit | DV_II_50_0_bit)) - MOVL W_43+172(FP), CX - MOVL W_44+176(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xff5ed7df, CX - ANDL CX, AX - - // mask &= (((((W[42] >> 4) ^ (W[45] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_51_0_bit)) - MOVL W_42+168(FP), CX - SHRL $0x04, CX - MOVL W_45+180(FP), DX - SHRL $0x1d, DX - XORL DX, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xfdfd7f75, CX - ANDL CX, AX - - // mask &= (((((W[41] >> 4) ^ (W[44] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_50_0_bit)) - MOVL W_41+164(FP), CX - SHRL $0x04, CX - MOVL W_44+176(FP), DX - SHRL $0x1d, DX - XORL DX, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xff7edfda, CX - ANDL CX, AX - - // mask &= (((((W[40] ^ W[41]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_47_0_bit | DV_I_48_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_56_0_bit)) - MOVL W_40+160(FP), CX - MOVL W_41+164(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0x7ff5ff5d, CX - ANDL CX, AX - - // mask &= (((((W[54] ^ W[55]) >> 29) & 1) - 1) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_50_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) - MOVL W_54+216(FP), CX - MOVL W_55+220(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0x3f77dfff, CX - ANDL CX, AX - - // mask &= (((((W[53] ^ W[54]) >> 29) & 1) - 1) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_49_0_bit | DV_II_54_0_bit | DV_II_55_0_bit)) - MOVL W_53+212(FP), CX - MOVL W_54+216(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0x9fddf7ff, CX - ANDL CX, AX - - // mask &= (((((W[52] ^ W[53]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit | DV_II_53_0_bit | DV_II_54_0_bit)) - MOVL W_52+208(FP), CX - MOVL W_53+212(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xcfeefdff, CX - ANDL CX, AX - - // mask &= ((((W[50] ^ (W[53] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_48_0_bit | DV_II_54_0_bit)) - MOVL W_50+200(FP), CX - MOVL W_53+212(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - SUBL $0x00000010, CX - ORL $0xdfed77ff, CX - ANDL CX, AX - - // mask &= (((((W[50] ^ W[51]) >> 
29) & 1) - 1) | ^(DV_I_47_0_bit | DV_II_46_0_bit | DV_II_51_0_bit | DV_II_52_0_bit | DV_II_56_0_bit)) - MOVL W_50+200(FP), CX - MOVL W_51+204(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0x75fdffdf, CX - ANDL CX, AX - - // mask &= ((((W[49] ^ (W[52] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_47_0_bit | DV_II_53_0_bit)) - MOVL W_49+196(FP), CX - MOVL W_52+208(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - SUBL $0x00000010, CX - ORL $0xeff6ddff, CX - ANDL CX, AX - - // mask &= ((((W[48] ^ (W[51] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_52_0_bit)) - MOVL W_48+192(FP), CX - MOVL W_51+204(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - SUBL $0x00000010, CX - ORL $0xf7fd777f, CX - ANDL CX, AX - - // mask &= (((((W[42] ^ W[43]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) - MOVL W_42+168(FP), CX - MOVL W_43+172(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xffcff5f7, CX - ANDL CX, AX - - // mask &= (((((W[41] ^ W[42]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_48_0_bit)) - MOVL W_41+164(FP), CX - MOVL W_42+168(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xffe7fd7b, CX - ANDL CX, AX - - // mask &= (((((W[40] >> 4) ^ (W[43] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_50_0_bit | DV_II_49_0_bit | DV_II_56_0_bit)) - MOVL W_40+160(FP), CX - MOVL W_43+172(FP), DX - SHRL $0x04, CX - SHRL $0x1d, DX - XORL DX, CX - ANDL $0x00000001, CX - DECL CX - ORL $0x7fdff7f5, CX - ANDL CX, AX - - // mask &= (((((W[39] >> 4) ^ (W[42] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_49_0_bit | DV_II_48_0_bit | DV_II_55_0_bit)) - MOVL W_39+156(FP), CX - MOVL W_42+168(FP), DX - SHRL $0x04, CX - SHRL $0x1d, DX - XORL DX, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xbfeffdfa, CX - ANDL CX, AX - - // if (mask & (DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 { - // mask &= (((((W[38] >> 4) ^ (W[41] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) - // } - TESTL $0xa0080082, AX - JE f1 - MOVL W_38+152(FP), CX - MOVL W_41+164(FP), DX - SHRL $0x04, CX - SHRL $0x1d, DX - XORL DX, CX - ANDL $0x00000001, CX - DECL CX - ORL $0x5ff7ff7d, CX - ANDL CX, AX - -f1: - // mask &= (((((W[37] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_47_0_bit | DV_II_46_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) - MOVL W_37+148(FP), CX - MOVL W_40+160(FP), DX - SHRL $0x04, CX - SHRL $0x1d, DX - XORL DX, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xaffdffde, CX - ANDL CX, AX - - // if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) != 0 { - // mask &= (((((W[55] ^ W[56]) >> 29) & 1) - 1) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) - // } - TESTL $0x82108000, AX - JE f2 - MOVL W_55+220(FP), CX - MOVL W_56+224(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0x7def7fff, CX - ANDL CX, AX - -f2: - // if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) != 0 { - // mask &= ((((W[52] ^ (W[55] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) - // } - TESTL $0x80908000, AX - JE f3 - MOVL W_52+208(FP), CX - MOVL 
W_55+220(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - SUBL $0x00000010, CX - ORL $0x7f6f7fff, CX - ANDL CX, AX - -f3: - // if (mask & (DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) != 0 { - // mask &= ((((W[51] ^ (W[54] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) - // } - TESTL $0x40282000, AX - JE f4 - MOVL W_51+204(FP), CX - MOVL W_54+216(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - SUBL $0x00000010, CX - ORL $0xbfd7dfff, CX - ANDL CX, AX - -f4: - // if (mask & (DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) != 0 { - // mask &= (((((W[51] ^ W[52]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) - // } - TESTL $0x18080080, AX - JE f5 - MOVL W_51+204(FP), CX - MOVL W_52+208(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xe7f7ff7f, CX - ANDL CX, AX - -f5: - // if (mask & (DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) != 0 { - // mask &= (((((W[36] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) - // } - TESTL $0x00110208, AX - JE f6 - MOVL W_36+144(FP), CX - SHRL $0x04, CX - MOVL W_40+160(FP), DX - SHRL $0x1d, DX - XORL DX, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xffeefdf7, CX - ANDL CX, AX - -f6: - // if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) != 0 { - // mask &= ((0 - (((W[53] ^ W[56]) >> 29) & 1)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) - // } - TESTL $0x00308000, AX - JE f7 - MOVL W_53+212(FP), CX - MOVL W_56+224(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xffcf7fff, CX - ANDL CX, AX - -f7: - // if (mask & (DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) != 0 { - // mask &= ((0 - (((W[51] ^ W[54]) >> 29) & 1)) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) - // } - TESTL $0x000a0800, AX - JE f8 - MOVL W_51+204(FP), CX - MOVL W_54+216(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xfff5f7ff, CX - ANDL CX, AX - -f8: - // if (mask & (DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) != 0 { - // mask &= ((0 - (((W[50] ^ W[52]) >> 29) & 1)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) - // } - TESTL $0x00012200, AX - JE f9 - MOVL W_50+200(FP), CX - MOVL W_52+208(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xfffeddff, CX - ANDL CX, AX - -f9: - // if (mask & (DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) != 0 { - // mask &= ((0 - (((W[49] ^ W[51]) >> 29) & 1)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) - // } - TESTL $0x00008880, AX - JE f10 - MOVL W_49+196(FP), CX - MOVL W_51+204(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xffff777f, CX - ANDL CX, AX - -f10: - // if (mask & (DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) != 0 { - // mask &= ((0 - (((W[48] ^ W[50]) >> 29) & 1)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) - // } - TESTL $0x00002220, AX - JE f11 - MOVL W_48+192(FP), CX - MOVL W_50+200(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xffffdddf, CX - ANDL CX, AX - -f11: - // if (mask & (DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) != 0 { - // mask &= ((0 - (((W[47] ^ W[49]) >> 29) & 1)) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) - // } - TESTL $0x00000888, AX - JE f12 - MOVL W_47+188(FP), CX - MOVL W_49+196(FP), DX - XORL DX, CX - SHRL $0x1d, CX - 
ANDL $0x00000001, CX - NEGL CX - ORL $0xfffff777, CX - ANDL CX, AX - -f12: - // if (mask & (DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) != 0 { - // mask &= ((0 - (((W[46] ^ W[48]) >> 29) & 1)) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) - // } - TESTL $0x00000224, AX - JE f13 - MOVL W_46+184(FP), CX - MOVL W_48+192(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xfffffddb, CX - ANDL CX, AX - -f13: - // mask &= ((((W[45] ^ W[47]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit | DV_I_51_2_bit)) - MOVL W_45+180(FP), CX - MOVL W_47+188(FP), DX - XORL DX, CX - ANDL $0x00000040, CX - SUBL $0x00000040, CX - ORL $0xffffbbbf, CX - ANDL CX, AX - - // if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) != 0 { - // mask &= ((0 - (((W[45] ^ W[47]) >> 29) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) - // } - TESTL $0x0000008a, AX - JE f14 - MOVL W_45+180(FP), CX - MOVL W_47+188(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xffffff75, CX - ANDL CX, AX - -f14: - // mask &= (((((W[44] ^ W[46]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit | DV_I_50_2_bit)) - MOVL W_44+176(FP), CX - MOVL W_46+184(FP), DX - XORL DX, CX - SHRL $0x06, CX - ANDL $0x00000001, CX - DECL CX - ORL $0xffffeeef, CX - ANDL CX, AX - - // if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) != 0 { - // mask &= ((0 - (((W[44] ^ W[46]) >> 29) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) - // } - TESTL $0x00000025, AX - JE f15 - MOVL W_44+176(FP), CX - MOVL W_46+184(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xffffffda, CX - ANDL CX, AX - -f15: - // mask &= ((0 - ((W[41] ^ (W[42] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_II_46_2_bit | DV_II_51_2_bit)) - MOVL W_41+164(FP), CX - MOVL W_42+168(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - NEGL CX - ORL $0xfbfbfeff, CX - ANDL CX, AX - - // mask &= ((0 - ((W[40] ^ (W[41] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_51_2_bit | DV_II_50_2_bit)) - MOVL W_40+160(FP), CX - MOVL W_41+164(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - NEGL CX - ORL $0xfeffbfbf, CX - ANDL CX, AX - - // if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) != 0 { - // mask &= ((0 - (((W[40] ^ W[42]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) - // } - TESTL $0x8000000a, AX - JE f16 - MOVL W_40+160(FP), CX - MOVL W_42+168(FP), DX - XORL DX, CX - SHRL $0x04, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0x7ffffff5, CX - ANDL CX, AX - -f16: - // mask &= ((0 - ((W[39] ^ (W[40] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_50_2_bit | DV_II_49_2_bit)) - MOVL W_39+156(FP), CX - MOVL W_40+160(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - NEGL CX - ORL $0xffbfefef, CX - ANDL CX, AX - - // if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) != 0 { - // mask &= ((0 - (((W[39] ^ W[41]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) - // } - TESTL $0x40000005, AX - JE f17 - MOVL W_39+156(FP), CX - MOVL W_41+164(FP), DX - XORL DX, CX - SHRL $0x04, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xbffffffa, CX - ANDL CX, AX - -f17: - // if (mask & (DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 { - // mask &= ((0 - (((W[38] ^ W[40]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) - // } - TESTL $0xa0000002, AX - JE f18 - MOVL W_38+152(FP), CX - MOVL W_40+160(FP), DX - XORL DX, CX - SHRL $0x04, CX - ANDL $0x00000001, CX - NEGL CX - ORL 
$0x5ffffffd, CX - ANDL CX, AX - -f18: - // if (mask & (DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) != 0 { - // mask &= ((0 - (((W[37] ^ W[39]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) - // } - TESTL $0x50000001, AX - JE f19 - MOVL W_37+148(FP), CX - MOVL W_39+156(FP), DX - XORL DX, CX - SHRL $0x04, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xaffffffe, CX - ANDL CX, AX - -f19: - // mask &= ((0 - ((W[36] ^ (W[37] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_50_2_bit | DV_II_46_2_bit)) - MOVL W_36+144(FP), CX - MOVL W_37+148(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - NEGL CX - ORL $0xfffbefbf, CX - ANDL CX, AX - - // if (mask & (DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) != 0 { - // mask &= (((((W[35] >> 4) ^ (W[39] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) - // } - TESTL $0x00080084, AX - JE f20 - MOVL W_35+140(FP), CX - MOVL W_39+156(FP), DX - SHRL $0x04, CX - SHRL $0x1d, DX - XORL DX, CX - ANDL $0x00000001, CX - SUBL $0x00000001, CX - ORL $0xfff7ff7b, CX - ANDL CX, AX - -f20: - // if (mask & (DV_I_48_0_bit | DV_II_48_0_bit)) != 0 { - // mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 0))) | ^(DV_I_48_0_bit | DV_II_48_0_bit)) - // } - TESTL $0x00100080, AX - JE f21 - MOVL W_63+252(FP), CX - MOVL W_64+256(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xffefff7f, CX - ANDL CX, AX - -f21: - // if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 { - // mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 1))) | ^(DV_I_45_0_bit | DV_II_45_0_bit)) - // } - TESTL $0x00010004, AX - JE f22 - MOVL W_63+252(FP), CX - MOVL W_64+256(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - NEGL CX - ORL $0xfffefffb, CX - ANDL CX, AX - -f22: - // if (mask & (DV_I_47_0_bit | DV_II_47_0_bit)) != 0 { - // mask &= ((0 - ((W[62] ^ (W[63] >> 5)) & (1 << 0))) | ^(DV_I_47_0_bit | DV_II_47_0_bit)) - // } - TESTL $0x00080020, AX - JE f23 - MOVL W_62+248(FP), CX - MOVL W_63+252(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xfff7ffdf, CX - ANDL CX, AX - -f23: - // if (mask & (DV_I_46_0_bit | DV_II_46_0_bit)) != 0 { - // mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 0))) | ^(DV_I_46_0_bit | DV_II_46_0_bit)) - // } - TESTL $0x00020008, AX - JE f24 - MOVL W_61+244(FP), CX - MOVL W_62+248(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xfffdfff7, CX - ANDL CX, AX - -f24: - // mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 2))) | ^(DV_I_46_2_bit | DV_II_46_2_bit)) - MOVL W_61+244(FP), CX - MOVL W_62+248(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000004, CX - NEGL CX - ORL $0xfffbffef, CX - ANDL CX, AX - - // if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 { - // mask &= ((0 - ((W[60] ^ (W[61] >> 5)) & (1 << 0))) | ^(DV_I_45_0_bit | DV_II_45_0_bit)) - // } - TESTL $0x00010004, AX - JE f25 - MOVL W_60+240(FP), CX - MOVL W_61+244(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xfffefffb, CX - ANDL CX, AX - -f25: - // if (mask & (DV_II_51_0_bit | DV_II_54_0_bit)) != 0 { - // mask &= (((((W[58] ^ W[59]) >> 29) & 1) - 1) | ^(DV_II_51_0_bit | DV_II_54_0_bit)) - // } - TESTL $0x22000000, AX - JE f26 - MOVL W_58+232(FP), CX - MOVL W_59+236(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - SUBL $0x00000001, CX - ORL $0xddffffff, CX - ANDL CX, AX - -f26: - // if (mask & (DV_II_50_0_bit | DV_II_53_0_bit)) != 0 { - // mask &= (((((W[57] ^ W[58]) >> 29) & 1) - 1) | ^(DV_II_50_0_bit | DV_II_53_0_bit)) - 
// } - TESTL $0x10800000, AX - JE f27 - MOVL W_57+228(FP), CX - MOVL W_58+232(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - SUBL $0x00000001, CX - ORL $0xef7fffff, CX - ANDL CX, AX - -f27: - // if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 { - // mask &= ((((W[56] ^ (W[59] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_52_0_bit | DV_II_54_0_bit)) - // } - TESTL $0x28000000, AX - JE f28 - MOVL W_56+224(FP), CX - MOVL W_59+236(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - SUBL $0x00000010, CX - ORL $0xd7ffffff, CX - ANDL CX, AX - -f28: - // if (mask & (DV_II_51_0_bit | DV_II_52_0_bit)) != 0 { - // mask &= ((0 - (((W[56] ^ W[59]) >> 29) & 1)) | ^(DV_II_51_0_bit | DV_II_52_0_bit)) - // } - TESTL $0x0a000000, AX - JE f29 - MOVL W_56+224(FP), CX - MOVL W_59+236(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xf5ffffff, CX - ANDL CX, AX - -f29: - // if (mask & (DV_II_49_0_bit | DV_II_52_0_bit)) != 0 { - // mask &= (((((W[56] ^ W[57]) >> 29) & 1) - 1) | ^(DV_II_49_0_bit | DV_II_52_0_bit)) - // } - TESTL $0x08200000, AX - JE f30 - MOVL W_56+224(FP), CX - MOVL W_57+228(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - SUBL $0x00000001, CX - ORL $0xf7dfffff, CX - ANDL CX, AX - -f30: - // if (mask & (DV_II_51_0_bit | DV_II_53_0_bit)) != 0 { - // mask &= ((((W[55] ^ (W[58] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_51_0_bit | DV_II_53_0_bit)) - // } - TESTL $0x12000000, AX - JE f31 - MOVL W_55+220(FP), CX - MOVL W_58+232(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - SUBL $0x00000010, CX - ORL $0xedffffff, CX - ANDL CX, AX - -f31: - // if (mask & (DV_II_50_0_bit | DV_II_52_0_bit)) != 0 { - // mask &= ((((W[54] ^ (W[57] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_50_0_bit | DV_II_52_0_bit)) - // } - TESTL $0x08800000, AX - JE f32 - MOVL W_54+216(FP), CX - MOVL W_57+228(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - SUBL $0x00000010, CX - ORL $0xf77fffff, CX - ANDL CX, AX - -f32: - // if (mask & (DV_II_49_0_bit | DV_II_51_0_bit)) != 0 { - // mask &= ((((W[53] ^ (W[56] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_49_0_bit | DV_II_51_0_bit)) - // } - TESTL $0x02200000, AX - JE f33 - MOVL W_53+212(FP), CX - MOVL W_56+224(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - SUBL $0x00000010, CX - ORL $0xfddfffff, CX - ANDL CX, AX - -f33: - // mask &= ((((W[51] ^ (W[50] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_46_2_bit)) - MOVL W_51+204(FP), CX - MOVL W_50+200(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - SUBL $0x00000002, CX - ORL $0xfffbefff, CX - ANDL CX, AX - - // mask &= ((((W[48] ^ W[50]) & (1 << 6)) - (1 << 6)) | ^(DV_I_50_2_bit | DV_II_46_2_bit)) - MOVL W_48+192(FP), CX - MOVL W_50+200(FP), DX - XORL DX, CX - ANDL $0x00000040, CX - SUBL $0x00000040, CX - ORL $0xfffbefff, CX - ANDL CX, AX - - // if (mask & (DV_I_51_0_bit | DV_I_52_0_bit)) != 0 { - // mask &= ((0 - (((W[48] ^ W[55]) >> 29) & 1)) | ^(DV_I_51_0_bit | DV_I_52_0_bit)) - // } - TESTL $0x0000a000, AX - JE f34 - MOVL W_48+192(FP), CX - MOVL W_55+220(FP), DX - XORL DX, CX - SHRL $0x1d, CX - ANDL $0x00000001, CX - NEGL CX - ORL $0xffff5fff, CX - ANDL CX, AX - -f34: - // mask &= ((((W[47] ^ W[49]) & (1 << 6)) - (1 << 6)) | ^(DV_I_49_2_bit | DV_I_51_2_bit)) - MOVL W_47+188(FP), CX - MOVL W_49+196(FP), DX - XORL DX, CX - ANDL $0x00000040, CX - SUBL $0x00000040, CX - ORL $0xffffbbff, CX - ANDL CX, AX - - // mask &= ((((W[48] ^ (W[47] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_47_2_bit | 
DV_II_51_2_bit)) - MOVL W_48+192(FP), CX - MOVL W_47+188(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - SUBL $0x00000002, CX - ORL $0xfbffffbf, CX - ANDL CX, AX - - // mask &= ((((W[46] ^ W[48]) & (1 << 6)) - (1 << 6)) | ^(DV_I_48_2_bit | DV_I_50_2_bit)) - MOVL W_46+184(FP), CX - MOVL W_48+192(FP), DX - XORL DX, CX - ANDL $0x00000040, CX - SUBL $0x00000040, CX - ORL $0xffffeeff, CX - ANDL CX, AX - - // mask &= ((((W[47] ^ (W[46] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_46_2_bit | DV_II_50_2_bit)) - MOVL W_47+188(FP), CX - MOVL W_46+184(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - SUBL $0x00000002, CX - ORL $0xfeffffef, CX - ANDL CX, AX - - // mask &= ((0 - ((W[44] ^ (W[45] >> 5)) & (1 << 1))) | ^(DV_I_51_2_bit | DV_II_49_2_bit)) - MOVL W_44+176(FP), CX - MOVL W_45+180(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - NEGL CX - ORL $0xffbfbfff, CX - ANDL CX, AX - - // mask &= ((((W[43] ^ W[45]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit)) - MOVL W_43+172(FP), CX - MOVL W_45+180(FP), DX - XORL DX, CX - ANDL $0x00000040, CX - SUBL $0x00000040, CX - ORL $0xfffffbbf, CX - ANDL CX, AX - - // mask &= (((((W[42] ^ W[44]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit)) - MOVL W_42+168(FP), CX - MOVL W_44+176(FP), DX - XORL DX, CX - SHRL $0x06, CX - ANDL $0x00000001, CX - SUBL $0x00000001, CX - ORL $0xfffffeef, CX - ANDL CX, AX - - // mask &= ((((W[43] ^ (W[42] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_II_46_2_bit | DV_II_51_2_bit)) - MOVL W_43+172(FP), CX - MOVL W_42+168(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - SUBL $0x00000002, CX - ORL $0xfbfbffff, CX - ANDL CX, AX - - // mask &= ((((W[42] ^ (W[41] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_51_2_bit | DV_II_50_2_bit)) - MOVL W_42+168(FP), CX - MOVL W_41+164(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - SUBL $0x00000002, CX - ORL $0xfeffbfff, CX - ANDL CX, AX - - // mask &= ((((W[41] ^ (W[40] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_49_2_bit)) - MOVL W_41+164(FP), CX - MOVL W_40+160(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - SUBL $0x00000002, CX - ORL $0xffbfefff, CX - ANDL CX, AX - - // if (mask & (DV_I_52_0_bit | DV_II_51_0_bit)) != 0 { - // mask &= ((((W[39] ^ (W[43] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_51_0_bit)) - // } - TESTL $0x02008000, AX - JE f35 - MOVL W_39+156(FP), CX - MOVL W_43+172(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - SUBL $0x00000010, CX - ORL $0xfdff7fff, CX - ANDL CX, AX - -f35: - // if (mask & (DV_I_51_0_bit | DV_II_50_0_bit)) != 0 { - // mask &= ((((W[38] ^ (W[42] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_50_0_bit)) - // } - TESTL $0x00802000, AX - JE f36 - MOVL W_38+152(FP), CX - MOVL W_42+168(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - SUBL $0x00000010, CX - ORL $0xff7fdfff, CX - ANDL CX, AX - -f36: - // if (mask & (DV_I_48_2_bit | DV_I_51_2_bit)) != 0 { - // mask &= ((0 - ((W[37] ^ (W[38] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_I_51_2_bit)) - // } - TESTL $0x00004100, AX - JE f37 - MOVL W_37+148(FP), CX - MOVL W_38+152(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - NEGL CX - ORL $0xffffbeff, CX - ANDL CX, AX - -f37: - // if (mask & (DV_I_50_0_bit | DV_II_49_0_bit)) != 0 { - // mask &= ((((W[37] ^ (W[41] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_II_49_0_bit)) - // } - TESTL $0x00200800, AX - JE f38 - MOVL W_37+148(FP), CX - MOVL W_41+164(FP), DX - SHRL $0x19, DX - XORL DX, CX 
- ANDL $0x00000010, CX - SUBL $0x00000010, CX - ORL $0xffdff7ff, CX - ANDL CX, AX - -f38: - // if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 { - // mask &= ((0 - ((W[36] ^ W[38]) & (1 << 4))) | ^(DV_II_52_0_bit | DV_II_54_0_bit)) - // } - TESTL $0x28000000, AX - JE f39 - MOVL W_36+144(FP), CX - MOVL W_38+152(FP), DX - XORL DX, CX - ANDL $0x00000010, CX - NEGL CX - ORL $0xd7ffffff, CX - ANDL CX, AX - -f39: - // mask &= ((0 - ((W[35] ^ (W[36] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_49_2_bit)) - MOVL W_35+140(FP), CX - MOVL W_36+144(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - NEGL CX - ORL $0xfffffbef, CX - ANDL CX, AX - - // if (mask & (DV_I_51_0_bit | DV_II_47_0_bit)) != 0 { - // mask &= ((((W[35] ^ (W[39] >> 25)) & (1 << 3)) - (1 << 3)) | ^(DV_I_51_0_bit | DV_II_47_0_bit)) - // } - TESTL $0x00082000, AX - JE f40 - MOVL W_35+140(FP), CX - MOVL W_39+156(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000008, CX - SUBL $0x00000008, CX - ORL $0xfff7dfff, CX - ANDL CX, AX - -f40: - // if mask != 0 - TESTL $0x00000000, AX - JNE end - - // if (mask & DV_I_43_0_bit) != 0 { - // if not((W[61]^(W[62]>>5))&(1<<1)) != 0 || - // not(not((W[59]^(W[63]>>25))&(1<<5))) != 0 || - // not((W[58]^(W[63]>>30))&(1<<0)) != 0 { - // mask &= ^DV_I_43_0_bit - // } - // } - BTL $0x00, AX - JNC f41_skip - MOVL W_61+244(FP), CX - MOVL W_62+248(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - NEGL CX - CMPL CX, $0x00000000 - JE f41_in - MOVL W_59+236(FP), CX - MOVL W_63+252(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000020, CX - CMPL CX, $0x00000000 - JNE f41_in - MOVL W_58+232(FP), CX - MOVL W_63+252(FP), DX - SHRL $0x1e, DX - XORL DX, CX - ANDL $0x00000001, CX - NEGL CX - CMPL CX, $0x00000000 - JE f41_in - JMP f41_skip - -f41_in: - ANDL $0xfffffffe, AX - -f41_skip: - // if (mask & DV_I_44_0_bit) != 0 { - // if not((W[62]^(W[63]>>5))&(1<<1)) != 0 || - // not(not((W[60]^(W[64]>>25))&(1<<5))) != 0 || - // not((W[59]^(W[64]>>30))&(1<<0)) != 0 { - // mask &= ^DV_I_44_0_bit - // } - // } - BTL $0x01, AX - JNC f42_skip - MOVL W_62+248(FP), CX - MOVL W_63+252(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - NEGL CX - CMPL CX, $0x00000000 - JE f42_in - MOVL W_60+240(FP), CX - MOVL W_64+256(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000020, CX - CMPL CX, $0x00000000 - JNE f42_in - MOVL W_59+236(FP), CX - MOVL W_64+256(FP), DX - SHRL $0x1e, DX - XORL DX, CX - ANDL $0x00000001, CX - NEGL CX - CMPL CX, $0x00000000 - JE f42_in - JMP f42_skip - -f42_in: - ANDL $0xfffffffd, AX - -f42_skip: - // if (mask & DV_I_46_2_bit) != 0 { - // mask &= ((^((W[40] ^ W[42]) >> 2)) | ^DV_I_46_2_bit) - // } - BTL $0x04, AX - JNC f43 - MOVL W_40+160(FP), CX - MOVL W_42+168(FP), DX - XORL DX, CX - SHRL $0x02, CX - NOTL CX - ORL $0xffffffef, CX - ANDL CX, AX - -f43: - // if (mask & DV_I_47_2_bit) != 0 { - // if not((W[62]^(W[63]>>5))&(1<<2)) != 0 || - // not(not((W[41]^W[43])&(1<<6))) != 0 { - // mask &= ^DV_I_47_2_bit - // } - // } - BTL $0x06, AX - JNC f44_skip - MOVL W_62+248(FP), CX - MOVL W_63+252(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000004, CX - NEGL CX - CMPL CX, $0x00000000 - JE f44_in - MOVL W_41+164(FP), CX - MOVL W_43+172(FP), DX - XORL DX, CX - ANDL $0x00000040, CX - CMPL CX, $0x00000000 - JNE f44_in - JMP f44_skip - -f44_in: - ANDL $0xffffffbf, AX - -f44_skip: - // if (mask & DV_I_48_2_bit) != 0 { - // if not((W[63]^(W[64]>>5))&(1<<2)) != 0 || - // not(not((W[48]^(W[49]<<5))&(1<<6))) != 0 { - // mask &= ^DV_I_48_2_bit - // } - // } - BTL $0x08, AX - 
JNC f45_skip - MOVL W_63+252(FP), CX - MOVL W_64+256(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000004, CX - NEGL CX - CMPL CX, $0x00000000 - JE f45_in - MOVL W_48+192(FP), CX - MOVL W_49+196(FP), DX - SHLL $0x05, DX - XORL DX, CX - ANDL $0x00000040, CX - CMPL CX, $0x00000000 - JNE f45_in - JMP f45_skip - -f45_in: - ANDL $0xfffffeff, AX - -f45_skip: - // if (mask & DV_I_49_2_bit) != 0 { - // if not(not((W[49]^(W[50]<<5))&(1<<6))) != 0 || - // not((W[42]^W[50])&(1<<1)) != 0 || - // not(not((W[39]^(W[40]<<5))&(1<<6))) != 0 || - // not((W[38]^W[40])&(1<<1)) != 0 { - // mask &= ^DV_I_49_2_bit - // } - // } - BTL $0x0a, AX - JNC f46_skip - MOVL W_49+196(FP), CX - MOVL W_50+200(FP), DX - SHLL $0x05, DX - XORL DX, CX - ANDL $0x00000040, CX - CMPL CX, $0x00000000 - JNE f46_in - MOVL W_42+168(FP), CX - MOVL W_50+200(FP), DX - XORL DX, CX - ANDL $0x00000002, CX - CMPL CX, $0x00000000 - JE f46_in - MOVL W_39+156(FP), CX - MOVL W_40+160(FP), DX - SHLL $0x05, DX - XORL DX, CX - ANDL $0x00000040, CX - CMPL CX, $0x00000000 - JNE f46_in - MOVL W_38+152(FP), CX - MOVL W_40+160(FP), DX - XORL DX, CX - ANDL $0x00000002, CX - CMPL CX, $0x00000000 - JE f46_in - JMP f46_skip - -f46_in: - ANDL $0xfffffbff, AX - -f46_skip: - // if (mask & DV_I_50_0_bit) != 0 { - // mask &= (((W[36] ^ W[37]) << 7) | ^DV_I_50_0_bit) - // } - BTL $0x0b, AX - JNC f47 - MOVL W_36+144(FP), CX - MOVL W_37+148(FP), DX - XORL DX, CX - SHLL $0x07, CX - ORL $0xfffff7ff, CX - ANDL CX, AX - -f47: - // if (mask & DV_I_50_2_bit) != 0 { - // mask &= (((W[43] ^ W[51]) << 11) | ^DV_I_50_2_bit) - // } - BTL $0x0c, AX - JNC f48 - MOVL W_43+172(FP), CX - MOVL W_51+204(FP), DX - XORL DX, CX - SHLL $0x0b, CX - ORL $0xffffefff, CX - ANDL CX, AX - -f48: - // if (mask & DV_I_51_0_bit) != 0 { - // mask &= (((W[37] ^ W[38]) << 9) | ^DV_I_51_0_bit) - // } - BTL $0x0d, AX - JNC f49 - MOVL W_37+148(FP), CX - MOVL W_38+152(FP), DX - XORL DX, CX - SHLL $0x09, CX - ORL $0xffffdfff, CX - ANDL CX, AX - -f49: - // if (mask & DV_I_51_2_bit) != 0 { - // if not(not((W[51]^(W[52]<<5))&(1<<6))) != 0 || - // not(not((W[49]^W[51])&(1<<6))) != 0 || - // not(not((W[37]^(W[37]>>5))&(1<<1))) != 0 || - // not(not((W[35]^(W[39]>>25))&(1<<5))) != 0 { - // mask &= ^DV_I_51_2_bit - // } - // } - BTL $0x0e, AX - JNC f50_skip - MOVL W_51+204(FP), CX - MOVL W_52+208(FP), DX - SHLL $0x05, DX - XORL DX, CX - ANDL $0x00000040, CX - CMPL CX, $0x00000000 - JNE f50_in - MOVL W_49+196(FP), CX - MOVL W_51+204(FP), DX - XORL DX, CX - ANDL $0x00000040, CX - CMPL CX, $0x00000000 - JNE f50_in - MOVL W_37+148(FP), CX - MOVL W_37+148(FP), DX - SHRL $0x05, DX - XORL DX, CX - ANDL $0x00000002, CX - CMPL CX, $0x00000000 - JNE f50_in - MOVL W_35+140(FP), CX - MOVL W_39+156(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000020, CX - CMPL CX, $0x00000000 - JNE f50_in - JMP f50_skip - -f50_in: - ANDL $0xffffbfff, AX - -f50_skip: - // if (mask & DV_I_52_0_bit) != 0 { - // mask &= (((W[38] ^ W[39]) << 11) | ^DV_I_52_0_bit) - // } - BTL $0x0f, AX - JNC f51 - MOVL W_38+152(FP), CX - MOVL W_39+156(FP), DX - XORL DX, CX - SHLL $0x0b, CX - ORL $0xffff7fff, CX - ANDL CX, AX - -f51: - // if (mask & DV_II_46_2_bit) != 0 { - // mask &= (((W[47] ^ W[51]) << 17) | ^DV_II_46_2_bit) - // } - TESTL $0x00040000, AX - BTL $0x12, AX - JNC f52 - MOVL W_47+188(FP), CX - MOVL W_51+204(FP), DX - XORL DX, CX - SHLL $0x11, CX - ORL $0xfffbffff, CX - ANDL CX, AX - -f52: - // if (mask & DV_II_48_0_bit) != 0 { - // if not(not((W[36]^(W[40]>>25))&(1<<3))) != 0 || - // not((W[35]^(W[40]<<2))&(1<<30)) != 0 { - // mask &= 
^DV_II_48_0_bit - // } - // } - BTL $0x14, AX - JNC f53_skip - MOVL W_36+144(FP), CX - MOVL W_40+160(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000008, CX - CMPL CX, $0x00000000 - JNE f53_in - MOVL W_35+140(FP), CX - MOVL W_40+160(FP), DX - SHLL $0x02, DX - XORL DX, CX - ANDL $0x40000000, CX - CMPL CX, $0x00000000 - JNE f53_in - JMP f53_skip - -f53_in: - ANDL $0xffefffff, AX - -f53_skip: - // if (mask & DV_II_49_0_bit) != 0 { - // if not(not((W[37]^(W[41]>>25))&(1<<3))) != 0 || - // not((W[36]^(W[41]<<2))&(1<<30)) != 0 { - // mask &= ^DV_II_49_0_bit - // } - // } - BTL $0x15, AX - JNC f54_skip - MOVL W_37+148(FP), CX - MOVL W_41+164(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000008, CX - CMPL CX, $0x00000000 - JNE f54_in - MOVL W_36+144(FP), CX - MOVL W_41+164(FP), DX - SHLL $0x02, DX - XORL DX, CX - ANDL $0x40000000, CX - CMPL CX, $0x00000000 - JNE f54_in - JMP f54_skip - -f54_in: - ANDL $0xffdfffff, AX - -f54_skip: - // if (mask & DV_II_49_2_bit) != 0 { - // if not(not((W[53]^(W[54]<<5))&(1<<6))) != 0 || - // not(not((W[51]^W[53])&(1<<6))) != 0 || - // not((W[50]^W[54])&(1<<1)) != 0 || - // not(not((W[45]^(W[46]<<5))&(1<<6))) != 0 || - // not(not((W[37]^(W[41]>>25))&(1<<5))) != 0 || - // not((W[36]^(W[41]>>30))&(1<<0)) != 0 { - // mask &= ^DV_II_49_2_bit - // } - // } - BTL $0x16, AX - JNC f55_skip - MOVL W_53+212(FP), CX - MOVL W_54+216(FP), DX - SHLL $0x05, DX - XORL DX, CX - ANDL $0x00000040, CX - CMPL CX, $0x00000000 - JNE f55_in - MOVL W_51+204(FP), CX - MOVL W_53+212(FP), DX - XORL DX, CX - ANDL $0x00000040, CX - CMPL CX, $0x00000000 - JNE f55_in - MOVL W_50+200(FP), CX - MOVL W_54+216(FP), DX - XORL DX, CX - ANDL $0x00000002, CX - NEGL CX - CMPL CX, $0x00000000 - JE f55_in - MOVL W_45+180(FP), CX - MOVL W_46+184(FP), DX - SHLL $0x05, DX - XORL DX, CX - ANDL $0x00000040, CX - CMPL CX, $0x00000000 - JNE f55_in - MOVL W_37+148(FP), CX - MOVL W_41+164(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000020, CX - CMPL CX, $0x00000000 - JNE f55_in - MOVL W_36+144(FP), CX - MOVL W_41+164(FP), DX - SHRL $0x1e, DX - XORL DX, CX - ANDL $0x00000001, CX - NEGL CX - CMPL CX, $0x00000000 - JE f55_in - JMP f55_skip - -f55_in: - ANDL $0xffbfffff, AX - -f55_skip: - // if (mask & DV_II_50_0_bit) != 0 { - // if not((W[55]^W[58])&(1<<29)) != 0 || - // not(not((W[38]^(W[42]>>25))&(1<<3))) != 0 || - // not((W[37]^(W[42]<<2))&(1<<30)) != 0 { - // mask &= ^DV_II_50_0_bit - // } - // } - BTL $0x17, AX - JNC f56_skip - MOVL W_55+220(FP), CX - MOVL W_58+232(FP), DX - XORL DX, CX - ANDL $0x20000000, CX - NEGL CX - CMPL CX, $0x00000000 - JE f56_in - MOVL W_38+152(FP), CX - MOVL W_42+168(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000008, CX - CMPL CX, $0x00000000 - JNE f56_in - MOVL W_37+148(FP), CX - MOVL W_42+168(FP), DX - SHRL $0x02, DX - XORL DX, CX - ANDL $0x40000000, CX - NEGL CX - CMPL CX, $0x00000000 - JE f56_in - JMP f56_skip - -f56_in: - ANDL $0xff7fffff, AX - -f56_skip: - // if (mask & DV_II_50_2_bit) != 0 { - // if not(not((W[54]^(W[55]<<5))&(1<<6))) != 0 || - // not(not((W[52]^W[54])&(1<<6))) != 0 || - // not((W[51]^W[55])&(1<<1)) != 0 || - // not((W[45]^W[47])&(1<<1)) != 0 || - // not(not((W[38]^(W[42]>>25))&(1<<5))) != 0 || - // not((W[37]^(W[42]>>30))&(1<<0)) != 0 { - // mask &= ^DV_II_50_2_bit - // } - // } - BTL $0x18, AX - JNC f57_skip - MOVL W_54+216(FP), CX - MOVL W_55+220(FP), DX - SHLL $0x05, DX - XORL DX, CX - ANDL $0x00000040, CX - CMPL CX, $0x00000000 - JNE f57_in - MOVL W_52+208(FP), CX - MOVL W_54+216(FP), DX - XORL DX, CX - ANDL $0x00000040, CX - CMPL CX, 
$0x00000000 - JNE f57_in - MOVL W_51+204(FP), CX - MOVL W_55+220(FP), DX - XORL DX, CX - ANDL $0x00000002, CX - NEGL CX - CMPL CX, $0x00000000 - JE f57_in - MOVL W_45+180(FP), CX - MOVL W_47+188(FP), DX - XORL DX, CX - ANDL $0x00000002, CX - NEGL CX - CMPL CX, $0x00000000 - JE f57_in - MOVL W_38+152(FP), CX - MOVL W_42+168(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000020, CX - CMPL CX, $0x00000000 - JNE f57_in - MOVL W_37+148(FP), CX - MOVL W_42+168(FP), DX - SHRL $0x1e, DX - XORL DX, CX - ANDL $0x00000001, CX - NEGL CX - CMPL CX, $0x00000000 - JE f57_in - JMP f57_skip - -f57_in: - ANDL $0xfeffffff, AX - -f57_skip: - // if (mask & DV_II_51_0_bit) != 0 { - // if not(not((W[39]^(W[43]>>25))&(1<<3))) != 0 || - // not((W[38]^(W[43]<<2))&(1<<30)) != 0 { - // mask &= ^DV_II_51_0_bit - // } - // } - BTL $0x19, AX - JNC f58_skip - MOVL W_39+156(FP), CX - MOVL W_43+172(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000008, CX - CMPL CX, $0x00000000 - JNE f58_in - MOVL W_38+152(FP), CX - MOVL W_43+172(FP), DX - SHLL $0x02, DX - XORL DX, CX - ANDL $0x40000000, CX - NEGL CX - CMPL CX, $0x00000000 - JE f58_in - JMP f58_skip - -f58_in: - ANDL $0xfdffffff, AX - -f58_skip: - // if (mask & DV_II_51_2_bit) != 0 { - // if not(not((W[55]^(W[56]<<5))&(1<<6))) != 0 || - // not(not((W[53]^W[55])&(1<<6))) != 0 || - // not((W[52]^W[56])&(1<<1)) != 0 || - // not((W[46]^W[48])&(1<<1)) != 0 || - // not(not((W[39]^(W[43]>>25))&(1<<5))) != 0 || - // not((W[38]^(W[43]>>30))&(1<<0)) != 0 { - // mask &= ^DV_II_51_2_bit - // } - // } - BTL $0x1a, AX - JNC f59_skip - MOVL W_55+220(FP), CX - MOVL W_56+224(FP), DX - SHLL $0x05, DX - XORL DX, CX - ANDL $0x00000040, CX - CMPL CX, $0x00000000 - JNE f59_in - MOVL W_53+212(FP), CX - MOVL W_55+220(FP), DX - XORL DX, CX - ANDL $0x00000040, CX - CMPL CX, $0x00000000 - JNE f59_in - MOVL W_52+208(FP), CX - MOVL W_56+224(FP), DX - XORL DX, CX - ANDL $0x00000002, CX - NEGL CX - CMPL CX, $0x00000000 - JE f59_in - MOVL W_46+184(FP), CX - MOVL W_48+192(FP), DX - XORL DX, CX - ANDL $0x00000002, CX - NEGL CX - CMPL CX, $0x00000000 - JE f59_in - MOVL W_39+156(FP), CX - MOVL W_43+172(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000020, CX - CMPL CX, $0x00000000 - JNE f59_in - MOVL W_38+152(FP), CX - MOVL W_43+172(FP), DX - SHRL $0x1e, DX - XORL DX, CX - ANDL $0x00000001, CX - NEGL CX - CMPL CX, $0x00000000 - JE f59_in - JMP f59_skip - -f59_in: - ANDL $0xfbffffff, AX - -f59_skip: - // if (mask & DV_II_52_0_bit) != 0 { - // if not(not((W[59]^W[60])&(1<<29))) != 0 || - // not(not((W[40]^(W[44]>>25))&(1<<3))) != 0 || - // not(not((W[40]^(W[44]>>25))&(1<<4))) != 0 || - // not((W[39]^(W[44]<<2))&(1<<30)) != 0 { - // mask &= ^DV_II_52_0_bit - // } - // } - BTL $0x1b, AX - JNC f60_skip - MOVL W_59+236(FP), CX - MOVL W_60+240(FP), DX - XORL DX, CX - ANDL $0x20000000, CX - CMPL CX, $0x00000000 - JNE f60_in - MOVL W_40+160(FP), CX - MOVL W_44+176(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000008, CX - CMPL CX, $0x00000000 - JNE f60_in - MOVL W_40+160(FP), CX - MOVL W_44+176(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - CMPL CX, $0x00000000 - JNE f60_in - MOVL W_39+156(FP), CX - MOVL W_44+176(FP), DX - SHLL $0x02, DX - XORL DX, CX - ANDL $0x40000000, CX - NEGL CX - CMPL CX, $0x00000000 - JE f60_in - JMP f60_skip - -f60_in: - ANDL $0xf7ffffff, AX - -f60_skip: - // if (mask & DV_II_53_0_bit) != 0 { - // if not((W[58]^W[61])&(1<<29)) != 0 || - // not(not((W[57]^(W[61]>>25))&(1<<4))) != 0 || - // not(not((W[41]^(W[45]>>25))&(1<<3))) != 0 || - // 
not(not((W[41]^(W[45]>>25))&(1<<4))) != 0 { - // mask &= ^DV_II_53_0_bit - // } - // } - BTL $0x1c, AX - JNC f61_skip - MOVL W_58+232(FP), CX - MOVL W_61+244(FP), DX - XORL DX, CX - ANDL $0x20000000, CX - NEGL CX - CMPL CX, $0x00000000 - JE f61_in - MOVL W_57+228(FP), CX - MOVL W_61+244(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - CMPL CX, $0x00000000 - JNE f61_in - MOVL W_41+164(FP), CX - MOVL W_45+180(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000008, CX - CMPL CX, $0x00000000 - JNE f61_in - MOVL W_41+164(FP), CX - MOVL W_45+180(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - CMPL CX, $0x00000000 - JNE f61_in - JMP f61_skip - -f61_in: - ANDL $0xefffffff, AX - -f61_skip: - // if (mask & DV_II_54_0_bit) != 0 { - // if not(not((W[58]^(W[62]>>25))&(1<<4))) != 0 || - // not(not((W[42]^(W[46]>>25))&(1<<3))) != 0 || - // not(not((W[42]^(W[46]>>25))&(1<<4))) != 0 { - // mask &= ^DV_II_54_0_bit - // } - // } - BTL $0x1d, AX - JNC f62_skip - MOVL W_58+232(FP), CX - MOVL W_62+248(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - CMPL CX, $0x00000000 - JNE f62_in - MOVL W_42+168(FP), CX - MOVL W_46+184(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000008, CX - CMPL CX, $0x00000000 - JNE f62_in - MOVL W_42+168(FP), CX - MOVL W_46+184(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - CMPL CX, $0x00000000 - JNE f62_in - JMP f62_skip - -f62_in: - ANDL $0xdfffffff, AX - -f62_skip: - // if (mask & DV_II_55_0_bit) != 0 { - // if not(not((W[59]^(W[63]>>25))&(1<<4))) != 0 || - // not(not((W[57]^(W[59]>>25))&(1<<4))) != 0 || - // not(not((W[43]^(W[47]>>25))&(1<<3))) != 0 || - // not(not((W[43]^(W[47]>>25))&(1<<4))) != 0 { - // mask &= ^DV_II_55_0_bit - // } - // } - BTL $0x1e, AX - JNC f63_skip - MOVL W_59+236(FP), CX - MOVL W_63+252(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - CMPL CX, $0x00000000 - JNE f63_in - MOVL W_57+228(FP), CX - MOVL W_59+236(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - CMPL CX, $0x00000000 - JNE f63_in - MOVL W_43+172(FP), CX - MOVL W_47+188(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000008, CX - CMPL CX, $0x00000000 - JNE f63_in - MOVL W_43+172(FP), CX - MOVL W_47+188(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - CMPL CX, $0x00000000 - JNE f63_in - JMP f63_skip - -f63_in: - ANDL $0xbfffffff, AX - -f63_skip: - // if (mask & DV_II_56_0_bit) != 0 { - // if not(not((W[60]^(W[64]>>25))&(1<<4))) != 0 || - // not(not((W[44]^(W[48]>>25))&(1<<3))) != 0 || - // not(not((W[44]^(W[48]>>25))&(1<<4))) != 0 { - // mask &= ^DV_II_56_0_bit - // } - // } - BTL $0x1f, AX - JNC f64_skip - MOVL W_60+240(FP), CX - MOVL W_64+256(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - CMPL CX, $0x00000000 - JNE f64_in - MOVL W_44+176(FP), CX - MOVL W_48+192(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000008, CX - CMPL CX, $0x00000000 - JNE f64_in - MOVL W_44+176(FP), CX - MOVL W_48+192(FP), DX - SHRL $0x19, DX - XORL DX, CX - ANDL $0x00000010, CX - CMPL CX, $0x00000000 - JNE f64_in - JMP f64_skip - -f64_in: - ANDL $0x7fffffff, AX - -f64_skip: -end: - MOVL AX, ret+320(FP) - RET diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_arm64.go b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_arm64.go deleted file mode 100644 index 47b79fb1..00000000 --- a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_arm64.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !noasm && gc && arm64 && !amd64 -// +build !noasm,gc,arm64,!amd64 - -package ubc - -func CalculateDvMaskARM64(W [80]uint32) 
uint32 - -// Check takes as input an expanded message block and verifies the unavoidable -// bitconditions for all listed DVs. It returns a dvmask where each bit belonging -// to a DV is set if all unavoidable bitconditions for that DV have been met. -func CalculateDvMask(W [80]uint32) uint32 { - return CalculateDvMaskARM64(W) -} diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_arm64.s b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_arm64.s deleted file mode 100644 index 68ee8fe5..00000000 --- a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_arm64.s +++ /dev/null @@ -1,1851 +0,0 @@ -//go:build !noasm && gc && arm64 && !amd64 -// +build !noasm,gc,arm64,!amd64 - -#include "textflag.h" - -// func CalculateDvMaskARM64(W [80]uint32) uint32 -TEXT ·CalculateDvMaskARM64(SB), NOSPLIT, $0-324 - MOVW $0xffffffff, R0 - - // (((((W[44] ^ W[45]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_I_51_0_bit | DV_I_52_0_bit | DV_II_45_0_bit | DV_II_46_0_bit | DV_II_50_0_bit | DV_II_51_0_bit)) - MOVW W_44+176(FP), R1 - MOVW W_45+180(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xfd7c5f7f, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[49] ^ W[50]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_II_45_0_bit | DV_II_50_0_bit | DV_II_51_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) - MOVW W_49+196(FP), R1 - MOVW W_50+200(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0x3d7efff7, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[48] ^ W[49]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_52_0_bit | DV_II_49_0_bit | DV_II_50_0_bit | DV_II_54_0_bit | DV_II_55_0_bit)) - MOVW W_48+192(FP), R1 - MOVW W_49+196(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0x9f5f7ffb, R1, R1 - AND R1, R0, R0 - - // mask &= ((((W[47] ^ (W[50] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) - MOVW W_47+188(FP), R1 - MOVW W_50+200(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - SUB $0x00000010, R1, R1 - ORR $0x7dfedddf, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[47] ^ W[48]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_51_0_bit | DV_II_48_0_bit | DV_II_49_0_bit | DV_II_53_0_bit | DV_II_54_0_bit)) - MOVW W_47+188(FP), R1 - MOVW W_48+192(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xcfcfdffd, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[46] >> 4) ^ (W[49] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_50_0_bit | DV_II_55_0_bit)) - MOVW W_46+184(FP), R1 - LSR $0x04, R1, R1 - MOVW W_49+196(FP), R2 - LSR $0x1d, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xbf7f7777, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[46] ^ W[47]) >> 29) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_50_0_bit | DV_II_47_0_bit | DV_II_48_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) - MOVW W_46+184(FP), R1 - MOVW W_47+188(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xe7e7f7fe, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[45] >> 4) ^ (W[48] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_49_0_bit | DV_II_54_0_bit)) - MOVW W_45+180(FP), R1 - LSR $0x04, R1, R1 - MOVW W_48+192(FP), R2 - LSR $0x1d, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xdfdfdddb, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[45] ^ W[46]) >> 29) & 1) - 1) 
| ^(DV_I_49_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_51_0_bit | DV_II_52_0_bit)) - MOVW W_45+180(FP), R1 - MOVW W_46+184(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xf5f57dff, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[44] >> 4) ^ (W[47] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_53_0_bit)) - MOVW W_44+176(FP), R1 - LSR $0x04, R1, R1 - MOVW W_47+188(FP), R2 - LSR $0x1d, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xefeff775, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[43] >> 4) ^ (W[46] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_52_0_bit)) - MOVW W_43+172(FP), R1 - LSR $0x04, R1, R1 - MOVW W_46+184(FP), R2 - LSR $0x1d, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xf7f7fdda, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[43] ^ W[44]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_I_50_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_49_0_bit | DV_II_50_0_bit)) - MOVW W_43+172(FP), R1 - MOVW W_44+176(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xff5ed7df, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[42] >> 4) ^ (W[45] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_51_0_bit)) - MOVW W_42+168(FP), R1 - LSR $0x04, R1, R1 - MOVW W_45+180(FP), R2 - LSR $0x1d, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xfdfd7f75, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[41] >> 4) ^ (W[44] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_50_0_bit)) - MOVW W_41+164(FP), R1 - LSR $0x04, R1, R1 - MOVW W_44+176(FP), R2 - LSR $0x1d, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xff7edfda, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[40] ^ W[41]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_47_0_bit | DV_I_48_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_56_0_bit)) - MOVW W_40+160(FP), R1 - MOVW W_41+164(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0x7ff5ff5d, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[54] ^ W[55]) >> 29) & 1) - 1) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_50_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) - MOVW W_54+216(FP), R1 - MOVW W_55+220(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0x3f77dfff, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[53] ^ W[54]) >> 29) & 1) - 1) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_49_0_bit | DV_II_54_0_bit | DV_II_55_0_bit)) - MOVW W_53+212(FP), R1 - MOVW W_54+216(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0x9fddf7ff, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[52] ^ W[53]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit | DV_II_53_0_bit | DV_II_54_0_bit)) - MOVW W_52+208(FP), R1 - MOVW W_53+212(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xcfeefdff, R1, R1 - AND R1, R0, R0 - - // mask &= ((((W[50] ^ (W[53] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_48_0_bit | DV_II_54_0_bit)) - MOVW W_50+200(FP), R1 - MOVW W_53+212(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - SUB $0x00000010, R1, 
R1 - ORR $0xdfed77ff, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[50] ^ W[51]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_II_46_0_bit | DV_II_51_0_bit | DV_II_52_0_bit | DV_II_56_0_bit)) - MOVW W_50+200(FP), R1 - MOVW W_51+204(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0x75fdffdf, R1, R1 - AND R1, R0, R0 - - // mask &= ((((W[49] ^ (W[52] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_47_0_bit | DV_II_53_0_bit)) - MOVW W_49+196(FP), R1 - MOVW W_52+208(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - SUB $0x00000010, R1, R1 - ORR $0xeff6ddff, R1, R1 - AND R1, R0, R0 - - // mask &= ((((W[48] ^ (W[51] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_52_0_bit)) - MOVW W_48+192(FP), R1 - MOVW W_51+204(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - SUB $0x00000010, R1, R1 - ORR $0xf7fd777f, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[42] ^ W[43]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) - MOVW W_42+168(FP), R1 - MOVW W_43+172(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xffcff5f7, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[41] ^ W[42]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_48_0_bit)) - MOVW W_41+164(FP), R1 - MOVW W_42+168(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xffe7fd7b, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[40] >> 4) ^ (W[43] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_50_0_bit | DV_II_49_0_bit | DV_II_56_0_bit)) - MOVW W_40+160(FP), R1 - LSR $0x04, R1, R1 - MOVW W_43+172(FP), R2 - LSR $0x1d, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0x7fdff7f5, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[39] >> 4) ^ (W[42] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_49_0_bit | DV_II_48_0_bit | DV_II_55_0_bit)) - MOVW W_39+156(FP), R1 - LSR $0x04, R1, R1 - MOVW W_42+168(FP), R2 - LSR $0x1d, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xbfeffdfa, R1, R1 - AND R1, R0, R0 - - // if (mask & (DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 { - // mask &= (((((W[38] >> 4) ^ (W[41] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) - // } - TST $0xa0080082, R0 - BEQ f1 - MOVW W_38+152(FP), R1 - MOVW W_41+164(FP), R2 - LSR $0x04, R1, R1 - LSR $0x1d, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0x5ff7ff7d, R1, R1 - AND R1, R0, R0 - -f1: - // mask &= (((((W[37] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_47_0_bit | DV_II_46_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) - MOVW W_37+148(FP), R1 - MOVW W_40+160(FP), R2 - LSR $0x04, R1, R1 - LSR $0x1d, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xaffdffde, R1, R1 - AND R1, R0, R0 - - // if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) != 0 { - // mask &= (((((W[55] ^ W[56]) >> 29) & 1) - 1) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) - // } - TST $0x82108000, R0 - BEQ f2 - MOVW W_55+220(FP), R1 - MOVW W_56+224(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0x7def7fff, R1, R1 - AND R1, 
R0, R0 - -f2: - // if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) != 0 { - // mask &= ((((W[52] ^ (W[55] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) - // } - TST $0x80908000, R0 - BEQ f3 - MOVW W_52+208(FP), R1 - MOVW W_55+220(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - SUB $0x00000010, R1, R1 - ORR $0x7f6f7fff, R1, R1 - AND R1, R0, R0 - -f3: - // if (mask & (DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) != 0 { - // mask &= ((((W[51] ^ (W[54] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) - // } - TST $0x40282000, R0 - BEQ f4 - MOVW W_51+204(FP), R1 - MOVW W_54+216(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - SUB $0x00000010, R1, R1 - ORR $0xbfd7dfff, R1, R1 - AND R1, R0, R0 - -f4: - // if (mask & (DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) != 0 { - // mask &= (((((W[51] ^ W[52]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) - // } - TST $0x18080080, R0 - BEQ f5 - MOVW W_51+204(FP), R1 - MOVW W_52+208(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xe7f7ff7f, R1, R1 - AND R1, R0, R0 - -f5: - // if (mask & (DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) != 0 { - // mask &= (((((W[36] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) - // } - TST $0x00110208, R0 - BEQ f6 - MOVW W_36+144(FP), R1 - LSR $0x04, R1, R1 - MOVW W_40+160(FP), R2 - LSR $0x1d, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xffeefdf7, R1, R1 - AND R1, R0, R0 - -f6: - // if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) != 0 { - // mask &= ((0 - (((W[53] ^ W[56]) >> 29) & 1)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) - // } - TST $0x00308000, R0 - BEQ f7 - MOVW W_53+212(FP), R1 - MOVW W_56+224(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xffcf7fff, R1, R1 - AND R1, R0, R0 - -f7: - // if (mask & (DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) != 0 { - // mask &= ((0 - (((W[51] ^ W[54]) >> 29) & 1)) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) - // } - TST $0x000a0800, R0 - BEQ f8 - MOVW W_51+204(FP), R1 - MOVW W_54+216(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xfff5f7ff, R1, R1 - AND R1, R0, R0 - -f8: - // if (mask & (DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) != 0 { - // mask &= ((0 - (((W[50] ^ W[52]) >> 29) & 1)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) - // } - TST $0x00012200, R0 - BEQ f9 - MOVW W_50+200(FP), R1 - MOVW W_52+208(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xfffeddff, R1, R1 - AND R1, R0, R0 - -f9: - // if (mask & (DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) != 0 { - // mask &= ((0 - (((W[49] ^ W[51]) >> 29) & 1)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) - // } - TST $0x00008880, R0 - BEQ f10 - MOVW W_49+196(FP), R1 - MOVW W_51+204(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xffff777f, R1, R1 - AND R1, R0, R0 - -f10: - // if (mask & (DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) != 0 { - // mask &= ((0 - (((W[48] ^ W[50]) >> 29) & 1)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) - // } - TST $0x00002220, 
R0 - BEQ f11 - MOVW W_48+192(FP), R1 - MOVW W_50+200(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xffffdddf, R1, R1 - AND R1, R0, R0 - -f11: - // if (mask & (DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) != 0 { - // mask &= ((0 - (((W[47] ^ W[49]) >> 29) & 1)) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) - // } - TST $0x00000888, R0 - BEQ f12 - MOVW W_47+188(FP), R1 - MOVW W_49+196(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xfffff777, R1, R1 - AND R1, R0, R0 - -f12: - // if (mask & (DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) != 0 { - // mask &= ((0 - (((W[46] ^ W[48]) >> 29) & 1)) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) - // } - TST $0x00000224, R0 - BEQ f13 - MOVW W_46+184(FP), R1 - MOVW W_48+192(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xfffffddb, R1, R1 - AND R1, R0, R0 - -f13: - // mask &= ((((W[45] ^ W[47]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit | DV_I_51_2_bit)) - MOVW W_45+180(FP), R1 - MOVW W_47+188(FP), R2 - EOR R2, R1, R1 - AND $0x00000040, R1, R1 - SUB $0x00000040, R1, R1 - ORR $0xffffbbbf, R1, R1 - AND R1, R0, R0 - - // if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) != 0 { - // mask &= ((0 - (((W[45] ^ W[47]) >> 29) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) - // } - TST $0x0000008a, R0 - BEQ f14 - MOVW W_45+180(FP), R1 - MOVW W_47+188(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xffffff75, R1, R1 - AND R1, R0, R0 - -f14: - // mask &= (((((W[44] ^ W[46]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit | DV_I_50_2_bit)) - MOVW W_44+176(FP), R1 - MOVW W_46+184(FP), R2 - EOR R2, R1, R1 - LSR $0x06, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xffffeeef, R1, R1 - AND R1, R0, R0 - - // if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) != 0 { - // mask &= ((0 - (((W[44] ^ W[46]) >> 29) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) - // } - TST $0x00000025, R0 - BEQ f15 - MOVW W_44+176(FP), R1 - MOVW W_46+184(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xffffffda, R1, R1 - AND R1, R0, R0 - -f15: - // mask &= ((0 - ((W[41] ^ (W[42] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_II_46_2_bit | DV_II_51_2_bit)) - MOVW W_41+164(FP), R1 - MOVW W_42+168(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - NEG R1, R1 - ORR $0xfbfbfeff, R1, R1 - AND R1, R0, R0 - - // mask &= ((0 - ((W[40] ^ (W[41] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_51_2_bit | DV_II_50_2_bit)) - MOVW W_40+160(FP), R1 - MOVW W_41+164(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - NEG R1, R1 - ORR $0xfeffbfbf, R1, R1 - AND R1, R0, R0 - - // if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) != 0 { - // mask &= ((0 - (((W[40] ^ W[42]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) - // } - TST $0x8000000a, R0 - BEQ f16 - MOVW W_40+160(FP), R1 - MOVW W_42+168(FP), R2 - EOR R2, R1, R1 - LSR $0x04, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0x7ffffff5, R1, R1 - AND R1, R0, R0 - -f16: - // mask &= ((0 - ((W[39] ^ (W[40] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_50_2_bit | DV_II_49_2_bit)) - MOVW W_39+156(FP), R1 - MOVW W_40+160(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - NEG R1, R1 - ORR $0xffbfefef, R1, R1 - AND R1, R0, R0 - - // if (mask & (DV_I_43_0_bit | DV_I_45_0_bit 
| DV_II_55_0_bit)) != 0 { - // mask &= ((0 - (((W[39] ^ W[41]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) - // } - TST $0x40000005, R0 - BEQ f17 - MOVW W_39+156(FP), R1 - MOVW W_41+164(FP), R2 - EOR R2, R1, R1 - LSR $0x04, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xbffffffa, R1, R1 - AND R1, R0, R0 - -f17: - // if (mask & (DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 { - // mask &= ((0 - (((W[38] ^ W[40]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) - // } - TST $0xa0000002, R0 - BEQ f18 - MOVW W_38+152(FP), R1 - MOVW W_40+160(FP), R2 - EOR R2, R1, R1 - LSR $0x04, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0x5ffffffd, R1, R1 - AND R1, R0, R0 - -f18: - // if (mask & (DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) != 0 { - // mask &= ((0 - (((W[37] ^ W[39]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) - // } - TST $0x50000001, R0 - BEQ f19 - MOVW W_37+148(FP), R1 - MOVW W_39+156(FP), R2 - EOR R2, R1, R1 - LSR $0x04, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xaffffffe, R1, R1 - AND R1, R0, R0 - -f19: - // mask &= ((0 - ((W[36] ^ (W[37] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_50_2_bit | DV_II_46_2_bit)) - MOVW W_36+144(FP), R1 - MOVW W_37+148(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - NEG R1, R1 - ORR $0xfffbefbf, R1, R1 - AND R1, R0, R0 - - // if (mask & (DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) != 0 { - // mask &= (((((W[35] >> 4) ^ (W[39] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) - // } - TST $0x00080084, R0 - BEQ f20 - MOVW W_35+140(FP), R1 - MOVW W_39+156(FP), R2 - LSR $0x04, R1, R1 - LSR $0x1d, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - SUB $0x00000001, R1, R1 - ORR $0xfff7ff7b, R1, R1 - AND R1, R0, R0 - -f20: - // if (mask & (DV_I_48_0_bit | DV_II_48_0_bit)) != 0 { - // mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 0))) | ^(DV_I_48_0_bit | DV_II_48_0_bit)) - // } - TST $0x00100080, R0 - BEQ f21 - MOVW W_63+252(FP), R1 - MOVW W_64+256(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xffefff7f, R1, R1 - AND R1, R0, R0 - -f21: - // if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 { - // mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 1))) | ^(DV_I_45_0_bit | DV_II_45_0_bit)) - // } - TST $0x00010004, R0 - BEQ f22 - MOVW W_63+252(FP), R1 - MOVW W_64+256(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - NEG R1, R1 - ORR $0xfffefffb, R1, R1 - AND R1, R0, R0 - -f22: - // if (mask & (DV_I_47_0_bit | DV_II_47_0_bit)) != 0 { - // mask &= ((0 - ((W[62] ^ (W[63] >> 5)) & (1 << 0))) | ^(DV_I_47_0_bit | DV_II_47_0_bit)) - // } - TST $0x00080020, R0 - BEQ f23 - MOVW W_62+248(FP), R1 - MOVW W_63+252(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xfff7ffdf, R1, R1 - AND R1, R0, R0 - -f23: - // if (mask & (DV_I_46_0_bit | DV_II_46_0_bit)) != 0 { - // mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 0))) | ^(DV_I_46_0_bit | DV_II_46_0_bit)) - // } - TST $0x00020008, R0 - BEQ f24 - MOVW W_61+244(FP), R1 - MOVW W_62+248(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xfffdfff7, R1, R1 - AND R1, R0, R0 - -f24: - // mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 2))) | ^(DV_I_46_2_bit | DV_II_46_2_bit)) - MOVW W_61+244(FP), R1 - MOVW W_62+248(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000004, R1, R1 - NEG R1, R1 - ORR $0xfffbffef, R1, R1 - AND R1, R0, R0 - - // 
if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 { - // mask &= ((0 - ((W[60] ^ (W[61] >> 5)) & (1 << 0))) | ^(DV_I_45_0_bit | DV_II_45_0_bit)) - // } - TST $0x00010004, R0 - BEQ f25 - MOVW W_60+240(FP), R1 - MOVW W_61+244(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xfffefffb, R1, R1 - AND R1, R0, R0 - -f25: - // if (mask & (DV_II_51_0_bit | DV_II_54_0_bit)) != 0 { - // mask &= (((((W[58] ^ W[59]) >> 29) & 1) - 1) | ^(DV_II_51_0_bit | DV_II_54_0_bit)) - // } - TST $0x22000000, R0 - BEQ f26 - MOVW W_58+232(FP), R1 - MOVW W_59+236(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xddffffff, R1, R1 - AND R1, R0, R0 - -f26: - // if (mask & (DV_II_50_0_bit | DV_II_53_0_bit)) != 0 { - // mask &= (((((W[57] ^ W[58]) >> 29) & 1) - 1) | ^(DV_II_50_0_bit | DV_II_53_0_bit)) - // } - TST $0x10800000, R0 - BEQ f27 - MOVW W_57+228(FP), R1 - MOVW W_58+232(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $0x00000001, R1, R1 - ORR $0xef7fffff, R1, R1 - AND R1, R0, R0 - -f27: - // if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 { - // mask &= ((((W[56] ^ (W[59] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_52_0_bit | DV_II_54_0_bit)) - // } - TST $0x28000000, R0 - BEQ f28 - MOVW W_56+224(FP), R1 - MOVW W_59+236(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - SUB $0x00000010, R1, R1 - ORR $0xd7ffffff, R1, R1 - AND R1, R0, R0 - -f28: - // if (mask & (DV_II_51_0_bit | DV_II_52_0_bit)) != 0 { - // mask &= ((0 - (((W[56] ^ W[59]) >> 29) & 1)) | ^(DV_II_51_0_bit | DV_II_52_0_bit)) - // } - TST $0x0a000000, R0 - BEQ f29 - MOVW W_56+224(FP), R1 - MOVW W_59+236(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xf5ffffff, R1, R1 - AND R1, R0, R0 - -f29: - // if (mask & (DV_II_49_0_bit | DV_II_52_0_bit)) != 0 { - // mask &= (((((W[56] ^ W[57]) >> 29) & 1) - 1) | ^(DV_II_49_0_bit | DV_II_52_0_bit)) - // } - TST $0x08200000, R0 - BEQ f30 - MOVW W_56+224(FP), R1 - MOVW W_57+228(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - SUB $0x00000001, R1, R1 - ORR $0xf7dfffff, R1, R1 - AND R1, R0, R0 - -f30: - // if (mask & (DV_II_51_0_bit | DV_II_53_0_bit)) != 0 { - // mask &= ((((W[55] ^ (W[58] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_51_0_bit | DV_II_53_0_bit)) - // } - TST $0x12000000, R0 - BEQ f31 - MOVW W_55+220(FP), R1 - MOVW W_58+232(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - SUB $0x00000010, R1, R1 - ORR $0xedffffff, R1, R1 - AND R1, R0, R0 - -f31: - // if (mask & (DV_II_50_0_bit | DV_II_52_0_bit)) != 0 { - // mask &= ((((W[54] ^ (W[57] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_50_0_bit | DV_II_52_0_bit)) - // } - TST $0x08800000, R0 - BEQ f32 - MOVW W_54+216(FP), R1 - MOVW W_57+228(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - SUB $0x00000010, R1, R1 - ORR $0xf77fffff, R1, R1 - AND R1, R0, R0 - -f32: - // if (mask & (DV_II_49_0_bit | DV_II_51_0_bit)) != 0 { - // mask &= ((((W[53] ^ (W[56] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_49_0_bit | DV_II_51_0_bit)) - // } - TST $0x02200000, R0 - BEQ f33 - MOVW W_53+212(FP), R1 - MOVW W_56+224(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - SUB $0x00000010, R1, R1 - ORR $0xfddfffff, R1, R1 - AND R1, R0, R0 - -f33: - // mask &= ((((W[51] ^ (W[50] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_46_2_bit)) - MOVW W_51+204(FP), R1 - MOVW W_50+200(FP), R2 - LSR $0x05, R2, R2 - EOR R2, 
R1, R1 - AND $0x00000002, R1, R1 - SUB $0x00000002, R1, R1 - ORR $0xfffbefff, R1, R1 - AND R1, R0, R0 - - // mask &= ((((W[48] ^ W[50]) & (1 << 6)) - (1 << 6)) | ^(DV_I_50_2_bit | DV_II_46_2_bit)) - MOVW W_48+192(FP), R1 - MOVW W_50+200(FP), R2 - EOR R2, R1, R1 - AND $0x00000040, R1, R1 - SUB $0x00000040, R1, R1 - ORR $0xfffbefff, R1, R1 - AND R1, R0, R0 - - // if (mask & (DV_I_51_0_bit | DV_I_52_0_bit)) != 0 { - // mask &= ((0 - (((W[48] ^ W[55]) >> 29) & 1)) | ^(DV_I_51_0_bit | DV_I_52_0_bit)) - // } - TST $0x0000a000, R0 - BEQ f34 - MOVW W_48+192(FP), R1 - MOVW W_55+220(FP), R2 - EOR R2, R1, R1 - LSR $0x1d, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - ORR $0xffff5fff, R1, R1 - AND R1, R0, R0 - -f34: - // mask &= ((((W[47] ^ W[49]) & (1 << 6)) - (1 << 6)) | ^(DV_I_49_2_bit | DV_I_51_2_bit)) - MOVW W_47+188(FP), R1 - MOVW W_49+196(FP), R2 - EOR R2, R1, R1 - AND $0x00000040, R1, R1 - SUB $0x00000040, R1, R1 - ORR $0xffffbbff, R1, R1 - AND R1, R0, R0 - - // mask &= ((((W[48] ^ (W[47] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_47_2_bit | DV_II_51_2_bit)) - MOVW W_48+192(FP), R1 - MOVW W_47+188(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - SUB $0x00000002, R1, R1 - ORR $0xfbffffbf, R1, R1 - AND R1, R0, R0 - - // mask &= ((((W[46] ^ W[48]) & (1 << 6)) - (1 << 6)) | ^(DV_I_48_2_bit | DV_I_50_2_bit)) - MOVW W_46+184(FP), R1 - MOVW W_48+192(FP), R2 - EOR R2, R1, R1 - AND $0x00000040, R1, R1 - SUB $0x00000040, R1, R1 - ORR $0xffffeeff, R1, R1 - AND R1, R0, R0 - - // mask &= ((((W[47] ^ (W[46] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_46_2_bit | DV_II_50_2_bit)) - MOVW W_47+188(FP), R1 - MOVW W_46+184(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - SUB $0x00000002, R1, R1 - ORR $0xfeffffef, R1, R1 - AND R1, R0, R0 - - // mask &= ((0 - ((W[44] ^ (W[45] >> 5)) & (1 << 1))) | ^(DV_I_51_2_bit | DV_II_49_2_bit)) - MOVW W_44+176(FP), R1 - MOVW W_45+180(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - NEG R1, R1 - ORR $0xffbfbfff, R1, R1 - AND R1, R0, R0 - - // mask &= ((((W[43] ^ W[45]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit)) - MOVW W_43+172(FP), R1 - MOVW W_45+180(FP), R2 - EOR R2, R1, R1 - AND $0x00000040, R1, R1 - SUB $0x00000040, R1, R1 - ORR $0xfffffbbf, R1, R1 - AND R1, R0, R0 - - // mask &= (((((W[42] ^ W[44]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit)) - MOVW W_42+168(FP), R1 - MOVW W_44+176(FP), R2 - EOR R2, R1, R1 - LSR $0x06, R1, R1 - AND $0x00000001, R1, R1 - SUB $1, R1, R1 - ORR $0xfffffeef, R1, R1 - AND R1, R0, R0 - - // mask &= ((((W[43] ^ (W[42] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_II_46_2_bit | DV_II_51_2_bit)) - MOVW W_43+172(FP), R1 - MOVW W_42+168(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - SUB $0x00000002, R1, R1 - ORR $0xfbfbffff, R1, R1 - AND R1, R0, R0 - - // mask &= ((((W[42] ^ (W[41] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_51_2_bit | DV_II_50_2_bit)) - MOVW W_42+168(FP), R1 - MOVW W_41+164(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - SUB $0x00000002, R1, R1 - ORR $0xfeffbfff, R1, R1 - AND R1, R0, R0 - - // mask &= ((((W[41] ^ (W[40] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_49_2_bit)) - MOVW W_41+164(FP), R1 - MOVW W_40+160(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - SUB $0x00000002, R1, R1 - ORR $0xffbfefff, R1, R1 - AND R1, R0, R0 - - // if (mask & (DV_I_52_0_bit | DV_II_51_0_bit)) != 0 { - // mask &= ((((W[39] ^ (W[43] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | 
DV_II_51_0_bit)) - // } - TST $0x02008000, R0 - BEQ f35 - MOVW W_39+156(FP), R1 - MOVW W_43+172(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - SUB $0x00000010, R1, R1 - ORR $0xfdff7fff, R1, R1 - AND R1, R0, R0 - -f35: - // if (mask & (DV_I_51_0_bit | DV_II_50_0_bit)) != 0 { - // mask &= ((((W[38] ^ (W[42] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_50_0_bit)) - // } - TST $0x00802000, R0 - BEQ f36 - MOVW W_38+152(FP), R1 - MOVW W_42+168(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - SUB $0x00000010, R1, R1 - ORR $0xff7fdfff, R1, R1 - AND R1, R0, R0 - -f36: - // if (mask & (DV_I_48_2_bit | DV_I_51_2_bit)) != 0 { - // mask &= ((0 - ((W[37] ^ (W[38] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_I_51_2_bit)) - // } - TST $0x00004100, R0 - BEQ f37 - MOVW W_37+148(FP), R1 - MOVW W_38+152(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - NEG R1, R1 - ORR $0xffffbeff, R1, R1 - AND R1, R0, R0 - -f37: - // if (mask & (DV_I_50_0_bit | DV_II_49_0_bit)) != 0 { - // mask &= ((((W[37] ^ (W[41] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_II_49_0_bit)) - // } - TST $0x00200800, R0 - BEQ f38 - MOVW W_37+148(FP), R1 - MOVW W_41+164(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - SUB $0x00000010, R1, R1 - ORR $0xffdff7ff, R1, R1 - AND R1, R0, R0 - -f38: - // if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 { - // mask &= ((0 - ((W[36] ^ W[38]) & (1 << 4))) | ^(DV_II_52_0_bit | DV_II_54_0_bit)) - // } - TST $0x28000000, R0 - BEQ f39 - MOVW W_36+144(FP), R1 - MOVW W_38+152(FP), R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - NEG R1, R1 - ORR $0xd7ffffff, R1, R1 - AND R1, R0, R0 - -f39: - // mask &= ((0 - ((W[35] ^ (W[36] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_49_2_bit)) - MOVW W_35+140(FP), R1 - MOVW W_36+144(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - NEG R1, R1 - ORR $0xfffffbef, R1, R1 - AND R1, R0, R0 - - // if (mask & (DV_I_51_0_bit | DV_II_47_0_bit)) != 0 { - // mask &= ((((W[35] ^ (W[39] >> 25)) & (1 << 3)) - (1 << 3)) | ^(DV_I_51_0_bit | DV_II_47_0_bit)) - // } - TST $0x00082000, R0 - BEQ f40 - MOVW W_35+140(FP), R1 - MOVW W_39+156(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000008, R1, R1 - SUB $0x00000008, R1, R1 - ORR $0xfff7dfff, R1, R1 - AND R1, R0, R0 - -f40: - // if mask != 0 - TST $0x00000000, R0 - BNE end - - // if (mask & DV_I_43_0_bit) != 0 { - // if not((W[61]^(W[62]>>5))&(1<<1)) != 0 || - // not(not((W[59]^(W[63]>>25))&(1<<5))) != 0 || - // not((W[58]^(W[63]>>30))&(1<<0)) != 0 { - // mask &= ^DV_I_43_0_bit - // } - // } - TBZ $0, R0, f41_skip - MOVW W_61+244(FP), R1 - MOVW W_62+248(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - NEG R1, R1 - CBZ R1, f41_in - MOVW W_59+236(FP), R1 - MOVW W_63+252(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000020, R1, R1 - CBNZ R1, f41_in - MOVW W_58+232(FP), R1 - MOVW W_63+252(FP), R2 - LSR $0x1e, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - CBZ R1, f41_in - B f41_skip - -f41_in: - AND $0xfffffffe, R0, R0 - -f41_skip: - // if (mask & DV_I_44_0_bit) != 0 { - // if not((W[62]^(W[63]>>5))&(1<<1)) != 0 || - // not(not((W[60]^(W[64]>>25))&(1<<5))) != 0 || - // not((W[59]^(W[64]>>30))&(1<<0)) != 0 { - // mask &= ^DV_I_44_0_bit - // } - // } - TBZ $0x01, R0, f42_skip - MOVW W_62+248(FP), R1 - MOVW W_63+252(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - NEG R1, R1 - CBZ R1, f42_in - MOVW W_60+240(FP), R1 - MOVW 
W_64+256(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000020, R1, R1 - CBNZ R1, f42_in - MOVW W_59+236(FP), R1 - MOVW W_64+256(FP), R2 - LSR $0x1e, R2, R2 - EOR R2, R1, R1 - AND $0x00000001, R1, R1 - NEG R1, R1 - CBZ R1, f42_in - B f42_skip - -f42_in: - AND $0xfffffffd, R0, R0 - -f42_skip: - // if (mask & DV_I_46_2_bit) != 0 { - // mask &= ((^((W[40] ^ W[42]) >> 2)) | ^DV_I_46_2_bit) - // } - TBZ $0x04, R0, f43 - MOVW W_40+160(FP), R1 - MOVW W_42+168(FP), R2 - EOR R2, R1, R1 - LSR $0x02, R1, R1 - MVN R1, R1 - ORR $0xffffffef, R1, R1 - AND R1, R0, R0 - -f43: - // if (mask & DV_I_47_2_bit) != 0 { - // if not((W[62]^(W[63]>>5))&(1<<2)) != 0 || - // not(not((W[41]^W[43])&(1<<6))) != 0 { - // mask &= ^DV_I_47_2_bit - // } - // } - TBZ $6, R0, f44_skip - MOVW W_62+248(FP), R1 - MOVW W_63+252(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000004, R1, R1 - NEG R1, R1 - CBZ R1, f44_in - MOVW W_41+164(FP), R1 - MOVW W_43+172(FP), R2 - EOR R2, R1, R1 - AND $0x00000040, R1, R1 - CBNZ R1, f44_in - B f44_skip - -f44_in: - AND $0xffffffbf, R0, R0 - -f44_skip: - // if (mask & DV_I_48_2_bit) != 0 { - // if not((W[63]^(W[64]>>5))&(1<<2)) != 0 || - // not(not((W[48]^(W[49]<<5))&(1<<6))) != 0 { - // mask &= ^DV_I_48_2_bit - // } - // } - TBZ $0x08, R0, f45_skip - MOVW W_63+252(FP), R1 - MOVW W_64+256(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000004, R1, R1 - NEG R1, R1 - CBZ R1, f45_in - MOVW W_48+192(FP), R1 - MOVW W_49+196(FP), R2 - LSL $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000040, R1, R1 - CBNZ R1, f45_in - B f45_skip - -f45_in: - AND $0xfffffeff, R0, R0 - -f45_skip: - // if (mask & DV_I_49_2_bit) != 0 { - // if not(not((W[49]^(W[50]<<5))&(1<<6))) != 0 || - // not((W[42]^W[50])&(1<<1)) != 0 || - // not(not((W[39]^(W[40]<<5))&(1<<6))) != 0 || - // not((W[38]^W[40])&(1<<1)) != 0 { - // mask &= ^DV_I_49_2_bit - // } - // } - TBZ $0x0a, R0, f46_skip // Test bit 10 of R0, skip if clear - MOVW W_49+196(FP), R1 // R1 = W_49 - MOVW W_50+200(FP), R2 // R2 = W_50 - LSL $0x05, R2, R2 // R2 = W_50 << 5 - EOR R2, R1, R1 // R1 ^= R2 - AND $0x00000040, R1, R1 // R1 &= 0x40 - CBNZ R1, f46_in // If non-zero, jump to f46_in - - MOVW W_42+168(FP), R1 - MOVW W_50+200(FP), R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - CBZ R1, f46_in - - MOVW W_39+156(FP), R1 - MOVW W_40+160(FP), R2 - LSL $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000040, R1, R1 - CBNZ R1, f46_in - - MOVW W_38+152(FP), R1 - MOVW W_40+160(FP), R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - CBZ R1, f46_in - B f46_skip - -f46_in: - AND $0xfffffbff, R0, R0 - -f46_skip: - // if (mask & DV_I_50_0_bit) != 0 { - // mask &= (((W[36] ^ W[37]) << 7) | ^DV_I_50_0_bit) - // } - TBZ $0x0b, R0, f47 // Test bit 11 (DV_I_50_0_bit) - MOVW W_36+144(FP), R1 - MOVW W_37+148(FP), R2 - EOR R2, R1, R1 // R1 = W[36] ^ W[37] - LSL $0x07, R1, R1 // R1 <<= 7 - ORR $0xfffff7ff, R1, R1 // R1 |= ~DV_I_50_0_bit - AND R1, R0, R0 // mask &= R1 - -f47: - // if (mask & DV_I_50_2_bit) != 0 { - // mask &= (((W[43] ^ W[51]) << 11) | ^DV_I_50_2_bit) - // } - TBZ $0x0c, R0, f48 // Test bit 12 (DV_I_50_2_bit) - MOVW W_43+172(FP), R1 - MOVW W_51+204(FP), R2 - EOR R2, R1, R1 // R1 = W[43] ^ W[51] - LSL $0x0b, R1, R1 // R1 <<= 11 - ORR $0xffffefff, R1, R1 // R1 |= ~DV_I_50_2_bit - AND R1, R0, R0 // mask &= R1 - -f48: - // if (mask & DV_I_51_0_bit) != 0 { - // mask &= (((W[37] ^ W[38]) << 9) | ^DV_I_51_0_bit) - // } - TBZ $0x0d, R0, f49 // Test bit 13 (DV_I_51_0_bit) - MOVW W_37+148(FP), R1 - MOVW W_38+152(FP), R2 - EOR R2, R1, R1 // R1 = W[37] ^ W[38] - LSL $0x09, 
R1, R1 // R1 <<= 9 - ORR $0xffffdfff, R1, R1 // R1 |= ~DV_I_51_0_bit - AND R1, R0, R0 // mask &= R1 - -f49: - // if (mask & DV_I_51_2_bit) != 0 { - // if not(not((W[51]^(W[52]<<5))&(1<<6))) != 0 || - // not(not((W[49]^W[51])&(1<<6))) != 0 || - // not(not((W[37]^(W[37]>>5))&(1<<1))) != 0 || - // not(not((W[35]^(W[39]>>25))&(1<<5))) != 0 { - // mask &= ^DV_I_51_2_bit - // } - // } - TBZ $0x0e, R0, f50_skip // Test bit 14 (DV_I_51_2_bit) - - MOVW W_51+204(FP), R1 - MOVW W_52+208(FP), R2 - LSL $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000040, R1, R1 - CBNZ R1, f50_in - - MOVW W_49+196(FP), R1 - MOVW W_51+204(FP), R2 - EOR R2, R1, R1 - AND $0x00000040, R1, R1 - CBNZ R1, f50_in - - MOVW W_37+148(FP), R1 - MOVW W_37+148(FP), R2 - LSR $0x05, R2, R2 - EOR R2, R1, R1 - AND $0x00000002, R1, R1 - CBNZ R1, f50_in - - MOVW W_35+140(FP), R1 - MOVW W_39+156(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000020, R1, R1 - CBNZ R1, f50_in - - B f50_skip - -f50_in: - AND $0xffffbfff, R0, R0 // mask &= ~DV_I_51_2_bit - -f50_skip: - // if (mask & DV_I_52_0_bit) != 0 { - // mask &= (((W[38] ^ W[39]) << 11) | ^DV_I_52_0_bit) - // } - TBZ $0x0f, R0, f51 // Test bit 15 (DV_I_52_0_bit) - MOVW W_38+152(FP), R1 - MOVW W_39+156(FP), R2 - EOR R2, R1, R1 - LSL $0x0b, R1, R1 - ORR $0xffff7fff, R1, R1 - AND R1, R0, R0 - -f51: - // if (mask & DV_II_46_2_bit) != 0 { - // mask &= (((W[47] ^ W[51]) << 17) | ^DV_II_46_2_bit) - // } - TST $0x00040000, R0 - TBZ $0x12, R0, f52 - MOVW W_47+188(FP), R1 - MOVW W_51+204(FP), R2 - EOR R2, R1, R1 - LSL $0x11, R1, R1 - ORR $0xfffbffff, R1, R1 - AND R1, R0, R0 - -f52: - // if (mask & DV_II_48_0_bit) != 0 { - // if not(not((W[36]^(W[40]>>25))&(1<<3))) != 0 || - // not((W[35]^(W[40]<<2))&(1<<30)) != 0 { - // mask &= ^DV_II_48_0_bit - // } - // } - TBZ $0x14, R0, f53_skip // Test bit 20 (DV_II_48_0_bit) - - MOVW W_36+144(FP), R1 - MOVW W_40+160(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000008, R1, R1 - CBNZ R1, f53_in - - MOVW W_35+140(FP), R1 - MOVW W_40+160(FP), R2 - LSL $0x02, R2, R2 - EOR R2, R1, R1 - AND $0x40000000, R1, R1 - CBNZ R1, f53_in - - B f53_skip - -f53_in: - AND $0xffefffff, R0, R0 - -f53_skip: - // if (mask & DV_II_49_0_bit) != 0 { - // if not(not((W[37]^(W[41]>>25))&(1<<3))) != 0 || - // not((W[36]^(W[41]<<2))&(1<<30)) != 0 { - // mask &= ^DV_II_49_0_bit - // } - // } - TBZ $0x15, R0, f54_skip // Test bit 21 (DV_II_49_0_bit) - - MOVW W_37+148(FP), R1 - MOVW W_41+164(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000008, R1, R1 - CBNZ R1, f54_in - - MOVW W_36+144(FP), R1 - MOVW W_41+164(FP), R2 - LSL $0x02, R2, R2 - EOR R2, R1, R1 - AND $0x40000000, R1, R1 - CBNZ R1, f54_in - - B f54_skip - -f54_in: - AND $0xffdfffff, R0, R0 // mask &= ~DV_II_49_0_bit - -f54_skip: - // if (mask & DV_II_49_2_bit) != 0 { - // if not(not((W[53]^(W[54]<<5))&(1<<6))) != 0 || - // not(not((W[51]^W[53])&(1<<6))) != 0 || - // not((W[50]^W[54])&(1<<1)) != 0 || - // not(not((W[45]^(W[46]<<5))&(1<<6))) != 0 || - // not(not((W[37]^(W[41]>>25))&(1<<5))) != 0 || - // not((W[36]^(W[41]>>30))&(1<<0)) != 0 { - // mask &= ^DV_II_49_2_bit - // } - // } - TBZ $0x16, R0, f55_skip - - MOVW W_53+212(FP), R1 - MOVW W_54+216(FP), R2 - LSL $0x05, R2 - EOR R2, R1 - AND $0x00000040, R1 - CMP $0x00000000, R1 - BNE f55_in - MOVW W_51+204(FP), R1 - MOVW W_53+212(FP), R2 - EOR R2, R1 - AND $0x00000040, R1 - CMP $0x00000000, R1 - BNE f55_in - MOVW W_50+200(FP), R1 - MOVW W_54+216(FP), R2 - EOR R2, R1 - AND $0x00000002, R1 - NEG R1, R1 - CMP $0x00000000, R1 - BEQ f55_in - MOVW W_45+180(FP), R1 
- MOVW W_46+184(FP), R2 - LSL $0x05, R2 - EOR R2, R1 - AND $0x00000040, R1 - CMP $0x00000000, R1 - BNE f55_in - MOVW W_37+148(FP), R1 - MOVW W_41+164(FP), R2 - LSR $0x19, R2 - EOR R2, R1 - AND $0x00000020, R1 - CMP $0x00000000, R1 - BNE f55_in - MOVW W_36+144(FP), R1 - MOVW W_41+164(FP), R2 - LSR $0x1e, R2 - EOR R2, R1 - AND $0x00000001, R1 - NEG R1, R1 - CMP $0x00000000, R1 - BEQ f55_in - JMP f55_skip - -f55_in: - AND $0xffbfffff, R0 - -f55_skip: - // if (mask & DV_II_50_0_bit) != 0 { - // if not((W[55]^W[58])&(1<<29)) != 0 || - // not(not((W[38]^(W[42]>>25))&(1<<3))) != 0 || - // not((W[37]^(W[42]<<2))&(1<<30)) != 0 { - // mask &= ^DV_II_50_0_bit - // } - // } - TBZ $0x17, R0, f56_skip - - MOVW W_55+220(FP), R1 - MOVW W_58+232(FP), R2 - EOR R2, R1 - AND $0x20000000, R1 - NEG R1, R1 - CMP $0x00000000, R1 - BEQ f56_in - MOVW W_38+152(FP), R1 - MOVW W_42+168(FP), R2 - LSR $0x19, R2 - EOR R2, R1 - AND $0x00000008, R1 - CMP $0x00000000, R1 - BNE f56_in - MOVW W_37+148(FP), R1 - MOVW W_42+168(FP), R2 - LSR $0x02, R2 - EOR R2, R1 - AND $0x40000000, R1 - NEG R1, R1 - CMP $0x00000000, R1 - BEQ f56_in - JMP f56_skip - -f56_in: - AND $0xff7fffff, R0 - -f56_skip: - // if (mask & DV_II_50_2_bit) != 0 { - // if not(not((W[54]^(W[55]<<5))&(1<<6))) != 0 || - // not(not((W[52]^W[54])&(1<<6))) != 0 || - // not((W[51]^W[55])&(1<<1)) != 0 || - // not((W[45]^W[47])&(1<<1)) != 0 || - // not(not((W[38]^(W[42]>>25))&(1<<5))) != 0 || - // not((W[37]^(W[42]>>30))&(1<<0)) != 0 { - // mask &= ^DV_II_50_2_bit - // } - // } - TBZ $0x18, R0, f57_skip - - MOVW W_54+216(FP), R1 - MOVW W_55+220(FP), R2 - LSL $0x05, R2 - EOR R2, R1 - AND $0x00000040, R1 - CMP $0x00000000, R1 - BNE f57_in - MOVW W_52+208(FP), R1 - MOVW W_54+216(FP), R2 - EOR R2, R1 - AND $0x00000040, R1 - CMP $0x00000000, R1 - BNE f57_in - MOVW W_51+204(FP), R1 - MOVW W_55+220(FP), R2 - EOR R2, R1 - AND $0x00000002, R1 - NEG R1, R1 - CMP $0x00000000, R1 - BEQ f57_in - MOVW W_45+180(FP), R1 - MOVW W_47+188(FP), R2 - EOR R2, R1 - AND $0x00000002, R1 - NEG R1, R1 - CMP $0x00000000, R1 - BEQ f57_in - MOVW W_38+152(FP), R1 - MOVW W_42+168(FP), R2 - LSR $0x19, R2 - EOR R2, R1 - AND $0x00000020, R1 - CMP $0x00000000, R1 - BNE f57_in - MOVW W_37+148(FP), R1 - MOVW W_42+168(FP), R2 - LSR $0x1e, R2 - EOR R2, R1 - AND $0x00000001, R1 - NEG R1, R1 - CMP $0x00000000, R1 - BEQ f57_in - JMP f57_skip - -f57_in: - AND $0xfeffffff, R0 - -f57_skip: - // if (mask & DV_II_51_0_bit) != 0 { - // if not(not((W[39]^(W[43]>>25))&(1<<3))) != 0 || - // not((W[38]^(W[43]<<2))&(1<<30)) != 0 { - // mask &= ^DV_II_51_0_bit - // } - // } - TBZ $0x19, R0, f58_skip - - MOVW W_39+156(FP), R1 - MOVW W_43+172(FP), R2 - LSR $0x19, R2 - EOR R2, R1 - AND $0x00000008, R1 - CMP $0x00000000, R1 - BNE f58_in - MOVW W_38+152(FP), R1 - MOVW W_43+172(FP), R2 - LSL $0x02, R2 - EOR R2, R1 - AND $0x40000000, R1 - NEG R1, R1 - CMP $0x00000000, R1 - BEQ f58_in - JMP f58_skip - -f58_in: - AND $0xfdffffff, R0 - -f58_skip: - // if (mask & DV_II_51_2_bit) != 0 { - // if not(not((W[55]^(W[56]<<5))&(1<<6))) != 0 || - // not(not((W[53]^W[55])&(1<<6))) != 0 || - // not((W[52]^W[56])&(1<<1)) != 0 || - // not((W[46]^W[48])&(1<<1)) != 0 || - // not(not((W[39]^(W[43]>>25))&(1<<5))) != 0 || - // not((W[38]^(W[43]>>30))&(1<<0)) != 0 { - // mask &= ^DV_II_51_2_bit - // } - // } - TBZ $0x1a, R0, f59_skip - - MOVW W_55+220(FP), R1 - MOVW W_56+224(FP), R2 - LSL $0x05, R2 - EOR R2, R1 - AND $0x00000040, R1 - CMP $0x00000000, R1 - BNE f59_in - MOVW W_53+212(FP), R1 - MOVW W_55+220(FP), R2 - EOR R2, R1 - AND $0x00000040, R1 - 
CMP $0x00000000, R1 - BNE f59_in - MOVW W_52+208(FP), R1 - MOVW W_56+224(FP), R2 - EOR R2, R1 - AND $0x00000002, R1 - NEG R1, R1 - CMP $0x00000000, R1 - BEQ f59_in - MOVW W_46+184(FP), R1 - MOVW W_48+192(FP), R2 - EOR R2, R1 - AND $0x00000002, R1 - NEG R1, R1 - CMP $0x00000000, R1 - BEQ f59_in - MOVW W_39+156(FP), R1 - MOVW W_43+172(FP), R2 - LSR $0x19, R2 - EOR R2, R1 - AND $0x00000020, R1 - CMP $0x00000000, R1 - BNE f59_in - MOVW W_38+152(FP), R1 - MOVW W_43+172(FP), R2 - LSR $0x1e, R2 - EOR R2, R1 - AND $0x00000001, R1 - NEG R1, R1 - CMP $0x00000000, R1 - BEQ f59_in - JMP f59_skip - -f59_in: - AND $0xfbffffff, R0 - -f59_skip: - // if (mask & DV_II_52_0_bit) != 0 { - // if not(not((W[59]^W[60])&(1<<29))) != 0 || - // not(not((W[40]^(W[44]>>25))&(1<<3))) != 0 || - // not(not((W[40]^(W[44]>>25))&(1<<4))) != 0 || - // not((W[39]^(W[44]<<2))&(1<<30)) != 0 { - // mask &= ^DV_II_52_0_bit - // } - // } - TBZ $0x1b, R0, f60_skip - MOVW W_59+236(FP), R1 - MOVW W_60+240(FP), R2 - EOR R2, R1, R1 - AND $0x20000000, R1, R1 - CBNZ R1, f60_in - MOVW W_40+160(FP), R1 - MOVW W_44+176(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000008, R1, R1 - CBNZ R1, f60_in - MOVW W_40+160(FP), R1 - MOVW W_44+176(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - CBNZ R1, f60_in - MOVW W_39+156(FP), R1 - MOVW W_44+176(FP), R2 - LSL $0x02, R2, R2 - EOR R2, R1, R1 - AND $0x40000000, R1, R1 - NEG R1, R1 - CBZ R1, f60_in - B f60_skip - -f60_in: - AND $0xf7ffffff, R0, R0 - -f60_skip: - // if (mask & DV_II_53_0_bit) != 0 { - // if not((W[58]^W[61])&(1<<29)) != 0 || - // not(not((W[57]^(W[61]>>25))&(1<<4))) != 0 || - // not(not((W[41]^(W[45]>>25))&(1<<3))) != 0 || - // not(not((W[41]^(W[45]>>25))&(1<<4))) != 0 { - // mask &= ^DV_II_53_0_bit - // } - // } - TBZ $0x1c, R0, f61_skip - MOVW W_58+232(FP), R1 - MOVW W_61+244(FP), R2 - EOR R2, R1, R1 - AND $0x20000000, R1, R1 - NEG R1, R1 - CBZ R1, f61_in - MOVW W_57+228(FP), R1 - MOVW W_61+244(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - CBNZ R1, f61_in - MOVW W_41+164(FP), R1 - MOVW W_45+180(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000008, R1, R1 - CBNZ R1, f61_in - MOVW W_41+164(FP), R1 - MOVW W_45+180(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - CBNZ R1, f61_in - B f61_skip - -f61_in: - AND $0xefffffff, R0, R0 - -f61_skip: - // if (mask & DV_II_54_0_bit) != 0 { - // if not(not((W[58]^(W[62]>>25))&(1<<4))) != 0 || - // not(not((W[42]^(W[46]>>25))&(1<<3))) != 0 || - // not(not((W[42]^(W[46]>>25))&(1<<4))) != 0 { - // mask &= ^DV_II_54_0_bit - // } - // } - TBZ $0x1d, R0, f62_skip - MOVW W_58+232(FP), R1 - MOVW W_62+248(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - CBNZ R1, f62_in - MOVW W_42+168(FP), R1 - MOVW W_46+184(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000008, R1, R1 - CBNZ R1, f62_in - MOVW W_42+168(FP), R1 - MOVW W_46+184(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - CBNZ R1, f62_in - B f62_skip - -f62_in: - AND $0xdfffffff, R0, R0 - -f62_skip: - // if (mask & DV_II_55_0_bit) != 0 { - // if not(not((W[59]^(W[63]>>25))&(1<<4))) != 0 || - // not(not((W[57]^(W[59]>>25))&(1<<4))) != 0 || - // not(not((W[43]^(W[47]>>25))&(1<<3))) != 0 || - // not(not((W[43]^(W[47]>>25))&(1<<4))) != 0 { - // mask &= ^DV_II_55_0_bit - // } - // } - TBZ $0x1e, R0, f63_skip - MOVW W_59+236(FP), R1 - MOVW W_63+252(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - CBNZ R1, f63_in - MOVW W_57+228(FP), R1 - 
MOVW W_59+236(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - CBNZ R1, f63_in - MOVW W_43+172(FP), R1 - MOVW W_47+188(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000008, R1, R1 - CBNZ R1, f63_in - MOVW W_43+172(FP), R1 - MOVW W_47+188(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - CBNZ R1, f63_in - B f63_skip - -f63_in: - AND $0xbfffffff, R0, R0 - -f63_skip: - // if (mask & DV_II_56_0_bit) != 0 { - // if not(not((W[60]^(W[64]>>25))&(1<<4))) != 0 || - // not(not((W[44]^(W[48]>>25))&(1<<3))) != 0 || - // not(not((W[44]^(W[48]>>25))&(1<<4))) != 0 { - // mask &= ^DV_II_56_0_bit - // } - // } - TBZ $0x1f, R0, f64_skip - MOVW W_60+240(FP), R1 - MOVW W_64+256(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - CBNZ R1, f64_in - MOVW W_44+176(FP), R1 - MOVW W_48+192(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000008, R1, R1 - CBNZ R1, f64_in - MOVW W_44+176(FP), R1 - MOVW W_48+192(FP), R2 - LSR $0x19, R2, R2 - EOR R2, R1, R1 - AND $0x00000010, R1, R1 - CBNZ R1, f64_in - B f64_skip - -f64_in: - AND $0x7fffffff, R0, R0 - -f64_skip: -end: - MOVW R0, ret+320(FP) - RET diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_generic.go b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_generic.go deleted file mode 100644 index ee95bd52..00000000 --- a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_generic.go +++ /dev/null @@ -1,368 +0,0 @@ -// Based on the C implementation from Marc Stevens and Dan Shumow. -// https://github.com/cr-marcstevens/sha1collisiondetection - -package ubc - -type DvInfo struct { - // DvType, DvK and DvB define the DV: I(K,B) or II(K,B) (see the paper). - // https://marc-stevens.nl/research/papers/C13-S.pdf - DvType uint32 - DvK uint32 - DvB uint32 - - // TestT is the step to do the recompression from for collision detection. - TestT uint32 - - // MaskI and MaskB define the bit to check for each DV in the dvmask returned by ubc_check. - MaskI uint32 - MaskB uint32 - - // Dm is the expanded message block XOR-difference defined by the DV. - Dm [80]uint32 -} - -// CalculateDvMask takes as input an expanded message block and verifies the unavoidable bitconditions -// for all listed DVs. It returns a dvmask where each bit belonging to a DV is set if all -// unavoidable bitconditions for that DV have been met. -// Thus, one needs to do the recompression check for each DV that has its bit set. 
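The doc comment above states the contract that both the generic and the assembly implementations share: each bit of the returned dvmask stands for one disturbance vector (DV), and only the DVs whose bit is set still require the expensive recompression check. Below is a minimal caller-side sketch of that flow, using the exported API of the vendored pjbgf/sha1cd/ubc package (CalculateDvMask, SHA1_dvs, DvInfo); the assumption that MaskI indexes a single 32-bit mask word (and is therefore 0 here) and the zero-valued message block are illustrative only, not part of this change:

```go
package main

import (
	"fmt"

	"github.com/pjbgf/sha1cd/ubc"
)

// recompressionCandidates returns the disturbance vectors whose unavoidable
// bit conditions hold for the expanded message block W, i.e. the DVs for
// which the caller still has to run the recompression check.
func recompressionCandidates(W [80]uint32) []ubc.DvInfo {
	dvmask := ubc.CalculateDvMask(W)

	var candidates []ubc.DvInfo
	for _, dv := range ubc.SHA1_dvs() {
		// MaskI selects the mask word (assumed 0 for the single uint32
		// returned here) and MaskB the bit assigned to this DV.
		if dv.MaskI == 0 && dvmask&(1<<dv.MaskB) != 0 {
			candidates = append(candidates, dv)
		}
	}
	return candidates
}

func main() {
	// A real caller would pass the expanded message block produced by the
	// SHA-1 compression function; a zero block only keeps the sketch
	// self-contained.
	var W [80]uint32
	for _, dv := range recompressionCandidates(W) {
		fmt.Printf("recompress from step %d for DV %d(%d,%d)\n", dv.TestT, dv.DvType, dv.DvK, dv.DvB)
	}
}
```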
-func CalculateDvMaskGeneric(W [80]uint32) uint32 { - mask := uint32(0xFFFFFFFF) - mask &= (((((W[44] ^ W[45]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_I_51_0_bit | DV_I_52_0_bit | DV_II_45_0_bit | DV_II_46_0_bit | DV_II_50_0_bit | DV_II_51_0_bit)) - mask &= (((((W[49] ^ W[50]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_II_45_0_bit | DV_II_50_0_bit | DV_II_51_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) - mask &= (((((W[48] ^ W[49]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_52_0_bit | DV_II_49_0_bit | DV_II_50_0_bit | DV_II_54_0_bit | DV_II_55_0_bit)) - mask &= ((((W[47] ^ (W[50] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) - mask &= (((((W[47] ^ W[48]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_51_0_bit | DV_II_48_0_bit | DV_II_49_0_bit | DV_II_53_0_bit | DV_II_54_0_bit)) - mask &= (((((W[46] >> 4) ^ (W[49] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_50_0_bit | DV_II_55_0_bit)) - mask &= (((((W[46] ^ W[47]) >> 29) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_50_0_bit | DV_II_47_0_bit | DV_II_48_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) - mask &= (((((W[45] >> 4) ^ (W[48] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_49_0_bit | DV_II_54_0_bit)) - mask &= (((((W[45] ^ W[46]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_51_0_bit | DV_II_52_0_bit)) - mask &= (((((W[44] >> 4) ^ (W[47] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_53_0_bit)) - mask &= (((((W[43] >> 4) ^ (W[46] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_52_0_bit)) - mask &= (((((W[43] ^ W[44]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_I_50_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_49_0_bit | DV_II_50_0_bit)) - mask &= (((((W[42] >> 4) ^ (W[45] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_51_0_bit)) - mask &= (((((W[41] >> 4) ^ (W[44] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_50_0_bit)) - mask &= (((((W[40] ^ W[41]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_47_0_bit | DV_I_48_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_56_0_bit)) - mask &= (((((W[54] ^ W[55]) >> 29) & 1) - 1) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_50_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) - mask &= (((((W[53] ^ W[54]) >> 29) & 1) - 1) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_49_0_bit | DV_II_54_0_bit | DV_II_55_0_bit)) - mask &= (((((W[52] ^ W[53]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit | DV_II_53_0_bit | DV_II_54_0_bit)) - mask &= ((((W[50] ^ (W[53] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_48_0_bit | DV_II_54_0_bit)) - mask &= (((((W[50] ^ W[51]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_II_46_0_bit | DV_II_51_0_bit | DV_II_52_0_bit | DV_II_56_0_bit)) - mask &= ((((W[49] ^ (W[52] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_47_0_bit | DV_II_53_0_bit)) - mask &= ((((W[48] ^ (W[51] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_52_0_bit)) - mask &= (((((W[42] ^ W[43]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) - mask &= (((((W[41] ^ W[42]) >> 
29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_48_0_bit)) - mask &= (((((W[40] >> 4) ^ (W[43] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_50_0_bit | DV_II_49_0_bit | DV_II_56_0_bit)) - mask &= (((((W[39] >> 4) ^ (W[42] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_49_0_bit | DV_II_48_0_bit | DV_II_55_0_bit)) - - if (mask & (DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 { - mask &= (((((W[38] >> 4) ^ (W[41] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) - } - mask &= (((((W[37] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_47_0_bit | DV_II_46_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) - if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) != 0 { - mask &= (((((W[55] ^ W[56]) >> 29) & 1) - 1) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) - } - if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) != 0 { - mask &= ((((W[52] ^ (W[55] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) - } - if (mask & (DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) != 0 { - mask &= ((((W[51] ^ (W[54] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) - } - if (mask & (DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) != 0 { - mask &= (((((W[51] ^ W[52]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) - } - if (mask & (DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) != 0 { - mask &= (((((W[36] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) - } - if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) != 0 { - mask &= ((0 - (((W[53] ^ W[56]) >> 29) & 1)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) - } - if (mask & (DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) != 0 { - mask &= ((0 - (((W[51] ^ W[54]) >> 29) & 1)) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) - } - if (mask & (DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) != 0 { - mask &= ((0 - (((W[50] ^ W[52]) >> 29) & 1)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) - } - if (mask & (DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) != 0 { - mask &= ((0 - (((W[49] ^ W[51]) >> 29) & 1)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) - } - if (mask & (DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) != 0 { - mask &= ((0 - (((W[48] ^ W[50]) >> 29) & 1)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) - } - if (mask & (DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) != 0 { - mask &= ((0 - (((W[47] ^ W[49]) >> 29) & 1)) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) - } - if (mask & (DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) != 0 { - mask &= ((0 - (((W[46] ^ W[48]) >> 29) & 1)) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) - } - mask &= ((((W[45] ^ W[47]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit | DV_I_51_2_bit)) - if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) != 0 { - mask &= ((0 - (((W[45] ^ W[47]) >> 29) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) - } - mask &= (((((W[44] ^ W[46]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit | DV_I_50_2_bit)) - if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) != 0 { - mask &= ((0 - (((W[44] ^ W[46]) >> 29) 
& 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) - } - mask &= ((0 - ((W[41] ^ (W[42] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_II_46_2_bit | DV_II_51_2_bit)) - mask &= ((0 - ((W[40] ^ (W[41] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_51_2_bit | DV_II_50_2_bit)) - if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) != 0 { - mask &= ((0 - (((W[40] ^ W[42]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) - } - mask &= ((0 - ((W[39] ^ (W[40] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_50_2_bit | DV_II_49_2_bit)) - if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) != 0 { - mask &= ((0 - (((W[39] ^ W[41]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) - } - if (mask & (DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 { - mask &= ((0 - (((W[38] ^ W[40]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) - } - if (mask & (DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) != 0 { - mask &= ((0 - (((W[37] ^ W[39]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) - } - mask &= ((0 - ((W[36] ^ (W[37] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_50_2_bit | DV_II_46_2_bit)) - if (mask & (DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) != 0 { - mask &= (((((W[35] >> 4) ^ (W[39] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) - } - if (mask & (DV_I_48_0_bit | DV_II_48_0_bit)) != 0 { - mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 0))) | ^(DV_I_48_0_bit | DV_II_48_0_bit)) - } - if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 { - mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 1))) | ^(DV_I_45_0_bit | DV_II_45_0_bit)) - } - if (mask & (DV_I_47_0_bit | DV_II_47_0_bit)) != 0 { - mask &= ((0 - ((W[62] ^ (W[63] >> 5)) & (1 << 0))) | ^(DV_I_47_0_bit | DV_II_47_0_bit)) - } - if (mask & (DV_I_46_0_bit | DV_II_46_0_bit)) != 0 { - mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 0))) | ^(DV_I_46_0_bit | DV_II_46_0_bit)) - } - mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 2))) | ^(DV_I_46_2_bit | DV_II_46_2_bit)) - if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 { - mask &= ((0 - ((W[60] ^ (W[61] >> 5)) & (1 << 0))) | ^(DV_I_45_0_bit | DV_II_45_0_bit)) - } - if (mask & (DV_II_51_0_bit | DV_II_54_0_bit)) != 0 { - mask &= (((((W[58] ^ W[59]) >> 29) & 1) - 1) | ^(DV_II_51_0_bit | DV_II_54_0_bit)) - } - if (mask & (DV_II_50_0_bit | DV_II_53_0_bit)) != 0 { - mask &= (((((W[57] ^ W[58]) >> 29) & 1) - 1) | ^(DV_II_50_0_bit | DV_II_53_0_bit)) - } - if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 { - mask &= ((((W[56] ^ (W[59] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_52_0_bit | DV_II_54_0_bit)) - } - if (mask & (DV_II_51_0_bit | DV_II_52_0_bit)) != 0 { - mask &= ((0 - (((W[56] ^ W[59]) >> 29) & 1)) | ^(DV_II_51_0_bit | DV_II_52_0_bit)) - } - if (mask & (DV_II_49_0_bit | DV_II_52_0_bit)) != 0 { - mask &= (((((W[56] ^ W[57]) >> 29) & 1) - 1) | ^(DV_II_49_0_bit | DV_II_52_0_bit)) - } - if (mask & (DV_II_51_0_bit | DV_II_53_0_bit)) != 0 { - mask &= ((((W[55] ^ (W[58] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_51_0_bit | DV_II_53_0_bit)) - } - if (mask & (DV_II_50_0_bit | DV_II_52_0_bit)) != 0 { - mask &= ((((W[54] ^ (W[57] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_50_0_bit | DV_II_52_0_bit)) - } - if (mask & (DV_II_49_0_bit | DV_II_51_0_bit)) != 0 { - mask &= ((((W[53] ^ (W[56] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_49_0_bit | DV_II_51_0_bit)) - } - mask &= ((((W[51] ^ (W[50] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_46_2_bit)) - mask &= ((((W[48] ^ W[50]) & (1 << 6)) - (1 << 6)) | 
^(DV_I_50_2_bit | DV_II_46_2_bit)) - if (mask & (DV_I_51_0_bit | DV_I_52_0_bit)) != 0 { - mask &= ((0 - (((W[48] ^ W[55]) >> 29) & 1)) | ^(DV_I_51_0_bit | DV_I_52_0_bit)) - } - mask &= ((((W[47] ^ W[49]) & (1 << 6)) - (1 << 6)) | ^(DV_I_49_2_bit | DV_I_51_2_bit)) - mask &= ((((W[48] ^ (W[47] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_47_2_bit | DV_II_51_2_bit)) - mask &= ((((W[46] ^ W[48]) & (1 << 6)) - (1 << 6)) | ^(DV_I_48_2_bit | DV_I_50_2_bit)) - mask &= ((((W[47] ^ (W[46] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_46_2_bit | DV_II_50_2_bit)) - mask &= ((0 - ((W[44] ^ (W[45] >> 5)) & (1 << 1))) | ^(DV_I_51_2_bit | DV_II_49_2_bit)) - mask &= ((((W[43] ^ W[45]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit)) - mask &= (((((W[42] ^ W[44]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit)) - mask &= ((((W[43] ^ (W[42] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_II_46_2_bit | DV_II_51_2_bit)) - mask &= ((((W[42] ^ (W[41] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_51_2_bit | DV_II_50_2_bit)) - mask &= ((((W[41] ^ (W[40] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_49_2_bit)) - if (mask & (DV_I_52_0_bit | DV_II_51_0_bit)) != 0 { - mask &= ((((W[39] ^ (W[43] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_51_0_bit)) - } - if (mask & (DV_I_51_0_bit | DV_II_50_0_bit)) != 0 { - mask &= ((((W[38] ^ (W[42] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_50_0_bit)) - } - if (mask & (DV_I_48_2_bit | DV_I_51_2_bit)) != 0 { - mask &= ((0 - ((W[37] ^ (W[38] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_I_51_2_bit)) - } - if (mask & (DV_I_50_0_bit | DV_II_49_0_bit)) != 0 { - mask &= ((((W[37] ^ (W[41] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_II_49_0_bit)) - } - if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 { - mask &= ((0 - ((W[36] ^ W[38]) & (1 << 4))) | ^(DV_II_52_0_bit | DV_II_54_0_bit)) - } - mask &= ((0 - ((W[35] ^ (W[36] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_49_2_bit)) - if (mask & (DV_I_51_0_bit | DV_II_47_0_bit)) != 0 { - mask &= ((((W[35] ^ (W[39] >> 25)) & (1 << 3)) - (1 << 3)) | ^(DV_I_51_0_bit | DV_II_47_0_bit)) - } - - if mask != 0 { - if (mask & DV_I_43_0_bit) != 0 { - if not((W[61]^(W[62]>>5))&(1<<1)) != 0 || - not(not((W[59]^(W[63]>>25))&(1<<5))) != 0 || - not((W[58]^(W[63]>>30))&(1<<0)) != 0 { - mask &= ^DV_I_43_0_bit - } - } - if (mask & DV_I_44_0_bit) != 0 { - if not((W[62]^(W[63]>>5))&(1<<1)) != 0 || - not(not((W[60]^(W[64]>>25))&(1<<5))) != 0 || - not((W[59]^(W[64]>>30))&(1<<0)) != 0 { - mask &= ^DV_I_44_0_bit - } - } - if (mask & DV_I_46_2_bit) != 0 { - mask &= ((^((W[40] ^ W[42]) >> 2)) | ^DV_I_46_2_bit) - } - if (mask & DV_I_47_2_bit) != 0 { - if not((W[62]^(W[63]>>5))&(1<<2)) != 0 || - not(not((W[41]^W[43])&(1<<6))) != 0 { - mask &= ^DV_I_47_2_bit - } - } - if (mask & DV_I_48_2_bit) != 0 { - if not((W[63]^(W[64]>>5))&(1<<2)) != 0 || - not(not((W[48]^(W[49]<<5))&(1<<6))) != 0 { - mask &= ^DV_I_48_2_bit - } - } - if (mask & DV_I_49_2_bit) != 0 { - if not(not((W[49]^(W[50]<<5))&(1<<6))) != 0 || - not((W[42]^W[50])&(1<<1)) != 0 || - not(not((W[39]^(W[40]<<5))&(1<<6))) != 0 || - not((W[38]^W[40])&(1<<1)) != 0 { - mask &= ^DV_I_49_2_bit - } - } - if (mask & DV_I_50_0_bit) != 0 { - mask &= (((W[36] ^ W[37]) << 7) | ^DV_I_50_0_bit) - } - if (mask & DV_I_50_2_bit) != 0 { - mask &= (((W[43] ^ W[51]) << 11) | ^DV_I_50_2_bit) - } - if (mask & DV_I_51_0_bit) != 0 { - mask &= (((W[37] ^ W[38]) << 9) | ^DV_I_51_0_bit) - } - if (mask & DV_I_51_2_bit) != 0 { - if not(not((W[51]^(W[52]<<5))&(1<<6))) != 0 || - not(not((W[49]^W[51])&(1<<6))) != 0 
|| - not(not((W[37]^(W[37]>>5))&(1<<1))) != 0 || - not(not((W[35]^(W[39]>>25))&(1<<5))) != 0 { - mask &= ^DV_I_51_2_bit - } - } - if (mask & DV_I_52_0_bit) != 0 { - mask &= (((W[38] ^ W[39]) << 11) | ^DV_I_52_0_bit) - } - if (mask & DV_II_46_2_bit) != 0 { - mask &= (((W[47] ^ W[51]) << 17) | ^DV_II_46_2_bit) - } - if (mask & DV_II_48_0_bit) != 0 { - if not(not((W[36]^(W[40]>>25))&(1<<3))) != 0 || - not((W[35]^(W[40]<<2))&(1<<30)) != 0 { - mask &= ^DV_II_48_0_bit - } - } - if (mask & DV_II_49_0_bit) != 0 { - if not(not((W[37]^(W[41]>>25))&(1<<3))) != 0 || - not((W[36]^(W[41]<<2))&(1<<30)) != 0 { - mask &= ^DV_II_49_0_bit - } - } - if (mask & DV_II_49_2_bit) != 0 { - if not(not((W[53]^(W[54]<<5))&(1<<6))) != 0 || - not(not((W[51]^W[53])&(1<<6))) != 0 || - not((W[50]^W[54])&(1<<1)) != 0 || - not(not((W[45]^(W[46]<<5))&(1<<6))) != 0 || - not(not((W[37]^(W[41]>>25))&(1<<5))) != 0 || - not((W[36]^(W[41]>>30))&(1<<0)) != 0 { - mask &= ^DV_II_49_2_bit - } - } - if (mask & DV_II_50_0_bit) != 0 { - if not((W[55]^W[58])&(1<<29)) != 0 || - not(not((W[38]^(W[42]>>25))&(1<<3))) != 0 || - not((W[37]^(W[42]<<2))&(1<<30)) != 0 { - mask &= ^DV_II_50_0_bit - } - } - if (mask & DV_II_50_2_bit) != 0 { - if not(not((W[54]^(W[55]<<5))&(1<<6))) != 0 || - not(not((W[52]^W[54])&(1<<6))) != 0 || - not((W[51]^W[55])&(1<<1)) != 0 || - not((W[45]^W[47])&(1<<1)) != 0 || - not(not((W[38]^(W[42]>>25))&(1<<5))) != 0 || - not((W[37]^(W[42]>>30))&(1<<0)) != 0 { - mask &= ^DV_II_50_2_bit - } - } - if (mask & DV_II_51_0_bit) != 0 { - if not(not((W[39]^(W[43]>>25))&(1<<3))) != 0 || - not((W[38]^(W[43]<<2))&(1<<30)) != 0 { - mask &= ^DV_II_51_0_bit - } - } - if (mask & DV_II_51_2_bit) != 0 { - if not(not((W[55]^(W[56]<<5))&(1<<6))) != 0 || - not(not((W[53]^W[55])&(1<<6))) != 0 || - not((W[52]^W[56])&(1<<1)) != 0 || - not((W[46]^W[48])&(1<<1)) != 0 || - not(not((W[39]^(W[43]>>25))&(1<<5))) != 0 || - not((W[38]^(W[43]>>30))&(1<<0)) != 0 { - mask &= ^DV_II_51_2_bit - } - } - if (mask & DV_II_52_0_bit) != 0 { - if not(not((W[59]^W[60])&(1<<29))) != 0 || - not(not((W[40]^(W[44]>>25))&(1<<3))) != 0 || - not(not((W[40]^(W[44]>>25))&(1<<4))) != 0 || - not((W[39]^(W[44]<<2))&(1<<30)) != 0 { - mask &= ^DV_II_52_0_bit - } - } - if (mask & DV_II_53_0_bit) != 0 { - if not((W[58]^W[61])&(1<<29)) != 0 || - not(not((W[57]^(W[61]>>25))&(1<<4))) != 0 || - not(not((W[41]^(W[45]>>25))&(1<<3))) != 0 || - not(not((W[41]^(W[45]>>25))&(1<<4))) != 0 { - mask &= ^DV_II_53_0_bit - } - } - if (mask & DV_II_54_0_bit) != 0 { - if not(not((W[58]^(W[62]>>25))&(1<<4))) != 0 || - not(not((W[42]^(W[46]>>25))&(1<<3))) != 0 || - not(not((W[42]^(W[46]>>25))&(1<<4))) != 0 { - mask &= ^DV_II_54_0_bit - } - } - if (mask & DV_II_55_0_bit) != 0 { - if not(not((W[59]^(W[63]>>25))&(1<<4))) != 0 || - not(not((W[57]^(W[59]>>25))&(1<<4))) != 0 || - not(not((W[43]^(W[47]>>25))&(1<<3))) != 0 || - not(not((W[43]^(W[47]>>25))&(1<<4))) != 0 { - mask &= ^DV_II_55_0_bit - } - } - if (mask & DV_II_56_0_bit) != 0 { - if not(not((W[60]^(W[64]>>25))&(1<<4))) != 0 || - not(not((W[44]^(W[48]>>25))&(1<<3))) != 0 || - not(not((W[44]^(W[48]>>25))&(1<<4))) != 0 { - mask &= ^DV_II_56_0_bit - } - } - } - - return mask -} - -func not(x uint32) uint32 { - if x == 0 { - return 1 - } - - return 0 -} - -func SHA1_dvs() []DvInfo { - return sha1_dvs -} diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_noasm.go b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_noasm.go deleted file mode 100644 index b7cc826a..00000000 --- a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_noasm.go +++ /dev/null @@ -1,12 +0,0 @@ 
-//go:build (!amd64 && !arm64) || noasm || !gc - // +build !amd64,!arm64 noasm !gc - -package ubc - -// Check takes as input an expanded message block and verifies the unavoidable bitconditions -// for all listed DVs. It returns a dvmask where each bit belonging to a DV is set if all -// unavoidable bitconditions for that DV have been met. -// Thus, one needs to do the recompression check for each DV that has its bit set. -func CalculateDvMask(W [80]uint32) uint32 { - return CalculateDvMaskGeneric(W) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index ad347113..2331b8b4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -95,7 +95,8 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const help: help, variableLabels: variableLabels.compile(), } - if !model.IsValidMetricName(model.LabelValue(fqName)) { + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. + if !model.NameValidationScheme.IsValidMetricName(fqName) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index f7f97ef9..d273b664 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -67,7 +67,7 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) } // Our current conversion moves to legacy naming, so use legacy validation. - valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) + valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name) switch d.Kind { case metrics.KindUint64: case metrics.KindFloat64: diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index c21911f2..5fe8d3b4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -184,5 +184,6 @@ func validateLabelValues(vals []string, expectedNumberOfValues int) error { } func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. + return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix) } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 1448439b..7b762370 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -70,19 +70,34 @@ func ResponseFormat(h http.Header) Format { return FmtUnknown } -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. +// NewDecoder returns a new decoder based on the given input format. Metric +// names are validated based on the provided Format -- if the format requires +// escaping, traditional Prometheus validity checking is used.
Otherwise, names +// are checked for UTF-8 validity. Supported formats include delimited protobuf +// and Prometheus text format. For historical reasons, this decoder fallbacks +// to classic text decoding for any other format. This decoder does not fully +// support OpenMetrics although it may often succeed due to the similarities +// between the formats. This decoder may not support the latest features of +// Prometheus text format and is not intended for high-performance applications. +// See: https://github.com/prometheus/common/issues/812 func NewDecoder(r io.Reader, format Format) Decoder { + scheme := model.LegacyValidation + if format.ToEscapingScheme() == model.NoEscaping { + scheme = model.UTF8Validation + } switch format.FormatType() { case TypeProtoDelim: - return &protoDecoder{r: bufio.NewReader(r)} + return &protoDecoder{r: bufio.NewReader(r), s: scheme} + case TypeProtoText, TypeProtoCompact: + return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)} } - return &textDecoder{r: r} + return &textDecoder{r: r, s: scheme} } // protoDecoder implements the Decoder interface for protocol buffers. type protoDecoder struct { r protodelim.Reader + s model.ValidationScheme } // Decode implements the Decoder interface. @@ -93,7 +108,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + if !d.s.IsValidMetricName(v.GetName()) { return fmt.Errorf("invalid metric name %q", v.GetName()) } for _, m := range v.GetMetric() { @@ -107,7 +122,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if !model.LabelValue(l.GetValue()).IsValid() { return fmt.Errorf("invalid label value %q", l.GetValue()) } - if !model.LabelName(l.GetName()).IsValid() { + if !d.s.IsValidLabelName(l.GetName()) { return fmt.Errorf("invalid label name %q", l.GetName()) } } @@ -115,10 +130,20 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { return nil } +// errDecoder is an error-state decoder that always returns the same error. +type errDecoder struct { + err error +} + +func (d *errDecoder) Decode(*dto.MetricFamily) error { + return d.err +} + // textDecoder implements the Decoder interface for the text protocol. type textDecoder struct { r io.Reader fams map[string]*dto.MetricFamily + s model.ValidationScheme err error } @@ -126,7 +151,7 @@ type textDecoder struct { func (d *textDecoder) Decode(v *dto.MetricFamily) error { if d.err == nil { // Read all metrics in one shot. - var p TextParser + p := NewTextParser(d.s) d.fams, d.err = p.TextToMetricFamilies(d.r) // If we don't get an error, store io.EOF for the end. if d.err == nil { diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index d7f3d76f..73c24dfb 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,14 +18,12 @@ import ( "io" "net/http" + "github.com/munnerz/goautoneg" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" "github.com/prometheus/common/model" - - "github.com/munnerz/goautoneg" - - dto "github.com/prometheus/client_model/go" ) // Encoder types encode metric families into an underlying wire protocol. 
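// Illustrative sketch (not part of the vendored diff): the updated expfmt.NewDecoder
// derives a name-validation scheme from the Format, using legacy validation unless the
// format's escaping scheme is NoEscaping (escaping=allow-utf-8), in which case names are
// only checked for UTF-8 validity. expfmt.TypeTextPlain is assumed here to be the classic
// text FormatType constant.
package main

import (
	"fmt"
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	payload := "demo_requests_total 7\n"
	dec := expfmt.NewDecoder(strings.NewReader(payload), expfmt.NewFormat(expfmt.TypeTextPlain))
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err != nil {
			if err == io.EOF {
				break // the text decoder stores io.EOF once all families have been read
			}
			fmt.Println("decode error:", err)
			return
		}
		fmt.Println("decoded family:", mf.GetName())
	}
}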
@@ -61,7 +59,7 @@ func (ec encoderCloser) Close() error { // appropriate accepted type is found, FmtText is returned (which is the // Prometheus text format). This function will never negotiate FmtOpenMetrics, // as the support is still experimental. To include the option to negotiate -// FmtOpenMetrics, use NegotiateOpenMetrics. +// FmtOpenMetrics, use NegotiateIncludingOpenMetrics. func Negotiate(h http.Header) Format { escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { @@ -153,7 +151,7 @@ func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := protodelim.MarshalTo(w, v) + _, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index b2688656..c34c7de4 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -36,9 +36,11 @@ const ( ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + //nolint:revive // Allow for underscores. OpenMetricsVersion_0_0_1 = "0.0.1" + //nolint:revive // Allow for underscores. OpenMetricsVersion_1_0_0 = "1.0.0" // The Content-Type values for the different wire protocols. Do not do direct @@ -54,8 +56,10 @@ const ( // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) @@ -188,8 +192,8 @@ func (f Format) FormatType() FormatType { // Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid // "escaping" term exists, that will be used. Otherwise, the global default will // be returned. -func (format Format) ToEscapingScheme() model.EscapingScheme { - for _, p := range strings.Split(string(format), ";") { +func (f Format) ToEscapingScheme() model.EscapingScheme { + for _, p := range strings.Split(string(f), ";") { toks := strings.Split(p, "=") if len(toks) != 2 { continue diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index dfac962a..0290f6ab 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -17,7 +17,11 @@ package expfmt -import "bytes" +import ( + "bytes" + + "github.com/prometheus/common/model" +) // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // @@ -26,9 +30,8 @@ import "bytes" // // Further input samples should go in the folder fuzz/corpus. 
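// Illustrative sketch (not part of the vendored diff): with the encode.go change above,
// the delimited-protobuf encoder also runs metric families through
// model.EscapeMetricFamily using the Format's escaping scheme, matching the text
// encoders. A minimal example, assuming the expfmt.NewFormat/TypeProtoDelim constructors
// referenced elsewhere in expfmt.
package main

import (
	"bytes"
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	mf := &dto.MetricFamily{
		Name:   proto.String("demo.requests.total"), // dotted name that needs escaping
		Type:   dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{Counter: &dto.Counter{Value: proto.Float64(1)}}},
	}

	var buf bytes.Buffer
	enc := expfmt.NewEncoder(&buf, expfmt.NewFormat(expfmt.TypeProtoDelim))
	if err := enc.Encode(mf); err != nil {
		fmt.Println("encode error:", err)
		return
	}
	fmt.Printf("wrote %d delimited-protobuf bytes\n", buf.Len())
}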
func Fuzz(in []byte) int { - parser := TextParser{} + parser := NewTextParser(model.UTF8Validation) _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - if err != nil { return 0 } diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index a21ed4ec..8dbf6d04 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,11 +22,10 @@ import ( "strconv" "strings" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/types/known/timestamppb" "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" ) type encoderOption struct { @@ -249,7 +248,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E // Finally the samples, one line for each. if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { - compliantName = compliantName + "_total" + compliantName += "_total" } for _, metric := range in.Metric { switch metricType { @@ -477,7 +476,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -641,11 +640,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - err = (*e).Timestamp.CheckValid() + err = e.Timestamp.CheckValid() if err != nil { return written, err } - ts := (*e).Timestamp.AsTime() + ts := e.Timestamp.AsTime() // TODO(beorn7): Format this directly from components of ts to // avoid overflow/underflow and precision issues of the float // conversion. diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 4b86434b..c4e9c1bb 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -22,9 +22,9 @@ import ( "strings" "sync" - "github.com/prometheus/common/model" - dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/common/model" ) // enhancedWriter has all the enhanced write functions needed here. bufio.Writer @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. 
func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(name) { + if model.LegacyValidation.IsValidMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 4067978a..8f2edde3 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -78,6 +78,14 @@ type TextParser struct { // These indicate if the metric name from the current line being parsed is inside // braces and if that metric name was found respectively. currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool + // scheme sets the desired ValidationScheme for names. Defaults to the invalid + // UnsetValidation. + scheme model.ValidationScheme +} + +// NewTextParser returns a new TextParser with the provided nameValidationScheme. +func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser { + return TextParser{scheme: nameValidationScheme} } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -126,6 +134,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF func (p *TextParser) reset(in io.Reader) { p.metricFamiliesByName = map[string]*dto.MetricFamily{} + p.currentLabelPairs = nil if p.buf == nil { p.buf = bufio.NewReader(in) } else { @@ -216,6 +225,9 @@ func (p *TextParser) startComment() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -244,6 +256,9 @@ func (p *TextParser) readingMetricName() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } // Now is the time to fix the type if it hasn't happened yet. if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() @@ -311,6 +326,9 @@ func (p *TextParser) startLabelName() stateFn { switch p.currentByte { case ',': p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -319,6 +337,10 @@ func (p *TextParser) startLabelName() stateFn { return p.startLabelName case '}': p.setOrCreateCurrentMF() + if p.err != nil { + p.currentLabelPairs = nil + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -341,6 +363,12 @@ func (p *TextParser) startLabelName() stateFn { p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + p.currentLabelPairs = nil + return nil + } + if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) { + p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName())) + p.currentLabelPairs = nil return nil } // Special summary/histogram treatment. 
Don't add 'quantile' and 'le' @@ -353,13 +381,12 @@ func (p *TextParser) startLabelName() stateFn { labels := make(map[string]struct{}) for _, l := range p.currentLabelPairs { lName := l.GetName() - if _, exists := labels[lName]; !exists { - labels[lName] = struct{}{} - } else { + if _, exists := labels[lName]; exists { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) p.currentLabelPairs = nil return nil } + labels[lName] = struct{}{} } return p.startLabelValue } @@ -440,7 +467,8 @@ func (p *TextParser) readingValue() stateFn { // When we are here, we have read all the labels, so for the // special case of a summary/histogram, we can finally find out // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { + switch p.currentMF.GetType() { + case dto.MetricType_SUMMARY: signature := model.LabelsToSignature(p.currentLabels) if summary := p.summaries[signature]; summary != nil { p.currentMetric = summary @@ -448,7 +476,7 @@ func (p *TextParser) readingValue() stateFn { p.summaries[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + case dto.MetricType_HISTOGRAM: signature := model.LabelsToSignature(p.currentLabels) if histogram := p.histograms[signature]; histogram != nil { p.currentMetric = histogram @@ -456,7 +484,7 @@ func (p *TextParser) readingValue() stateFn { p.histograms[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else { + default: p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } if p.readTokenUntilWhitespace(); p.err != nil { @@ -805,6 +833,10 @@ func (p *TextParser) setOrCreateCurrentMF() { p.currentIsHistogramCount = false p.currentIsHistogramSum = false name := p.currentToken.String() + if !p.scheme.IsValidMetricName(name) { + p.parseError(fmt.Sprintf("invalid metric name %q", name)) + return + } if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { return } diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index de83afe9..dfeb34be 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -32,6 +32,12 @@ const ( // MetricNameLabel is the label name indicating the metric name of a // timeseries. MetricNameLabel = "__name__" + // MetricTypeLabel is the label name indicating the metric type of + // timeseries as per the PROM-39 proposal. + MetricTypeLabel = "__type__" + // MetricUnitLabel is the label name indicating the metric unit of + // timeseries as per the PROM-39 proposal. + MetricUnitLabel = "__unit__" // SchemeLabel is the name of the label that holds the scheme on which to // scrape a target. @@ -100,34 +106,21 @@ type LabelName string // IsValid returns true iff the name matches the pattern of LabelNameRE when // NameValidationScheme is set to LegacyValidation, or valid UTF-8 if // NameValidationScheme is set to UTF8Validation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [ValidationScheme.IsValidLabelName] instead. 
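// Illustrative sketch (not part of the vendored diff): TextParser now carries a
// model.ValidationScheme for names, and its zero value holds the invalid
// UnsetValidation, so callers are expected to build it with NewTextParser.
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	p := expfmt.NewTextParser(model.UTF8Validation)
	fams, err := p.TextToMetricFamilies(strings.NewReader("demo_requests_total 7\n"))
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for name := range fams {
		fmt.Println("parsed family:", name)
	}
}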
func (ln LabelName) IsValid() bool { - if len(ln) == 0 { - return false - } - switch NameValidationScheme { - case LegacyValidation: - return ln.IsValidLegacy() - case UTF8Validation: - return utf8.ValidString(string(ln)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidLabelName(string(ln)) } // IsValidLegacy returns true iff name matches the pattern of LabelNameRE for // legacy names. It does not use LabelNameRE for the check but a much faster // hardcoded implementation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [LegacyValidation.IsValidLabelName] instead. func (ln LabelName) IsValidLegacy() bool { - if len(ln) == 0 { - return false - } - for i, b := range ln { - // TODO: Apply De Morgan's law. Make sure there are tests for this. - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck - return false - } - } - return true + return LegacyValidation.IsValidLabelName(string(ln)) } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go index d0ad88da..9de47b25 100644 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -114,10 +114,10 @@ func (ls LabelSet) Clone() LabelSet { } // Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) +func (ls LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(ls)) - for k, v := range l { + for k, v := range ls { result[k] = v } @@ -140,7 +140,7 @@ func (ls LabelSet) FastFingerprint() Fingerprint { } // UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { +func (ls *LabelSet) UnmarshalJSON(b []byte) error { var m map[LabelName]LabelValue if err := json.Unmarshal(b, &m); err != nil { return err @@ -153,6 +153,6 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error { return fmt.Errorf("%q is not a valid label name", ln) } } - *l = LabelSet(m) + *ls = LabelSet(m) return nil } diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index a6b01755..3feebf32 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -14,6 +14,7 @@ package model import ( + "encoding/json" "errors" "fmt" "regexp" @@ -23,6 +24,7 @@ import ( "unicode/utf8" dto "github.com/prometheus/client_model/go" + "go.yaml.in/yaml/v2" "google.golang.org/protobuf/proto" ) @@ -62,16 +64,151 @@ var ( type ValidationScheme int const ( + // UnsetValidation represents an undefined ValidationScheme. + // Should not be used in practice. + UnsetValidation ValidationScheme = iota + // LegacyValidation is a setting that requires that all metric and label names // conform to the original Prometheus character requirements described by // MetricNameRE and LabelNameRE. - LegacyValidation ValidationScheme = iota + LegacyValidation // UTF8Validation only requires that metric and label names be valid UTF-8 // strings. 
UTF8Validation ) +var _ interface { + yaml.Marshaler + yaml.Unmarshaler + json.Marshaler + json.Unmarshaler + fmt.Stringer +} = new(ValidationScheme) + +// String returns the string representation of s. +func (s ValidationScheme) String() string { + switch s { + case UnsetValidation: + return "unset" + case LegacyValidation: + return "legacy" + case UTF8Validation: + return "utf8" + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (s ValidationScheme) MarshalYAML() (any, error) { + switch s { + case UnsetValidation: + return "", nil + case LegacyValidation, UTF8Validation: + return s.String(), nil + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error { + var scheme string + if err := unmarshal(&scheme); err != nil { + return err + } + return s.Set(scheme) +} + +// MarshalJSON implements the json.Marshaler interface. +func (s ValidationScheme) MarshalJSON() ([]byte, error) { + switch s { + case UnsetValidation: + return json.Marshal("") + case UTF8Validation, LegacyValidation: + return json.Marshal(s.String()) + default: + return nil, fmt.Errorf("unhandled ValidationScheme: %d", s) + } +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error { + var repr string + if err := json.Unmarshal(bytes, &repr); err != nil { + return err + } + return s.Set(repr) +} + +// Set implements the pflag.Value interface. +func (s *ValidationScheme) Set(text string) error { + switch text { + case "": + // Don't change the value. + case LegacyValidation.String(): + *s = LegacyValidation + case UTF8Validation.String(): + *s = UTF8Validation + default: + return fmt.Errorf("unrecognized ValidationScheme: %q", text) + } + return nil +} + +// IsValidMetricName returns whether metricName is valid according to s. +func (s ValidationScheme) IsValidMetricName(metricName string) bool { + switch s { + case LegacyValidation: + if len(metricName) == 0 { + return false + } + for i, b := range metricName { + if !isValidLegacyRune(b, i) { + return false + } + } + return true + case UTF8Validation: + if len(metricName) == 0 { + return false + } + return utf8.ValidString(metricName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String())) + } +} + +// IsValidLabelName returns whether labelName is valid according to s. +func (s ValidationScheme) IsValidLabelName(labelName string) bool { + switch s { + case LegacyValidation: + if len(labelName) == 0 { + return false + } + for i, b := range labelName { + // TODO: Apply De Morgan's law. Make sure there are tests for this. + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck + return false + } + } + return true + case UTF8Validation: + if len(labelName) == 0 { + return false + } + return utf8.ValidString(labelName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s)) + } +} + +// Type implements the pflag.Value interface. +func (ValidationScheme) Type() string { + return "validationScheme" +} + type EscapingScheme int const ( @@ -101,7 +238,7 @@ const ( // Accept header, the default NameEscapingScheme will be used. EscapingKey = "escaping" - // Possible values for Escaping Key: + // Possible values for Escaping Key. 
AllowUTF8 = "allow-utf-8" // No escaping required. EscapeUnderscores = "underscores" EscapeDots = "dots" @@ -175,34 +312,22 @@ func (m Metric) FastFingerprint() Fingerprint { // IsValidMetricName returns true iff name matches the pattern of MetricNameRE // for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is // selected. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [ValidationScheme.IsValidMetricName] instead. func IsValidMetricName(n LabelValue) bool { - switch NameValidationScheme { - case LegacyValidation: - return IsValidLegacyMetricName(string(n)) - case UTF8Validation: - if len(n) == 0 { - return false - } - return utf8.ValidString(string(n)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidMetricName(string(n)) } // IsValidLegacyMetricName is similar to IsValidMetricName but always uses the // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [LegacyValidation.IsValidMetricName] instead. func IsValidLegacyMetricName(n string) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !isValidLegacyRune(b, i) { - return false - } - } - return true + return LegacyValidation.IsValidMetricName(n) } // EscapeMetricFamily escapes the given metric names and labels with the given @@ -310,13 +435,14 @@ func EscapeName(name string, scheme EscapingScheme) string { case DotsEscaping: // Do not early return for legacy valid names, we still escape underscores. for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if b == '.' { + case b == '.': escaped.WriteString("_dot_") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else { + default: escaped.WriteString("__") } } @@ -327,13 +453,14 @@ func EscapeName(name string, scheme EscapingScheme) string { } escaped.WriteString("U__") for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else if !utf8.ValidRune(b) { + case !utf8.ValidRune(b): escaped.WriteString("_FFFD_") - } else { + default: escaped.WriteRune('_') escaped.WriteString(strconv.FormatInt(int64(b), 16)) escaped.WriteRune('_') @@ -345,7 +472,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } } -// lower function taken from strconv.atoi +// lower function taken from strconv.atoi. 
func lower(c byte) byte { return c | ('x' - 'X') } @@ -409,11 +536,12 @@ func UnescapeName(name string, scheme EscapingScheme) string { } r := lower(escapedName[i]) utf8Val *= 16 - if r >= '0' && r <= '9' { + switch { + case r >= '0' && r <= '9': utf8Val += uint(r) - '0' - } else if r >= 'a' && r <= 'f' { + case r >= 'a' && r <= 'f': utf8Val += uint(r) - 'a' + 10 - } else { + default: return name } i++ diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index fed9e87b..1730b0fd 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -126,14 +126,14 @@ func (t *Time) UnmarshalJSON(b []byte) error { p := strings.Split(string(b), ".") switch len(p) { case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } *t = Time(v * second) case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } @@ -143,7 +143,7 @@ func (t *Time) UnmarshalJSON(b []byte) error { if prec < 0 { p[1] = p[1][:dotPrecision] } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) + p[1] += strings.Repeat("0", prec) } va, err := strconv.ParseInt(p[1], 10, 32) @@ -170,15 +170,15 @@ func (t *Time) UnmarshalJSON(b []byte) error { // This type should not propagate beyond the scope of input/output processing. type Duration time.Duration -// Set implements pflag/flag.Value +// Set implements pflag/flag.Value. func (d *Duration) Set(s string) error { var err error *d, err = ParseDuration(s) return err } -// Type implements pflag.Value -func (d *Duration) Type() string { +// Type implements pflag.Value. +func (*Duration) Type() string { return "duration" } diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index 8050637d..a9995a37 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -191,7 +191,8 @@ func (ss SampleStream) String() string { } func (ss SampleStream) MarshalJSON() ([]byte, error) { - if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + switch { + case len(ss.Histograms) > 0 && len(ss.Values) > 0: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -202,7 +203,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else if len(ss.Histograms) > 0 { + case len(ss.Histograms) > 0: v := struct { Metric Metric `json:"metric"` Histograms []SampleHistogramPair `json:"histograms"` @@ -211,7 +212,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else { + default: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -258,7 +259,7 @@ func (s Scalar) String() string { // MarshalJSON implements json.Marshaler. func (s Scalar) MarshalJSON() ([]byte, error) { v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) + return json.Marshal([...]interface{}{s.Timestamp, v}) } // UnmarshalJSON implements json.Unmarshaler. 
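// Illustrative sketch (not part of the vendored diff): the package-level
// IsValidMetricName/IsValidLegacyMetricName helpers are deprecated in favour of methods
// on model.ValidationScheme, which also implements pflag.Value plus the JSON/YAML
// (un)marshaler interfaces.
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := "http.requests.total" // dots are only valid under UTF-8 validation

	fmt.Println(model.LegacyValidation.IsValidMetricName(name)) // false
	fmt.Println(model.UTF8Validation.IsValidMetricName(name))   // true

	// As a flag value: Set accepts "legacy", "utf8", or "" (leave unchanged).
	var scheme model.ValidationScheme
	if err := scheme.Set("legacy"); err != nil {
		fmt.Println("set error:", err)
		return
	}
	fmt.Println(scheme, scheme.IsValidLabelName("le")) // legacy true
}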
@@ -349,9 +350,9 @@ func (m Matrix) Len() int { return len(m) } func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) +func (m Matrix) String() string { + matCp := make(Matrix, len(m)) + copy(matCp, m) sort.Sort(matCp) strs := make([]string, len(matCp)) diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go index 895e6a3e..91ce5b7a 100644 --- a/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -86,22 +86,22 @@ func (s *HistogramBucket) Equal(o *HistogramBucket) bool { return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) } -func (b HistogramBucket) String() string { +func (s HistogramBucket) String() string { var sb strings.Builder - lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 - upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3 + upperInclusive := s.Boundaries == 0 || s.Boundaries == 3 if lowerInclusive { sb.WriteRune('[') } else { sb.WriteRune('(') } - fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + fmt.Fprintf(&sb, "%g,%g", s.Lower, s.Upper) if upperInclusive { sb.WriteRune(']') } else { sb.WriteRune(')') } - fmt.Fprintf(&sb, ":%v", b.Count) + fmt.Fprintf(&sb, ":%v", s.Count) return sb.String() } diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go index 726c50ee..078910f4 100644 --- a/vendor/github.com/prometheus/common/model/value_type.go +++ b/vendor/github.com/prometheus/common/model/value_type.go @@ -66,8 +66,8 @@ func (et *ValueType) UnmarshalJSON(b []byte) error { return nil } -func (e ValueType) String() string { - switch e { +func (et ValueType) String() string { + switch et { case ValNone: return "" case ValScalar: diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 2c8f4808..6acf8ab1 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -12,14 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. +version: "2" + run: - deadline: 5m + timeout: 5m + +formatters: + enable: + - gofmt + - goimports linters: - disable-all: true + default: none enable: #- bodyclose - # - deadcode ! deprecated since v1.49.0; replaced by 'unused' #- depguard #- dogsled #- dupl @@ -30,28 +36,24 @@ linters: - goconst - gocritic #- gocyclo - - gofmt - - goimports - #- gomnd #- goprintffuncname - gosec - - gosimple - govet - ineffassign #- lll - misspell + #- mnd #- nakedret #- noctx - nolintlint #- rowserrcheck - #- scopelint - staticcheck - #- structcheck ! deprecated since v1.49.0; replaced by 'unused' - - stylecheck - #- typecheck - unconvert #- unparam - unused - # - varcheck ! 
deprecated since v1.49.0; replaced by 'unused' #- whitespace - fast: false + exclusions: + presets: + - common-false-positives + - legacy + - std-error-handling diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 71757151..8416275f 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -1,8 +1,14 @@ - -![cobra logo](https://github.com/user-attachments/assets/cbc3adf8-0dff-46e9-a88d-5e2d971c169e) +
+[image: cobra-logo]
Cobra is a library for creating powerful modern CLI applications. +Visit Cobra.dev for extensive documentation + + Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra. @@ -11,6 +17,20 @@ name a few. [This list](site/content/projects_using_cobra.md) contains a more ex [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) +
+[Supported by: Warp sponsorship image]
+### [Warp, the AI terminal for devs](https://www.warp.dev/cobra)
+[Try Cobra in Warp today](https://www.warp.dev/cobra)
+
# Overview diff --git a/vendor/github.com/spf13/cobra/SECURITY.md b/vendor/github.com/spf13/cobra/SECURITY.md new file mode 100644 index 00000000..54e60c28 --- /dev/null +++ b/vendor/github.com/spf13/cobra/SECURITY.md @@ -0,0 +1,105 @@ +# Security Policy + +## Reporting a Vulnerability + +The `cobra` maintainers take security issues seriously and +we appreciate your efforts to _**responsibly**_ disclose your findings. +We will make every effort to swiftly respond and address concerns. + +To report a security vulnerability: + +1. **DO NOT** create a public GitHub issue for the vulnerability! +2. **DO NOT** create a public GitHub Pull Request with a fix for the vulnerability! +3. Send an email to `cobra-security@googlegroups.com`. +4. Include the following details in your report: + - Description of the vulnerability + - Steps to reproduce + - Potential impact of the vulnerability (to your downstream project, to the Go ecosystem, etc.) + - Any potential mitigations you've already identified +5. Allow up to 7 days for an initial response. + You should receive an acknowledgment of your report and an estimated timeline for a fix. +6. (Optional) If you have a fix and would like to contribute your patch, please work + directly with the maintainers via `cobra-security@googlegroups.com` to + coordinate pushing the patch to GitHub, cutting a new release, and disclosing the change. + +## Response Process + +When a security vulnerability report is received, the `cobra` maintainers will: + +1. Confirm receipt of the vulnerability report within 7 days. +2. Assess the report to determine if it constitutes a security vulnerability. +3. If confirmed, assign the vulnerability a severity level and create a timeline for addressing it. +4. Develop and test a fix. +5. Patch the vulnerability and make a new GitHub release: the maintainers will coordinate disclosure with the reporter. +6. Create a new GitHub Security Advisory to inform the broader Go ecosystem + +## Disclosure Policy + +The `cobra` maintainers follow a coordinated disclosure process: + +1. Security vulnerabilities will be addressed as quickly as possible. +2. A CVE (Common Vulnerabilities and Exposures) identifier will be requested for significant vulnerabilities + that are within `cobra` itself. +3. Once a fix is ready, the maintainers will: + - Release a new version containing the fix. + - Update the security advisory with details about the vulnerability. + - Credit the reporter (unless they wish to remain anonymous). + - Credit the fixer (unless they wish to remain anonymous, this may be the same as the reporter). + - Announce the vulnerability through appropriate channels + (GitHub Security Advisory, mailing lists, GitHub Releases, etc.) + +## Supported Versions + +Security fixes will typically only be released for the most recent major release. + +## Upstream Security Issues + +`cobra` generally will not accept vulnerability reports that originate in upstream +dependencies. I.e., if there is a problem in Go code that `cobra` depends on, +it is best to engage that project's maintainers and owners. + +This security policy primarily pertains only to `cobra` itself but if you believe you've +identified a problem that originates in an upstream dependency and is being widely +distributed by `cobra`, please follow the disclosure procedure above: the `cobra` +maintainers will work with you to determine the severity and ecosystem impact. 
+ +## Security Updates and CVEs + +Information about known security vulnerabilities and CVEs affecting `cobra` will +be published as GitHub Security Advisories at +https://github.com/spf13/cobra/security/advisories. + +All users are encouraged to watch the repository and upgrade promptly when +security releases are published. + +## `cobra` Security Best Practices for Users + +When using `cobra` in your CLIs, the `cobra` maintainers recommend the following: + +1. Always use the latest version of `cobra`. +2. [Use Go modules](https://go.dev/blog/using-go-modules) for dependency management. +3. Always use the latest possible version of Go. + +## Security Best Practices for Contributors + +When contributing to `cobra`: + +1. Be mindful of security implications when adding new features or modifying existing ones. +2. Be aware of `cobra`'s extremely large reach: it is used in nearly every Go CLI + (like Kubernetes, Docker, Prometheus, etc. etc.) +3. Write tests that explicitly cover edge cases and potential issues. +4. If you discover a security issue while working on `cobra`, please report it + following the process above rather than opening a public pull request or issue that + addresses the vulnerability. +5. Take personal sec-ops seriously and secure your GitHub account: use [two-factor authentication](https://docs.github.com/en/authentication/securing-your-account-with-two-factor-authentication-2fa), + [sign your commits with a GPG or SSH key](https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification), + etc. + +## Acknowledgments + +The `cobra` maintainers would like to thank all security researchers and +community members who help keep cobra, its users, and the entire Go ecosystem secure through responsible disclosures!! + +--- + +*This security policy is inspired by the [Open Web Application Security Project (OWASP)](https://owasp.org/) guidelines and security best practices.* diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index dbb2c298..78088db6 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -39,7 +39,7 @@ const ( ) // FParseErrWhitelist configures Flag parse errors to be ignored -type FParseErrWhitelist flag.ParseErrorsWhitelist +type FParseErrWhitelist flag.ParseErrorsAllowlist // Group Structure to manage groups for commands type Group struct { @@ -1296,6 +1296,11 @@ Simply type ` + c.DisplayName() + ` help [path to command] for full details.`, c.Printf("Unknown help topic %#q\n", args) CheckErr(c.Root().Usage()) } else { + // FLow the context down to be used in help text + if cmd.ctx == nil { + cmd.ctx = c.ctx + } + cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown CheckErr(cmd.Help()) @@ -1872,7 +1877,7 @@ func (c *Command) ParseFlags(args []string) error { c.mergePersistentFlags() // do it here after merging all flags and just before parse - c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) + c.Flags().ParseErrorsAllowlist = flag.ParseErrorsAllowlist(c.FParseErrWhitelist) err := c.Flags().Parse(args) // Print warnings if they occurred (e.g. deprecated flag messages). 
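// Illustrative sketch (not part of the vendored diff): pflag's ParseErrorsWhitelist has
// been renamed to ParseErrorsAllowlist (the old name remains as a deprecated alias), and
// cobra now forwards FParseErrWhitelist through the new field, as the hunk above shows.
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.ParseErrorsAllowlist.UnknownFlags = true // tolerate flags this set does not define
	fs.String("known", "", "a known flag")

	if err := fs.Parse([]string{"--unknown=x", "--known=y"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	v, _ := fs.GetString("known")
	fmt.Println("known =", v) // the unknown flag was skipped rather than fatal
}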
@@ -2020,7 +2025,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error { fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages())) } if c.HasHelpSubCommands() { - fmt.Fprintf(w, "\n\nAdditional help topcis:") + fmt.Fprintf(w, "\n\nAdditional help topics:") for _, subcmd := range c.Commands() { if subcmd.IsAdditionalHelpTopicCommand() { fmt.Fprintf(w, "\n %s %s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short) diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index a1752f76..d3607c2d 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -115,6 +115,13 @@ type CompletionOptions struct { DisableDescriptions bool // HiddenDefaultCmd makes the default 'completion' command hidden HiddenDefaultCmd bool + // DefaultShellCompDirective sets the ShellCompDirective that is returned + // if no special directive can be determined + DefaultShellCompDirective *ShellCompDirective +} + +func (receiver *CompletionOptions) SetDefaultShellCompDirective(directive ShellCompDirective) { + receiver.DefaultShellCompDirective = &directive } // Completion is a string that can be used for completions @@ -375,7 +382,7 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo // Error while attempting to parse flags if flagErr != nil { // If error type is flagCompError and we don't want flagCompletion we should ignore the error - if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { + if _, ok := flagErr.(*flagCompError); !ok || flagCompletion { return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr } } @@ -480,6 +487,14 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo } } else { directive = ShellCompDirectiveDefault + // check current and parent commands for a custom DefaultShellCompDirective + for cmd := finalCmd; cmd != nil; cmd = cmd.parent { + if cmd.CompletionOptions.DefaultShellCompDirective != nil { + directive = *cmd.CompletionOptions.DefaultShellCompDirective + break + } + } + if flag == nil { foundLocalNonPersistentFlag := false // If TraverseChildren is true on the root command we don't check for @@ -773,7 +788,7 @@ See each sub-command's help for details on how to use the generated script. // shell completion for it (prog __complete completion '') subCmd, cmdArgs, err := c.Find(args) if err != nil || subCmd.Name() != compCmdName && - !(subCmd.Name() == ShellCompRequestCmd && len(cmdArgs) > 1 && cmdArgs[0] == compCmdName) { + (subCmd.Name() != ShellCompRequestCmd || len(cmdArgs) <= 1 || cmdArgs[0] != compCmdName) { // The completion command is not being called or being completed so we remove it. 
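// Illustrative sketch (not part of the vendored diff): CompletionOptions gains
// DefaultShellCompDirective, letting a command (or any parent) override the
// ShellCompDirectiveDefault fallback used when no more specific directive applies.
package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{
		Use: "app",
		Run: func(cmd *cobra.Command, args []string) {},
	}
	// Parent commands are consulted too, so setting this on the root covers every
	// subcommand that does not set its own default directive.
	root.CompletionOptions.SetDefaultShellCompDirective(cobra.ShellCompDirectiveNoFileComp)
	_ = root.Execute()
}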
c.RemoveCommand(completionCmd) return diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.go b/vendor/github.com/spf13/cobra/doc/man_docs.go index 2138f248..560bc20c 100644 --- a/vendor/github.com/spf13/cobra/doc/man_docs.go +++ b/vendor/github.com/spf13/cobra/doc/man_docs.go @@ -212,7 +212,7 @@ func genMan(cmd *cobra.Command, header *GenManHeader) []byte { manPrintOptions(buf, cmd) if len(cmd.Example) > 0 { buf.WriteString("# EXAMPLE\n") - buf.WriteString(fmt.Sprintf("```\n%s\n```\n", cmd.Example)) + fmt.Fprintf(buf, "```\n%s\n```\n", cmd.Example) } if hasSeeAlso(cmd) { buf.WriteString("# SEE ALSO\n") @@ -240,7 +240,7 @@ func genMan(cmd *cobra.Command, header *GenManHeader) []byte { buf.WriteString(strings.Join(seealsos, ", ") + "\n") } if !cmd.DisableAutoGenTag { - buf.WriteString(fmt.Sprintf("# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006"))) + fmt.Fprintf(buf, "# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006")) } return buf.Bytes() } diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.go b/vendor/github.com/spf13/cobra/doc/md_docs.go index 12592223..6eae7ccf 100644 --- a/vendor/github.com/spf13/cobra/doc/md_docs.go +++ b/vendor/github.com/spf13/cobra/doc/md_docs.go @@ -69,12 +69,12 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) } if cmd.Runnable() { - buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.UseLine())) + fmt.Fprintf(buf, "```\n%s\n```\n\n", cmd.UseLine()) } if len(cmd.Example) > 0 { buf.WriteString("### Examples\n\n") - buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example)) + fmt.Fprintf(buf, "```\n%s\n```\n\n", cmd.Example) } if err := printOptions(buf, cmd, name); err != nil { @@ -87,7 +87,7 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) pname := parent.CommandPath() link := pname + markdownExtension link = strings.ReplaceAll(link, " ", "_") - buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short)) + fmt.Fprintf(buf, "* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short) cmd.VisitParents(func(c *cobra.Command) { if c.DisableAutoGenTag { cmd.DisableAutoGenTag = c.DisableAutoGenTag @@ -105,7 +105,7 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) cname := name + " " + child.Name() link := cname + markdownExtension link = strings.ReplaceAll(link, " ", "_") - buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short)) + fmt.Fprintf(buf, "* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short) } buf.WriteString("\n") } diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.go b/vendor/github.com/spf13/cobra/doc/rest_docs.go index c33acc2b..4901ca98 100644 --- a/vendor/github.com/spf13/cobra/doc/rest_docs.go +++ b/vendor/github.com/spf13/cobra/doc/rest_docs.go @@ -82,13 +82,13 @@ func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, str buf.WriteString("\n" + long + "\n\n") if cmd.Runnable() { - buf.WriteString(fmt.Sprintf("::\n\n %s\n\n", cmd.UseLine())) + fmt.Fprintf(buf, "::\n\n %s\n\n", cmd.UseLine()) } if len(cmd.Example) > 0 { buf.WriteString("Examples\n") buf.WriteString("~~~~~~~~\n\n") - buf.WriteString(fmt.Sprintf("::\n\n%s\n\n", indentString(cmd.Example, " "))) + fmt.Fprintf(buf, "::\n\n%s\n\n", indentString(cmd.Example, " ")) } if err := printOptionsReST(buf, cmd, name); err != nil { @@ -101,7 +101,7 @@ func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, 
str parent := cmd.Parent() pname := parent.CommandPath() ref = strings.ReplaceAll(pname, " ", "_") - buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(pname, ref), parent.Short)) + fmt.Fprintf(buf, "* %s \t - %s\n", linkHandler(pname, ref), parent.Short) cmd.VisitParents(func(c *cobra.Command) { if c.DisableAutoGenTag { cmd.DisableAutoGenTag = c.DisableAutoGenTag @@ -118,7 +118,7 @@ func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, str } cname := name + " " + child.Name() ref = strings.ReplaceAll(cname, " ", "_") - buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(cname, ref), child.Short)) + fmt.Fprintf(buf, "* %s \t - %s\n", linkHandler(cname, ref), child.Short) } buf.WriteString("\n") } diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.go b/vendor/github.com/spf13/cobra/doc/yaml_docs.go index 2b26d6ec..37197170 100644 --- a/vendor/github.com/spf13/cobra/doc/yaml_docs.go +++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.go @@ -153,7 +153,7 @@ func genFlagResult(flags *pflag.FlagSet) []cmdOption { // Todo, when we mark a shorthand is deprecated, but specify an empty message. // The flag.ShorthandDeprecated is empty as the shorthand is deprecated. // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok. - if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 { + if len(flag.ShorthandDeprecated) == 0 && len(flag.Shorthand) > 0 { opt := cmdOption{ flag.Name, flag.Shorthand, diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index d4dfbc5e..2fd3c575 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -137,12 +137,17 @@ const ( PanicOnError ) -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { +// ParseErrorsAllowlist defines the parsing errors that can be ignored +type ParseErrorsAllowlist struct { // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags UnknownFlags bool } +// ParseErrorsWhitelist defines the parsing errors that can be ignored. +// +// Deprecated: use [ParseErrorsAllowlist] instead. This type will be removed in a future release. +type ParseErrorsWhitelist = ParseErrorsAllowlist + // NormalizedName is a flag name that has been normalized according to rules // for the FlagSet (e.g. making '-' and '_' equivalent). type NormalizedName string @@ -158,8 +163,13 @@ type FlagSet struct { // help/usage messages. SortFlags bool - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist + // ParseErrorsAllowlist is used to configure an allowlist of errors + ParseErrorsAllowlist ParseErrorsAllowlist + + // ParseErrorsAllowlist is used to configure an allowlist of errors. + // + // Deprecated: use [FlagSet.ParseErrorsAllowlist] instead. This field will be removed in a future release. + ParseErrorsWhitelist ParseErrorsAllowlist name string parsed bool @@ -928,7 +938,6 @@ func VarP(value Value, name, shorthand, usage string) { // returns the error. func (f *FlagSet) fail(err error) error { if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.Output(), err) f.usage() } return err @@ -986,6 +995,8 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin f.usage() return a, ErrHelp case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // --unknown=unknownval arg ... 
// we do not want to lose arg in this case if len(split) >= 2 { @@ -1044,6 +1055,8 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = ErrHelp return case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // '-f=arg arg ...' // we do not want to lose arg in this case if len(shorthands) > 2 && shorthands[1] == '=' { @@ -1158,12 +1171,12 @@ func (f *FlagSet) Parse(arguments []string) error { } f.parsed = true + f.args = make([]string, 0, len(arguments)) + if len(arguments) == 0 { return nil } - f.args = make([]string, 0, len(arguments)) - set := func(flag *Flag, value string) error { return f.Set(flag.Name, value) } @@ -1174,7 +1187,10 @@ func (f *FlagSet) Parse(arguments []string) error { case ContinueOnError: return err case ExitOnError: - fmt.Println(err) + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) @@ -1200,6 +1216,10 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) case ContinueOnError: return err case ExitOnError: + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go index f563907e..e62eab53 100644 --- a/vendor/github.com/spf13/pflag/golangflag.go +++ b/vendor/github.com/spf13/pflag/golangflag.go @@ -8,6 +8,7 @@ import ( goflag "flag" "reflect" "strings" + "time" ) // go test flags prefixes @@ -113,6 +114,38 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) } +// CopyToGoFlagSet will add all current flags to the given Go flag set. +// Deprecation remarks get copied into the usage description. +// Whenever possible, a flag gets added for which Go flags shows +// a proper type in the help message. +func (f *FlagSet) CopyToGoFlagSet(newSet *goflag.FlagSet) { + f.VisitAll(func(flag *Flag) { + usage := flag.Usage + if flag.Deprecated != "" { + usage += " (DEPRECATED: " + flag.Deprecated + ")" + } + + switch value := flag.Value.(type) { + case *stringValue: + newSet.StringVar((*string)(value), flag.Name, flag.DefValue, usage) + case *intValue: + newSet.IntVar((*int)(value), flag.Name, *(*int)(value), usage) + case *int64Value: + newSet.Int64Var((*int64)(value), flag.Name, *(*int64)(value), usage) + case *uintValue: + newSet.UintVar((*uint)(value), flag.Name, *(*uint)(value), usage) + case *uint64Value: + newSet.Uint64Var((*uint64)(value), flag.Name, *(*uint64)(value), usage) + case *durationValue: + newSet.DurationVar((*time.Duration)(value), flag.Name, *(*time.Duration)(value), usage) + case *float64Value: + newSet.Float64Var((*float64)(value), flag.Name, *(*float64)(value), usage) + default: + newSet.Var(flag.Value, flag.Name, usage) + } + }) +} + // ParseSkippedFlags explicitly Parses go test flags (i.e. the one starting with '-test.') with goflag.Parse(), // since by default those are skipped by pflag.Parse(). 
// Typical usage example: `ParseGoTestFlags(os.Args[1:], goflag.CommandLine)` @@ -125,3 +158,4 @@ func ParseSkippedFlags(osArgs []string, goFlagSet *goflag.FlagSet) error { } return goFlagSet.Parse(skippedFlags) } + diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go index 890a01af..1d1e3bf9 100644 --- a/vendor/github.com/spf13/pflag/string_to_string.go +++ b/vendor/github.com/spf13/pflag/string_to_string.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/csv" "fmt" + "sort" "strings" ) @@ -62,8 +63,15 @@ func (s *stringToStringValue) Type() string { } func (s *stringToStringValue) String() string { + keys := make([]string, 0, len(*s.value)) + for k := range *s.value { + keys = append(keys, k) + } + sort.Strings(keys) + records := make([]string, 0, len(*s.value)>>1) - for k, v := range *s.value { + for _, k := range keys { + v := (*s.value)[k] records = append(records, k+"="+v) } diff --git a/vendor/github.com/spf13/pflag/time.go b/vendor/github.com/spf13/pflag/time.go index dc024807..3dee4247 100644 --- a/vendor/github.com/spf13/pflag/time.go +++ b/vendor/github.com/spf13/pflag/time.go @@ -48,7 +48,13 @@ func (d *timeValue) Type() string { return "time" } -func (d *timeValue) String() string { return d.Time.Format(time.RFC3339Nano) } +func (d *timeValue) String() string { + if d.Time.IsZero() { + return "" + } else { + return d.Time.Format(time.RFC3339Nano) + } +} // GetTime return the time value of a flag with the given name func (f *FlagSet) GetTime(name string) (time.Time, error) { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 7e19eba0..ffb24e8e 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -390,7 +390,8 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -403,7 +404,8 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...) } // Less asserts that the first element is less than the second @@ -415,7 +417,8 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...) 
} // LessOrEqual asserts that the first element is less than or equal to the second @@ -428,7 +431,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...) } // Positive asserts that the specified element is positive @@ -440,7 +444,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not positive", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...) } // Negative asserts that the specified element is negative @@ -452,7 +457,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not negative", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...) } func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { @@ -468,11 +474,11 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare compareResult, isComparable := compare(e1, e2, e1Kind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { - return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) + return Fail(t, failMessage, msgAndArgs...) } return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 19063416..c592f6ad 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -50,10 +50,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". 
// // assert.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -117,10 +126,8 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// assert.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -438,7 +445,19 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) } +// IsNotTypef asserts that the specified objects are not of the same type. +// +// assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...) +} + // IsTypef asserts that the specified objects are of the same type. +// +// assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -585,8 +604,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if assert.NotEmptyf(t, obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -693,12 +711,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -782,11 +803,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) 
or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 21629087..58db9284 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -92,10 +92,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st return ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -103,10 +112,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { return Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -224,10 +242,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -297,10 +313,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -868,7 +882,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in return IsNonIncreasingf(a.t, object, msg, args...) } +// IsNotType asserts that the specified objects are not of the same type. +// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotType(a.t, theType, object, msgAndArgs...) +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -877,6 +913,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1162,8 +1200,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg return NotElementsMatchf(a.t, listA, listB, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1175,8 +1212,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo return NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1378,12 +1414,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1391,12 +1430,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1556,11 +1598,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subset([1, 2, 3], [1, 2]) // a.Subset({"x": 1, "y": 2}, {"x": 1}) +// a.Subset([1, 2, 3], {1: "one", 2: "two"}) +// a.Subset({"x": 1, "y": 2}, ["x"]) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1568,11 +1614,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1d2f7182..2fdf80fd 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -33,7 +33,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareR compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 4e91332b..de8de0cb 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -210,59 +210,77 @@ the problem actually occurred in calling code.*/ // of each stack frame leading from the current test to the assert call that // failed. func CallerInfo() []string { - var pc uintptr - var ok bool var file string var line int var name string + const stackFrameBufferSize = 10 + pcs := make([]uintptr, stackFrameBufferSize) + callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. + offset := 1 + + for { + n := runtime.Callers(offset, pcs) + + if n == 0 { break } - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } + frames := runtime.CallersFrames(pcs[:n]) - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() + for { + frame, more := frames.Next() + pc = frame.PC + file = frame.File + line = frame.Line - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. - if name == "testing.tRunner" { - break - } + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break + } - parts := strings.Split(file, "/") - if len(parts) > 1 { - filename := parts[len(parts)-1] - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. 
Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. + if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + if len(parts) > 1 { + filename := parts[len(parts)-1] + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + + // Drop the package + dotPos := strings.LastIndexByte(name, '.') + name = name[dotPos+1:] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + + if !more { + break } } - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break - } + // Next batch + offset += cap(pcs) } return callers @@ -437,17 +455,34 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, return true } +func isType(expectedType, object interface{}) bool { + return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) +} + // IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { +// +// assert.IsType(t, &MyStruct{}, &MyStruct{}) +func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool { + if isType(expectedType, object) { + return true + } if h, ok := t.(tHelper); ok { h.Helper() } + return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...) +} - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) +// IsNotType asserts that the specified objects are not of the same type. +// +// assert.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool { + if !isType(theType, object) { + return true } - - return true + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...) } // Equal asserts that two objects are equal. @@ -475,7 +510,6 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) } return true - } // validateEqualArgs checks whether provided arguments can be safely used in the @@ -510,8 +544,9 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b if !same { // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ - "expected: %p %#v\n"+ - "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) + "expected: %p %#[1]v\n"+ + "actual : %p %#[2]v", + expected, actual), msgAndArgs...) 
} return true @@ -530,14 +565,14 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} same, ok := samePointers(expected, actual) if !ok { - //fails when the arguments are not pointers + // fails when the arguments are not pointers return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) } if same { return Fail(t, fmt.Sprintf( - "Expected and actual point to the same object: %p %#v", - expected, expected), msgAndArgs...) + "Expected and actual point to the same object: %p %#[1]v", + expected), msgAndArgs...) } return true } @@ -549,7 +584,7 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false, false //not both are pointers + return false, false // not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) @@ -610,7 +645,6 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa } return true - } // EqualExportedValues asserts that the types of two objects are equal and their public @@ -665,7 +699,6 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} } return Equal(t, expected, actual, msgAndArgs...) - } // NotNil asserts that the specified object is not nil. @@ -715,37 +748,45 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { // isEmpty gets whether the specified object is considered empty or not. func isEmpty(object interface{}) bool { - // get nil case out of the way if object == nil { return true } - objValue := reflect.ValueOf(object) - - switch objValue.Kind() { - // collection types are empty when they have no element - case reflect.Chan, reflect.Map, reflect.Slice: - return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty - case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - // array types are empty when they match their zero-initialized state - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) - } + return isEmptyValue(reflect.ValueOf(object)) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// isEmptyValue gets whether the specified reflect.Value is considered empty or not. +func isEmptyValue(objValue reflect.Value) bool { + if objValue.IsZero() { + return true + } + // Special cases of non-zero values that we consider empty + switch objValue.Kind() { + // collection types are empty when they have no element + // Note: array types are empty when they match their zero-initialized state. + case reflect.Chan, reflect.Map, reflect.Slice: + return objValue.Len() == 0 + // non-nil pointers are empty if the value they point to is empty + case reflect.Ptr: + return isEmptyValue(objValue.Elem()) + } + return false +} + +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". 
// // assert.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { pass := isEmpty(object) if !pass { @@ -756,11 +797,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if assert.NotEmpty(t, obj) { // assert.Equal(t, "two", obj[1]) @@ -775,7 +814,6 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } // getLen tries to get the length of an object. @@ -819,7 +857,6 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // False asserts that the specified value is false. @@ -834,7 +871,6 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // NotEqual asserts that the specified values are NOT equal. @@ -857,7 +893,6 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ } return true - } // NotEqualValues asserts that two objects are not equal even when converted to the same type @@ -880,7 +915,6 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (true, false) if element was not found. // return (true, true) if element was found. func containsElement(list interface{}, element interface{}) (ok, found bool) { - listValue := reflect.ValueOf(list) listType := reflect.TypeOf(list) if listType == nil { @@ -915,7 +949,6 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) { } } return true, false - } // Contains asserts that the specified string, list(array, slice...) or map contains the @@ -938,7 +971,6 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo } return true - } // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the @@ -961,14 +993,17 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } return true - } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.Subset(t, [1, 2, 3], [1, 2]) // assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// assert.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -983,7 +1018,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) 
} @@ -1007,6 +1042,13 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) @@ -1021,12 +1063,15 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.NotSubset(t, [1, 3, 4], [1, 2]) // assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// assert.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1041,7 +1086,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1065,11 +1110,18 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...) } if !found { return true @@ -1591,10 +1643,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// assert.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { if err == nil { if h, ok := t.(tHelper); ok { @@ -1667,7 +1717,6 @@ func matchRegexp(rx interface{}, str interface{}) bool { default: return r.MatchString(fmt.Sprint(v)) } - } // Regexp asserts that a specified regexp matches a string. @@ -1703,7 +1752,6 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf } return !match - } // Zero asserts that i is the zero value for its type. @@ -1814,6 +1862,11 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) 
} + // Shortcut if same bytes + if actual == expected { + return true + } + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1832,6 +1885,11 @@ func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1933,6 +1991,7 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1940,18 +1999,23 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return true } - tick = ticker.C + tickC = ticker.C } } } @@ -1964,6 +2028,9 @@ type CollectT struct { errors []error } +// Helper is like [testing.T.Helper] but does nothing. +func (CollectT) Helper() {} + // Errorf collects the error. func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) @@ -2021,35 +2088,42 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time var lastFinishedTickErrs []error ch := make(chan *CollectT, 1) + checkCond := func() { + collect := new(CollectT) + defer func() { + ch <- collect + }() + condition(collect) + } + timer := time.NewTimer(waitFor) defer timer.Stop() ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: for _, err := range lastFinishedTickErrs { t.Errorf("%v", err) } return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - go func() { - collect := new(CollectT) - defer func() { - ch <- collect - }() - condition(collect) - }() + case <-tickC: + tickC = nil + go checkCond() case collect := <-ch: if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. lastFinishedTickErrs = collect.errors - tick = ticker.C + tickC = ticker.C } } } @@ -2064,6 +2138,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -2071,18 +2146,23 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. 
+ go checkCond() + + for { select { case <-timer.C: return true - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return Fail(t, "Condition satisfied", msgAndArgs...) } - tick = ticker.C + tickC = ticker.C } } } @@ -2100,9 +2180,12 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { var expectedText string if target != nil { expectedText = target.Error() + if err == nil { + return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...) + } } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ "expected: %q\n"+ @@ -2125,7 +2208,7 @@ func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { expectedText = target.Error() } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ "found: %q\n"+ @@ -2143,11 +2226,17 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ return true } - chain := buildErrorChainString(err) + expectedType := reflect.TypeOf(target).Elem().String() + if err == nil { + return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+ + "expected: %s", expectedType), msgAndArgs...) + } + + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ - "expected: %q\n"+ - "in chain: %s", target, chain, + "expected: %s\n"+ + "in chain: %s", expectedType, chain, ), msgAndArgs...) } @@ -2161,24 +2250,46 @@ func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interfa return true } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ - "found: %q\n"+ - "in chain: %s", target, chain, + "found: %s\n"+ + "in chain: %s", reflect.TypeOf(target).Elem().String(), chain, ), msgAndArgs...) } -func buildErrorChainString(err error) string { +func unwrapAll(err error) (errs []error) { + errs = append(errs, err) + switch x := err.(type) { + case interface{ Unwrap() error }: + err = x.Unwrap() + if err == nil { + return + } + errs = append(errs, unwrapAll(err)...) + case interface{ Unwrap() []error }: + for _, err := range x.Unwrap() { + errs = append(errs, unwrapAll(err)...) + } + } + return +} + +func buildErrorChainString(err error, withType bool) string { if err == nil { return "" } - e := errors.Unwrap(err) - chain := fmt.Sprintf("%q", err.Error()) - for e != nil { - chain += fmt.Sprintf("\n\t%q", e.Error()) - e = errors.Unwrap(e) + var chain string + errs := unwrapAll(err) + for i := range errs { + if i != 0 { + chain += "\n\t" + } + chain += fmt.Sprintf("%q", errs[i].Error()) + if withType { + chain += fmt.Sprintf(" (%T)", errs[i]) + } } return chain } diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go index 4953981d..a0b953aa 100644 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -1,5 +1,9 @@ // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. // +// # Note +// +// All functions in this package return a bool value indicating whether the assertion has passed. 
+// // # Example Usage // // The following is a complete example using assert in a standard test function: diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index 861ed4b7..5a6bb75f 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -138,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -158,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return !contains diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go index baa0cc7d..5a74c4f4 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -1,5 +1,4 @@ //go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default -// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default // Package yaml is an implementation of YAML functions that calls a pluggable implementation. // diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go index b83c6cf6..0bae80e3 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -1,5 +1,4 @@ //go:build !testify_yaml_fail && !testify_yaml_custom -// +build !testify_yaml_fail,!testify_yaml_custom // Package yaml is just an indirection to handle YAML deserialization. // diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go index e78f7dfe..8041803f 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -1,5 +1,4 @@ //go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default -// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default // Package yaml is an implementation of YAML functions that always fail. // diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go index e854d7e8..2950fdb4 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go @@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) { } // unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. 
-func unmarshalJSON(dst []byte, src []byte) error { +func unmarshalJSON(dst, src []byte) error { if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { src = src[1 : l-1] } diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go index 29e629d6..5bb3b16c 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go @@ -41,7 +41,7 @@ func (i *protoInt64) UnmarshalJSON(data []byte) error { // strings or integers. type protoUint64 uint64 -// Int64 returns the protoUint64 as a uint64. +// Uint64 returns the protoUint64 as a uint64. func (i *protoUint64) Uint64() uint64 { return uint64(*i) } // UnmarshalJSON decodes both strings and integers. diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go index a13a6b73..67f80b6a 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "io" + "math" "time" ) @@ -151,8 +152,8 @@ func (s Span) MarshalJSON() ([]byte, error) { }{ Alias: Alias(s), ParentSpanID: parentSpanId, - StartTime: uint64(startT), - EndTime: uint64(endT), + StartTime: uint64(startT), // nolint:gosec // >0 checked above. + EndTime: uint64(endT), // nolint:gosec // >0 checked above. }) } @@ -201,11 +202,13 @@ func (s *Span) UnmarshalJSON(data []byte) error { case "startTimeUnixNano", "start_time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - s.StartTime = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + s.StartTime = time.Unix(0, v) case "endTimeUnixNano", "end_time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - s.EndTime = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + s.EndTime = time.Unix(0, v) case "attributes": err = decoder.Decode(&s.Attrs) case "droppedAttributesCount", "dropped_attributes_count": @@ -248,13 +251,20 @@ func (s *Span) UnmarshalJSON(data []byte) error { type SpanFlags int32 const ( + // SpanFlagsTraceFlagsMask is a mask for trace-flags. + // // Bits 0-7 are used for trace flags. SpanFlagsTraceFlagsMask SpanFlags = 255 - // Bits 8 and 9 are used to indicate that the parent span or link span is remote. - // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. SpanFlagsContextHasIsRemoteMask SpanFlags = 256 - // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is + // remote. SpanFlagsContextIsRemoteMask SpanFlags = 512 ) @@ -263,26 +273,30 @@ const ( type SpanKind int32 const ( - // Indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. 
+ // SpanKindInternal indicates that the span represents an internal + // operation within an application, as opposed to an operation happening at + // the boundaries. SpanKindInternal SpanKind = 1 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. + // SpanKindServer indicates that the span covers server-side handling of an + // RPC or other remote network request. SpanKindServer SpanKind = 2 - // Indicates that the span describes a request to some remote service. + // SpanKindClient indicates that the span describes a request to some + // remote service. SpanKindClient SpanKind = 3 - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. + // SpanKindProducer indicates that the span describes a producer sending a + // message to a broker. Unlike SpanKindClient and SpanKindServer, there is + // often no direct critical path latency relationship between producer and + // consumer spans. A SpanKindProducer span ends when the message was + // accepted by the broker while the logical processing of the message might + // span a much longer time. SpanKindProducer SpanKind = 4 - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. + // SpanKindConsumer indicates that the span describes a consumer receiving + // a message from a broker. Like SpanKindProducer, there is often no direct + // critical path latency relationship between producer and consumer spans. SpanKindConsumer SpanKind = 5 ) -// Event is a time-stamped annotation of the span, consisting of user-supplied +// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied // text description and key-value pairs. type SpanEvent struct { // time_unix_nano is the time the event occurred. @@ -312,7 +326,7 @@ func (e SpanEvent) MarshalJSON() ([]byte, error) { Time uint64 `json:"timeUnixNano,omitempty"` }{ Alias: Alias(e), - Time: uint64(t), + Time: uint64(t), //nolint:gosec // >0 checked above }) } @@ -347,7 +361,8 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { case "timeUnixNano", "time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - se.Time = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + se.Time = time.Unix(0, v) case "name": err = decoder.Decode(&se.Name) case "attributes": @@ -365,10 +380,11 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { return nil } -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. +// SpanLink is a reference from the current span to another span in the same +// trace or in a different trace. For example, this can be used in batching +// operations, where a single batch handler processes multiple requests from +// different traces or when the handler receives a request from a different +// project. 
type SpanLink struct { // A unique identifier of a trace that this linked span is part of. The ID is a // 16-byte array. diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go index 1217776e..a2802764 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -3,17 +3,19 @@ package telemetry +// StatusCode is the status of a Span. +// // For the semantics of status codes see // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status type StatusCode int32 const ( - // The default status. + // StatusCodeUnset is the default status. StatusCodeUnset StatusCode = 0 - // The Span has been validated by an Application developer or Operator to - // have completed successfully. + // StatusCodeOK is used when the Span has been validated by an Application + // developer or Operator to have completed successfully. StatusCodeOK StatusCode = 1 - // The Span contains an error. + // StatusCodeError is used when the Span contains an error. StatusCodeError StatusCode = 2 ) diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go index 69a348f0..44197b80 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error { return nil } -// A collection of ScopeSpans from a Resource. +// ResourceSpans is a collection of ScopeSpans from a Resource. type ResourceSpans struct { // The resource for the spans in this message. // If this field is not set then no resource info is known. @@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { return nil } -// A collection of Spans produced by an InstrumentationScope. +// ScopeSpans is a collection of Spans produced by an InstrumentationScope. type ScopeSpans struct { // The instrumentation scope information for the spans in this message. // Semantically when InstrumentationScope isn't set, it is equivalent with diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go index 0dd01b06..022768bb 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go @@ -1,8 +1,6 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -//go:generate stringer -type=ValueKind -trimprefix=ValueKind - package telemetry import ( @@ -23,7 +21,7 @@ import ( // A zero value is valid and represents an empty value. type Value struct { // Ensure forward compatibility by explicitly making this not comparable. - noCmp [0]func() //nolint: unused // This is indeed used. + noCmp [0]func() //nolint:unused // This is indeed used. // num holds the value for Int64, Float64, and Bool. It holds the length // for String, Bytes, Slice, Map. @@ -92,7 +90,7 @@ func IntValue(v int) Value { return Int64Value(int64(v)) } // Int64Value returns a [Value] for an int64. func Int64Value(v int64) Value { - return Value{num: uint64(v), any: ValueKindInt64} + return Value{num: uint64(v), any: ValueKindInt64} //nolint:gosec // Raw value conv. } // Float64Value returns a [Value] for a float64. 
@@ -164,7 +162,7 @@ func (v Value) AsInt64() int64 { // this will return garbage. func (v Value) asInt64() int64 { // Assumes v.num was a valid int64 (overflow not checked). - return int64(v.num) // nolint: gosec + return int64(v.num) //nolint:gosec // Bounded. } // AsBool returns the value held by v as a bool. @@ -309,13 +307,13 @@ func (v Value) String() string { return v.asString() case ValueKindInt64: // Assumes v.num was a valid int64 (overflow not checked). - return strconv.FormatInt(int64(v.num), 10) // nolint: gosec + return strconv.FormatInt(int64(v.num), 10) //nolint:gosec // Bounded. case ValueKindFloat64: return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) case ValueKindBool: return strconv.FormatBool(v.asBool()) case ValueKindBytes: - return fmt.Sprint(v.asBytes()) + return string(v.asBytes()) case ValueKindMap: return fmt.Sprint(v.asMap()) case ValueKindSlice: @@ -343,7 +341,7 @@ func (v *Value) MarshalJSON() ([]byte, error) { case ValueKindInt64: return json.Marshal(struct { Value string `json:"intValue"` - }{strconv.FormatInt(int64(v.num), 10)}) + }{strconv.FormatInt(int64(v.num), 10)}) //nolint:gosec // Raw value conv. case ValueKindFloat64: return json.Marshal(struct { Value float64 `json:"doubleValue"` diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go index 6ebea12a..815d271f 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/span.go +++ b/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -6,6 +6,7 @@ package sdk import ( "encoding/json" "fmt" + "math" "reflect" "runtime" "strings" @@ -16,7 +17,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/noop" @@ -85,7 +86,12 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) { limit := maxSpan.Attrs if limit == 0 { // No attributes allowed. - s.span.DroppedAttrs += uint32(len(attrs)) + n := int64(len(attrs)) + if n > 0 { + s.span.DroppedAttrs += uint32( //nolint:gosec // Bounds checked. + min(n, math.MaxUint32), + ) + } return } @@ -121,8 +127,13 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) { // convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The // number of dropped attributes is also returned. func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + n := len(attrs) if limit == 0 { - return nil, uint32(len(attrs)) + var out uint32 + if n > 0 { + out = uint32(min(int64(n), math.MaxUint32)) //nolint:gosec // Bounds checked. + } + return nil, out } if limit < 0 { @@ -130,8 +141,12 @@ func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, u return convAttrs(attrs), 0 } - limit = min(len(attrs), limit) - return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) + if n < 0 { + n = 0 + } + + limit = min(n, limit) + return convAttrs(attrs[:limit]), uint32(n - limit) //nolint:gosec // Bounds checked. 
} func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go index cbcfabde..e09acf02 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/tracer.go +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -5,6 +5,7 @@ package sdk import ( "context" + "math" "time" "go.opentelemetry.io/otel/trace" @@ -21,15 +22,20 @@ type tracer struct { var _ trace.Tracer = tracer{} -func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - var psc trace.SpanContext +func (t tracer) Start( + ctx context.Context, + name string, + opts ...trace.SpanStartOption, +) (context.Context, trace.Span) { + var psc, sc trace.SpanContext sampled := true span := new(span) // Ask eBPF for sampling decision and span context info. - t.start(ctx, span, &psc, &sampled, &span.spanContext) + t.start(ctx, span, &psc, &sampled, &sc) span.sampled.Store(sampled) + span.spanContext = sc ctx = trace.ContextWithSpan(ctx, span) @@ -58,7 +64,13 @@ func (t *tracer) start( // start is used for testing. var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} -func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { +var intToUint32Bound = min(math.MaxInt, math.MaxUint32) + +func (t tracer) traces( + name string, + cfg trace.SpanConfig, + sc, psc trace.SpanContext, +) (*telemetry.Traces, *telemetry.Span) { span := &telemetry.Span{ TraceID: telemetry.TraceID(sc.TraceID()), SpanID: telemetry.SpanID(sc.SpanID()), @@ -73,11 +85,16 @@ func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanCont links := cfg.Links() if limit := maxSpan.Links; limit == 0 { - span.DroppedLinks = uint32(len(links)) + n := len(links) + if n > 0 { + bounded := max(min(n, intToUint32Bound), 0) + span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. + } } else { if limit > 0 { n := max(len(links)-limit, 0) - span.DroppedLinks = uint32(n) + bounded := min(n, intToUint32Bound) + span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. links = links[n:] } span.Links = convLinks(links) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index b25641c5..521daa25 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -18,7 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, http.NoBody) if err != nil { return nil, err } @@ -27,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) // Head is a convenient replacement for http.Head that adds a span around the request. 
func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, http.NoBody) if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index 6bd50d4c..38fb79c0 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -8,9 +8,8 @@ import ( "net/http" "net/http/httptrace" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 937f9b4e..fef83b42 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -8,13 +8,13 @@ import ( "time" "github.com/felixge/httpsnoop" - - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" ) // middleware is an http middleware which wraps the next handler in a span. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 7edc8d10..821b80ec 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -10,14 +10,13 @@ import ( "context" "fmt" "net/http" - "os" "strings" "sync" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/semconv/v1.34.0/httpconv" + "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv" ) // OTelSemConvStabilityOptIn is an environment variable. @@ -33,14 +32,6 @@ type ResponseTelemetry struct { } type HTTPServer struct { - duplicate bool - - // Old metrics - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - serverLatencyMeasure metric.Float64Histogram - - // New metrics requestBodySizeHistogram httpconv.ServerRequestBodySize responseBodySizeHistogram httpconv.ServerResponseBodySize requestDurationHistogram httpconv.ServerRequestDuration @@ -63,20 +54,10 @@ type HTTPServer struct { // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. 
func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { - attrs := CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts) - if s.duplicate { - return OldHTTPServer{}.RequestTraceAttrs(server, req, attrs) - } - return attrs + return CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts) } func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { - if s.duplicate { - return []attribute.KeyValue{ - OldHTTPServer{}.NetworkTransportAttr(network), - CurrentHTTPServer{}.NetworkTransportAttr(network), - } - } return []attribute.KeyValue{ CurrentHTTPServer{}.NetworkTransportAttr(network), } @@ -86,11 +67,7 @@ func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { - attrs := CurrentHTTPServer{}.ResponseTraceAttrs(resp) - if s.duplicate { - return OldHTTPServer{}.ResponseTraceAttrs(resp, attrs) - } - return attrs + return CurrentHTTPServer{}.ResponseTraceAttrs(resp) } // Route returns the attribute for the route. @@ -134,13 +111,13 @@ type MetricData struct { var ( metricAddOptionPool = &sync.Pool{ - New: func() interface{} { + New: func() any { return &[]metric.AddOption{} }, } metricRecordOptionPool = &sync.Pool{ - New: func() interface{} { + New: func() any { return &[]metric.RecordOption{} }, } @@ -156,18 +133,6 @@ func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { s.requestDurationHistogram.Inst().Record(ctx, md.ElapsedTime/1000.0, o) *recordOpts = (*recordOpts)[:0] metricRecordOptionPool.Put(recordOpts) - - if s.duplicate && s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil { - attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) - *addOpts = append(*addOpts, o) - s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) - s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) 
- s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) - *addOpts = (*addOpts)[:0] - metricAddOptionPool.Put(addOpts) - } } // hasOptIn returns true if the comma-separated version string contains the @@ -182,11 +147,7 @@ func hasOptIn(version, optIn string) bool { } func NewHTTPServer(meter metric.Meter) HTTPServer { - env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) - duplicate := hasOptIn(env, "http/dup") - server := HTTPServer{ - duplicate: duplicate, - } + server := HTTPServer{} var err error server.requestBodySizeHistogram, err = httpconv.NewServerRequestBodySize(meter) @@ -203,32 +164,16 @@ func NewHTTPServer(meter metric.Meter) HTTPServer { ), ) handleErr(err) - - if duplicate { - server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) - } return server } type HTTPClient struct { - duplicate bool - - // old metrics - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - latencyMeasure metric.Float64Histogram - - // new metrics requestBodySize httpconv.ClientRequestBodySize requestDuration httpconv.ClientRequestDuration } func NewHTTPClient(meter metric.Meter) HTTPClient { - env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) - duplicate := hasOptIn(env, "http/dup") - client := HTTPClient{ - duplicate: duplicate, - } + client := HTTPClient{} var err error client.requestBodySize, err = httpconv.NewClientRequestBodySize(meter) @@ -240,29 +185,17 @@ func NewHTTPClient(meter metric.Meter) HTTPClient { ) handleErr(err) - if duplicate { - client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) - } - return client } // RequestTraceAttrs returns attributes for an HTTP request made by a client. func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { - attrs := CurrentHTTPClient{}.RequestTraceAttrs(req) - if c.duplicate { - return OldHTTPClient{}.RequestTraceAttrs(req, attrs) - } - return attrs + return CurrentHTTPClient{}.RequestTraceAttrs(req) } // ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. 
func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { - attrs := CurrentHTTPClient{}.ResponseTraceAttrs(resp) - if c.duplicate { - return OldHTTPClient{}.ResponseTraceAttrs(resp, attrs) - } - return attrs + return CurrentHTTPClient{}.ResponseTraceAttrs(resp) } func (c HTTPClient) Status(code int) (codes.Code, string) { @@ -302,42 +235,14 @@ func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { addOptions: set, } - if c.duplicate { - attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) - set := metric.WithAttributeSet(attribute.NewSet(attributes...)) - opts["old"] = MetricOpts{ - measurement: set, - addOptions: set, - } - } - return opts } func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { s.requestBodySize.Inst().Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) s.requestDuration.Inst().Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption()) - - if s.duplicate { - s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions()) - s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption()) - } -} - -func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) { - if s.responseBytesCounter == nil { - // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). - return - } - - s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions()) } func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue { - attrs := CurrentHTTPClient{}.TraceAttributes(host) - if s.duplicate { - return OldHTTPClient{}.TraceAttributes(host, attrs) - } - - return attrs + return CurrentHTTPClient{}.TraceAttributes(host) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go index b4036dd9..1bb207b8 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go @@ -13,4 +13,3 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/ //go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconvtest_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconvtest_test.go //go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util.go //go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=v1.20.0.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 9766f3ac..28c51a3b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ 
b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -17,7 +17,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.34.0" + semconvNew "go.opentelemetry.io/otel/semconv/v1.37.0" ) type RequestTraceAttrsOpts struct { @@ -194,7 +194,7 @@ func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute. return semconvNew.HTTPRequestMethodGet, orig } -func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { //nolint:revive // ignore linter if https { return semconvNew.URLScheme("https") } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index e4ebf858..96422ad1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -14,7 +14,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.34.0" + semconvNew "go.opentelemetry.io/otel/semconv/v1.37.0" ) // SplitHostPort splits a network address hostport of the form "host", @@ -53,10 +53,10 @@ func SplitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) // nolint: gosec // Byte size checked 16 above. + return host, int(p) //nolint:gosec // Byte size checked 16 above. } -func requiredHTTPPort(https bool, port int) int { // nolint:revive +func requiredHTTPPort(https bool, port int) int { //nolint:revive // ignore linter if https { if port > 0 && port != 443 { return port diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go deleted file mode 100644 index ba7fccf1..00000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ /dev/null @@ -1,273 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/semconv/v120.0.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - -import ( - "errors" - "io" - "net/http" - "slices" - - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" -) - -type OldHTTPServer struct{} - -// RequestTraceAttrs returns trace attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. 
It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { - return semconvutil.HTTPServerRequest(server, req, semconvutil.HTTPServerRequestOptions{}, attrs) -} - -func (o OldHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { - return semconvutil.NetTransport(network) -} - -// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. -// -// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry, attributes []attribute.KeyValue) []attribute.KeyValue { - if resp.ReadBytes > 0 { - attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) - } - if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) { - // This is not in the semantic conventions, but is historically provided - attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error())) - } - if resp.WriteBytes > 0 { - attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes))) - } - if resp.StatusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode)) - } - if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) { - // This is not in the semantic conventions, but is historically provided - attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error())) - } - - return attributes -} - -// Route returns the attribute for the route. -func (o OldHTTPServer) Route(route string) attribute.KeyValue { - return semconv.HTTPRoute(route) -} - -// HTTPStatusCode returns the attribute for the HTTP status code. -// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added. -func HTTPStatusCode(status int) attribute.KeyValue { - return semconv.HTTPStatusCode(status) -} - -// Server HTTP metrics. 
-const ( - serverRequestSize = "http.server.request.size" // Incoming request bytes total - serverResponseSize = "http.server.response.size" // Incoming response bytes total - serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds -) - -func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { - if meter == nil { - return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} - } - var err error - requestBytesCounter, err := meter.Int64Counter( - serverRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - responseBytesCounter, err := meter.Int64Counter( - serverResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - serverLatencyMeasure, err := meter.Float64Histogram( - serverDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of inbound HTTP requests."), - ) - handleErr(err) - - return requestBytesCounter, responseBytesCounter, serverLatencyMeasure -} - -func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { - n := len(additionalAttributes) + 3 - var host string - var p int - if server == "" { - host, p = SplitHostPort(req.Host) - } else { - // Prioritize the primary server name. - host, p = SplitHostPort(server) - if p < 0 { - _, p = SplitHostPort(req.Host) - } - } - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - n++ - } - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" { - n++ - } - if protoVersion != "" { - n++ - } - - if statusCode > 0 { - n++ - } - - attributes := slices.Grow(additionalAttributes, n) - attributes = append(attributes, - semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), - o.scheme(req.TLS != nil), - semconv.NetHostName(host)) - - if hostPort > 0 { - attributes = append(attributes, semconv.NetHostPort(hostPort)) - } - if protoName != "" { - attributes = append(attributes, semconv.NetProtocolName(protoName)) - } - if protoVersion != "" { - attributes = append(attributes, semconv.NetProtocolVersion(protoVersion)) - } - - if statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) - } - return attributes -} - -func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive - if https { - return semconv.HTTPSchemeHTTPS - } - return semconv.HTTPSchemeHTTP -} - -type OldHTTPClient struct{} - -func (o OldHTTPClient) RequestTraceAttrs(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { - return semconvutil.HTTPClientRequest(req, attrs) -} - -func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { - return semconvutil.HTTPClientResponse(resp, attrs) -} - -func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.method string - http.status_code int - net.peer.name string - net.peer.port int - */ - - n := 2 // method, peer name. 
- var h string - if req.URL != nil { - h = req.URL.Host - } - var requestHost string - var requestPort int - for _, hostport := range []string{h, req.Header.Get("Host")} { - requestHost, requestPort = SplitHostPort(hostport) - if requestHost != "" || requestPort > 0 { - break - } - } - - port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) - if port > 0 { - n++ - } - - if statusCode > 0 { - n++ - } - - attributes := slices.Grow(additionalAttributes, n) - attributes = append(attributes, - semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), - semconv.NetPeerName(requestHost), - ) - - if port > 0 { - attributes = append(attributes, semconv.NetPeerPort(port)) - } - - if statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) - } - return attributes -} - -// Client HTTP metrics. -const ( - clientRequestSize = "http.client.request.size" // Incoming request bytes total - clientResponseSize = "http.client.response.size" // Incoming response bytes total - clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds -) - -func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { - if meter == nil { - return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} - } - requestBytesCounter, err := meter.Int64Counter( - clientRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - responseBytesCounter, err := meter.Int64Counter( - clientResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - latencyMeasure, err := meter.Float64Histogram( - clientDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of outbound HTTP requests."), - ) - handleErr(err) - - return requestBytesCounter, responseBytesCounter, latencyMeasure -} - -// TraceAttributes returns attributes for httptrace. 
-func (c OldHTTPClient) TraceAttributes(host string, attrs []attribute.KeyValue) []attribute.KeyValue { - return append(attrs, semconv.NetHostName(host)) -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go deleted file mode 100644 index 7aa5f99e..00000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" - -// Generate semconvutil package: -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go deleted file mode 100644 index b9973547..00000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ /dev/null @@ -1,594 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/semconvutil/httpconv.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconvutil provides OpenTelemetry semantic convention utilities. -package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" - -import ( - "fmt" - "net/http" - "slices" - "strings" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" -) - -type HTTPServerRequestOptions struct { - // If set, this is used as value for the "http.client_ip" attribute. - HTTPClientIP string -} - -// HTTPClientResponse returns trace attributes for an HTTP response received by a -// client from a server. It will return the following attributes if the related -// values are defined in resp: "http.status.code", -// "http.response_content_length". -// -// This does not add all OpenTelemetry required attributes for an HTTP event, -// it assumes ClientRequest was used to create the span with a complete set of -// attributes. If a complete set of attributes can be generated using the -// request contained in resp. For example: -// -// HTTPClientResponse(resp, ClientRequest(resp.Request))) -func HTTPClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { - return hc.ClientResponse(resp, attrs) -} - -// HTTPClientRequest returns trace attributes for an HTTP request made by a client. -// The following attributes are always returned: "http.url", "http.method", -// "net.peer.name". The following attributes are returned if the related values -// are defined in req: "net.peer.port", "user_agent.original", -// "http.request_content_length". 
-func HTTPClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { - return hc.ClientRequest(req, attrs) -} - -// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client. -// The following attributes are always returned: "http.method", "net.peer.name". -// The following attributes are returned if the -// related values are defined in req: "net.peer.port". -func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue { - return hc.ClientRequestMetrics(req) -} - -// HTTPClientStatus returns a span status code and message for an HTTP status code -// value received by a client. -func HTTPClientStatus(code int) (codes.Code, string) { - return hc.ClientStatus(code) -} - -// HTTPServerRequest returns trace attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "http.target", "net.host.name". The following attributes are returned if -// they related values are defined in req: "net.host.port", "net.sock.peer.addr", -// "net.sock.peer.port", "user_agent.original", "http.client_ip". -func HTTPServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue { - return hc.ServerRequest(server, req, opts, attrs) -} - -// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "net.host.name". The following attributes are returned if they related -// values are defined in req: "net.host.port". -func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { - return hc.ServerRequestMetrics(server, req) -} - -// HTTPServerStatus returns a span status code and message for an HTTP status code -// value returned by a server. Status codes in the 400-499 range are not -// returned as errors. 
-func HTTPServerStatus(code int) (codes.Code, string) { - return hc.ServerStatus(code) -} - -// httpConv are the HTTP semantic convention attributes defined for a version -// of the OpenTelemetry specification. -type httpConv struct { - NetConv *netConv - - HTTPClientIPKey attribute.Key - HTTPMethodKey attribute.Key - HTTPRequestContentLengthKey attribute.Key - HTTPResponseContentLengthKey attribute.Key - HTTPRouteKey attribute.Key - HTTPSchemeHTTP attribute.KeyValue - HTTPSchemeHTTPS attribute.KeyValue - HTTPStatusCodeKey attribute.Key - HTTPTargetKey attribute.Key - HTTPURLKey attribute.Key - UserAgentOriginalKey attribute.Key -} - -var hc = &httpConv{ - NetConv: nc, - - HTTPClientIPKey: semconv.HTTPClientIPKey, - HTTPMethodKey: semconv.HTTPMethodKey, - HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, - HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, - HTTPRouteKey: semconv.HTTPRouteKey, - HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, - HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, - HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, - HTTPTargetKey: semconv.HTTPTargetKey, - HTTPURLKey: semconv.HTTPURLKey, - UserAgentOriginalKey: semconv.UserAgentOriginalKey, -} - -// ClientResponse returns attributes for an HTTP response received by a client -// from a server. The following attributes are returned if the related values -// are defined in resp: "http.status.code", "http.response_content_length". -// -// This does not add all OpenTelemetry required attributes for an HTTP event, -// it assumes ClientRequest was used to create the span with a complete set of -// attributes. If a complete set of attributes can be generated using the -// request contained in resp. For example: -// -// ClientResponse(resp, ClientRequest(resp.Request)) -func (c *httpConv) ClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.status_code int - http.response_content_length int - */ - var n int - if resp.StatusCode > 0 { - n++ - } - if resp.ContentLength > 0 { - n++ - } - if n == 0 { - return attrs - } - - attrs = slices.Grow(attrs, n) - if resp.StatusCode > 0 { - attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) - } - if resp.ContentLength > 0 { - attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) - } - return attrs -} - -// ClientRequest returns attributes for an HTTP request made by a client. The -// following attributes are always returned: "http.url", "http.method", -// "net.peer.name". The following attributes are returned if the related values -// are defined in req: "net.peer.port", "user_agent.original", -// "http.request_content_length", "user_agent.original". -func (c *httpConv) ClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.method string - user_agent.original string - http.url string - net.peer.name string - net.peer.port int - http.request_content_length int - */ - - /* The following semantic conventions are not returned: - http.status_code This requires the response. See ClientResponse. - http.response_content_length This requires the response. See ClientResponse. - net.sock.family This requires the socket used. - net.sock.peer.addr This requires the socket used. - net.sock.peer.name This requires the socket used. - net.sock.peer.port This requires the socket used. 
- http.resend_count This is something outside of a single request. - net.protocol.name The value is the Request is ignored, and the go client will always use "http". - net.protocol.version The value in the Request is ignored, and the go client will always use 1.1 or 2.0. - */ - n := 3 // URL, peer name, proto, and method. - var h string - if req.URL != nil { - h = req.URL.Host - } - peer, p := firstHostPort(h, req.Header.Get("Host")) - port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) - if port > 0 { - n++ - } - useragent := req.UserAgent() - if useragent != "" { - n++ - } - if req.ContentLength > 0 { - n++ - } - - attrs = slices.Grow(attrs, n) - attrs = append(attrs, c.method(req.Method)) - - var u string - if req.URL != nil { - // Remove any username/password info that may be in the URL. - userinfo := req.URL.User - req.URL.User = nil - u = req.URL.String() - // Restore any username/password info that was removed. - req.URL.User = userinfo - } - attrs = append(attrs, c.HTTPURLKey.String(u)) - - attrs = append(attrs, c.NetConv.PeerName(peer)) - if port > 0 { - attrs = append(attrs, c.NetConv.PeerPort(port)) - } - - if useragent != "" { - attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) - } - - if l := req.ContentLength; l > 0 { - attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) - } - - return attrs -} - -// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The -// following attributes are always returned: "http.method", "net.peer.name". -// The following attributes are returned if the related values -// are defined in req: "net.peer.port". -func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.method string - net.peer.name string - net.peer.port int - */ - - n := 2 // method, peer name. - var h string - if req.URL != nil { - h = req.URL.Host - } - peer, p := firstHostPort(h, req.Header.Get("Host")) - port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) - if port > 0 { - n++ - } - - attrs := make([]attribute.KeyValue, 0, n) - attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer)) - - if port > 0 { - attrs = append(attrs, c.NetConv.PeerPort(port)) - } - - return attrs -} - -// ServerRequest returns attributes for an HTTP request received by a server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "http.target", "net.host.name". 
The following attributes are returned if they -// related values are defined in req: "net.host.port", "net.sock.peer.addr", -// "net.sock.peer.port", "user_agent.original", "http.client_ip", -// "net.protocol.name", "net.protocol.version". -func (c *httpConv) ServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.method string - http.scheme string - net.host.name string - net.host.port int - net.sock.peer.addr string - net.sock.peer.port int - user_agent.original string - http.client_ip string - net.protocol.name string Note: not set if the value is "http". - net.protocol.version string - http.target string Note: doesn't include the query parameter. - */ - - /* The following semantic conventions are not returned: - http.status_code This requires the response. - http.request_content_length This requires the len() of body, which can mutate it. - http.response_content_length This requires the response. - http.route This is not available. - net.sock.peer.name This would require a DNS lookup. - net.sock.host.addr The request doesn't have access to the underlying socket. - net.sock.host.port The request doesn't have access to the underlying socket. - - */ - n := 4 // Method, scheme, proto, and host name. - var host string - var p int - if server == "" { - host, p = splitHostPort(req.Host) - } else { - // Prioritize the primary server name. - host, p = splitHostPort(server) - if p < 0 { - _, p = splitHostPort(req.Host) - } - } - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - n++ - } - peer, peerPort := splitHostPort(req.RemoteAddr) - if peer != "" { - n++ - if peerPort > 0 { - n++ - } - } - useragent := req.UserAgent() - if useragent != "" { - n++ - } - - // For client IP, use, in order: - // 1. The value passed in the options - // 2. The value in the X-Forwarded-For header - // 3. The peer address - clientIP := opts.HTTPClientIP - if clientIP == "" { - clientIP = serverClientIP(req.Header.Get("X-Forwarded-For")) - if clientIP == "" { - clientIP = peer - } - } - if clientIP != "" { - n++ - } - - var target string - if req.URL != nil { - target = req.URL.Path - if target != "" { - n++ - } - } - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" && protoName != "http" { - n++ - } - if protoVersion != "" { - n++ - } - - attrs = slices.Grow(attrs, n) - - attrs = append(attrs, c.method(req.Method)) - attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.NetConv.HostName(host)) - - if hostPort > 0 { - attrs = append(attrs, c.NetConv.HostPort(hostPort)) - } - - if peer != "" { - // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a - // file-path that would be interpreted with a sock family. 
- attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) - if peerPort > 0 { - attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) - } - } - - if useragent != "" { - attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) - } - - if clientIP != "" { - attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) - } - - if target != "" { - attrs = append(attrs, c.HTTPTargetKey.String(target)) - } - - if protoName != "" && protoName != "http" { - attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) - } - if protoVersion != "" { - attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) - } - - return attrs -} - -// ServerRequestMetrics returns metric attributes for an HTTP request received -// by a server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "net.host.name". The following attributes are returned if they related -// values are defined in req: "net.host.port". -func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.scheme string - http.route string - http.method string - http.status_code int - net.host.name string - net.host.port int - net.protocol.name string Note: not set if the value is "http". - net.protocol.version string - */ - - n := 3 // Method, scheme, and host name. - var host string - var p int - if server == "" { - host, p = splitHostPort(req.Host) - } else { - // Prioritize the primary server name. 
- host, p = splitHostPort(server) - if p < 0 { - _, p = splitHostPort(req.Host) - } - } - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - n++ - } - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" { - n++ - } - if protoVersion != "" { - n++ - } - - attrs := make([]attribute.KeyValue, 0, n) - - attrs = append(attrs, c.methodMetric(req.Method)) - attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.NetConv.HostName(host)) - - if hostPort > 0 { - attrs = append(attrs, c.NetConv.HostPort(hostPort)) - } - if protoName != "" { - attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) - } - if protoVersion != "" { - attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) - } - - return attrs -} - -func (c *httpConv) method(method string) attribute.KeyValue { - if method == "" { - return c.HTTPMethodKey.String(http.MethodGet) - } - return c.HTTPMethodKey.String(method) -} - -func (c *httpConv) methodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" - } - return c.HTTPMethodKey.String(method) -} - -func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive - if https { - return c.HTTPSchemeHTTPS - } - return c.HTTPSchemeHTTP -} - -func serverClientIP(xForwardedFor string) string { - if idx := strings.Index(xForwardedFor, ","); idx >= 0 { - xForwardedFor = xForwardedFor[:idx] - } - return xForwardedFor -} - -func requiredHTTPPort(https bool, port int) int { // nolint:revive - if https { - if port > 0 && port != 443 { - return port - } - } else { - if port > 0 && port != 80 { - return port - } - } - return -1 -} - -// Return the request host and port from the first non-empty source. -func firstHostPort(source ...string) (host string, port int) { - for _, hostport := range source { - host, port = splitHostPort(hostport) - if host != "" || port > 0 { - break - } - } - return -} - -// ClientStatus returns a span status code and message for an HTTP status code -// value received by a client. -func (c *httpConv) ClientStatus(code int) (codes.Code, string) { - if code < 100 || code >= 600 { - return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) - } - if code >= 400 { - return codes.Error, "" - } - return codes.Unset, "" -} - -// ServerStatus returns a span status code and message for an HTTP status code -// value returned by a server. Status codes in the 400-499 range are not -// returned as errors. -func (c *httpConv) ServerStatus(code int) (codes.Code, string) { - if code < 100 || code >= 600 { - return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) - } - if code >= 500 { - return codes.Error, "" - } - return codes.Unset, "" -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go deleted file mode 100644 index df97255e..00000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ /dev/null @@ -1,214 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. 
-// source: internal/shared/semconvutil/netconv.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" - -import ( - "net" - "strconv" - "strings" - - "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" -) - -// NetTransport returns a trace attribute describing the transport protocol of the -// passed network. See the net.Dial for information about acceptable network -// values. -func NetTransport(network string) attribute.KeyValue { - return nc.Transport(network) -} - -// netConv are the network semantic convention attributes defined for a version -// of the OpenTelemetry specification. -type netConv struct { - NetHostNameKey attribute.Key - NetHostPortKey attribute.Key - NetPeerNameKey attribute.Key - NetPeerPortKey attribute.Key - NetProtocolName attribute.Key - NetProtocolVersion attribute.Key - NetSockFamilyKey attribute.Key - NetSockPeerAddrKey attribute.Key - NetSockPeerPortKey attribute.Key - NetSockHostAddrKey attribute.Key - NetSockHostPortKey attribute.Key - NetTransportOther attribute.KeyValue - NetTransportTCP attribute.KeyValue - NetTransportUDP attribute.KeyValue - NetTransportInProc attribute.KeyValue -} - -var nc = &netConv{ - NetHostNameKey: semconv.NetHostNameKey, - NetHostPortKey: semconv.NetHostPortKey, - NetPeerNameKey: semconv.NetPeerNameKey, - NetPeerPortKey: semconv.NetPeerPortKey, - NetProtocolName: semconv.NetProtocolNameKey, - NetProtocolVersion: semconv.NetProtocolVersionKey, - NetSockFamilyKey: semconv.NetSockFamilyKey, - NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, - NetSockPeerPortKey: semconv.NetSockPeerPortKey, - NetSockHostAddrKey: semconv.NetSockHostAddrKey, - NetSockHostPortKey: semconv.NetSockHostPortKey, - NetTransportOther: semconv.NetTransportOther, - NetTransportTCP: semconv.NetTransportTCP, - NetTransportUDP: semconv.NetTransportUDP, - NetTransportInProc: semconv.NetTransportInProc, -} - -func (c *netConv) Transport(network string) attribute.KeyValue { - switch network { - case "tcp", "tcp4", "tcp6": - return c.NetTransportTCP - case "udp", "udp4", "udp6": - return c.NetTransportUDP - case "unix", "unixgram", "unixpacket": - return c.NetTransportInProc - default: - // "ip:*", "ip4:*", and "ip6:*" all are considered other. - return c.NetTransportOther - } -} - -// Host returns attributes for a network host address. -func (c *netConv) Host(address string) []attribute.KeyValue { - h, p := splitHostPort(address) - var n int - if h != "" { - n++ - if p > 0 { - n++ - } - } - - if n == 0 { - return nil - } - - attrs := make([]attribute.KeyValue, 0, n) - attrs = append(attrs, c.HostName(h)) - if p > 0 { - attrs = append(attrs, c.HostPort(p)) - } - return attrs -} - -func (c *netConv) HostName(name string) attribute.KeyValue { - return c.NetHostNameKey.String(name) -} - -func (c *netConv) HostPort(port int) attribute.KeyValue { - return c.NetHostPortKey.Int(port) -} - -func family(network, address string) string { - switch network { - case "unix", "unixgram", "unixpacket": - return "unix" - default: - if ip := net.ParseIP(address); ip != nil { - if ip.To4() == nil { - return "inet6" - } - return "inet" - } - } - return "" -} - -// Peer returns attributes for a network peer address. 
-func (c *netConv) Peer(address string) []attribute.KeyValue { - h, p := splitHostPort(address) - var n int - if h != "" { - n++ - if p > 0 { - n++ - } - } - - if n == 0 { - return nil - } - - attrs := make([]attribute.KeyValue, 0, n) - attrs = append(attrs, c.PeerName(h)) - if p > 0 { - attrs = append(attrs, c.PeerPort(p)) - } - return attrs -} - -func (c *netConv) PeerName(name string) attribute.KeyValue { - return c.NetPeerNameKey.String(name) -} - -func (c *netConv) PeerPort(port int) attribute.KeyValue { - return c.NetPeerPortKey.Int(port) -} - -func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue { - return c.NetSockPeerAddrKey.String(addr) -} - -func (c *netConv) SockPeerPort(port int) attribute.KeyValue { - return c.NetSockPeerPortKey.Int(port) -} - -// splitHostPort splits a network address hostport of the form "host", -// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", -// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and -// port. -// -// An empty host is returned if it is not provided or unparsable. A negative -// port is returned if it is not provided or unparsable. -func splitHostPort(hostport string) (host string, port int) { - port = -1 - - if strings.HasPrefix(hostport, "[") { - addrEnd := strings.LastIndex(hostport, "]") - if addrEnd < 0 { - // Invalid hostport. - return - } - if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { - host = hostport[1:addrEnd] - return - } - } else { - if i := strings.LastIndex(hostport, ":"); i < 0 { - host = hostport - return - } - } - - host, pStr, err := net.SplitHostPort(hostport) - if err != nil { - return - } - - p, err := strconv.ParseUint(pStr, 10, 16) - if err != nil { - return - } - return host, int(p) // nolint: gosec // Bitsize checked to be 16 above. 
-} - -func netProtocol(proto string) (name string, version string) { - name, version, _ = strings.Cut(proto, "/") - switch name { - case "HTTP": - name = "http" - case "QUIC": - name = "quic" - case "SPDY": - name = "spdy" - default: - name = strings.ToLower(name) - } - return name, version -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index e4c02a42..514ae675 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -11,14 +11,14 @@ import ( "sync/atomic" "time" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" ) // Transport implements the http.RoundTripper interface and wraps @@ -148,11 +148,7 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { } if err == nil { - // For handling response bytes we leverage a callback when the client reads the http response - readRecordFunc := func(n int64) { - t.semconv.RecordResponseSize(ctx, n, metricOpts) - } - + readRecordFunc := func(int64) {} res.Body = newWrappedBody(span, readRecordFunc, res.Body) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 2fe5a136..dfb53cf1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,6 +5,6 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.62.0" + return "0.63.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 6bf3abc4..2b53a25e 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -7,3 +7,4 @@ ans nam valu thirdparty +addOpt diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 5f69cc02..b01762ff 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -10,6 +10,7 @@ linters: - depguard - errcheck - errorlint + - gocritic - godot - gosec - govet @@ -86,6 +87,18 @@ linters: deny: - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal desc: Do not use cross-module internal packages. + gocritic: + disabled-checks: + - appendAssign + - commentedOutCode + - dupArg + - hugeParam + - importShadow + - preferDecodeRune + - rangeValCopy + - unnamedResult + - whyNoLint + enable-all: true godot: exclude: # Exclude links. 
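Editor's note (not part of the vendored diff): this .golangci.yml hunk and the one that follows tighten upstream linting, enabling gocritic and additional revive rules such as use-any. The env.go hunk earlier in this diff already reflects the use-any rewrite, with the sync.Pool constructors returning any instead of interface{}. A small standalone illustration of that rewrite, with placeholder pool contents:

package main

import (
	"fmt"
	"sync"
)

func main() {
	// `any` is an alias for interface{} (Go 1.18+), so the revive use-any
	// rewrite changes spelling only, not behavior.
	pool := sync.Pool{New: func() any { return new([]string) }}
	buf := pool.Get().(*[]string)
	*buf = append(*buf, "reused")
	fmt.Println(*buf) // [reused]
	*buf = (*buf)[:0]
	pool.Put(buf)
}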
@@ -167,7 +180,10 @@ linters: - fmt.Print - fmt.Printf - fmt.Println + - name: unused-parameter + - name: unused-receiver - name: unnecessary-stmt + - name: use-any - name: useless-break - name: var-declaration - name: var-naming @@ -224,10 +240,6 @@ linters: - linters: - gosec text: 'G402: TLS MinVersion too low.' - paths: - - third_party$ - - builtin$ - - examples$ issues: max-issues-per-linter: 0 max-same-issues: 0 @@ -237,14 +249,12 @@ formatters: - goimports - golines settings: + gofumpt: + extra-rules: true goimports: local-prefixes: - - go.opentelemetry.io + - go.opentelemetry.io/otel golines: max-len: 120 exclusions: generated: lax - paths: - - third_party$ - - builtin$ - - examples$ diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore index 40d62fa2..53285058 100644 --- a/vendor/go.opentelemetry.io/otel/.lycheeignore +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -2,5 +2,8 @@ http://localhost http://jaeger-collector https://github.com/open-telemetry/opentelemetry-go/milestone/ https://github.com/open-telemetry/opentelemetry-go/projects +# Weaver model URL for semantic-conventions repository. +https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+] file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual +http://4.3.2.1:78/user/123 \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 4acc7570..f3abcfdc 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,93 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29 + +This release is the last to support [Go 1.23]. +The next release will require at least [Go 1.24]. + +### Added + +- Add native histogram exemplar support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6772) +- Add template attribute functions to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. (#6939) + - `ContainerLabel` + - `DBOperationParameter` + - `DBSystemParameter` + - `HTTPRequestHeader` + - `HTTPResponseHeader` + - `K8SCronJobAnnotation` + - `K8SCronJobLabel` + - `K8SDaemonSetAnnotation` + - `K8SDaemonSetLabel` + - `K8SDeploymentAnnotation` + - `K8SDeploymentLabel` + - `K8SJobAnnotation` + - `K8SJobLabel` + - `K8SNamespaceAnnotation` + - `K8SNamespaceLabel` + - `K8SNodeAnnotation` + - `K8SNodeLabel` + - `K8SPodAnnotation` + - `K8SPodLabel` + - `K8SReplicaSetAnnotation` + - `K8SReplicaSetLabel` + - `K8SStatefulSetAnnotation` + - `K8SStatefulSetLabel` + - `ProcessEnvironmentVariable` + - `RPCConnectRPCRequestMetadata` + - `RPCConnectRPCResponseMetadata` + - `RPCGRPCRequestMetadata` + - `RPCGRPCResponseMetadata` +- Add `ErrorType` attribute helper function to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. (#6962) +- Add `WithAllowKeyDuplication` in `go.opentelemetry.io/otel/sdk/log` which can be used to disable deduplication for log records. (#6968) +- Add `WithCardinalityLimit` option to configure the cardinality limit in `go.opentelemetry.io/otel/sdk/metric`. (#6996, #7065, #7081, #7164, #7165, #7179) +- Add `Clone` method to `Record` in `go.opentelemetry.io/otel/log` that returns a copy of the record with no shared state. 
(#7001) +- Add experimental self-observability span and batch span processor metrics in `go.opentelemetry.io/otel/sdk/trace`. + Check the `go.opentelemetry.io/otel/sdk/trace/internal/x` package documentation for more information. (#7027, #6393, #7209) +- The `go.opentelemetry.io/otel/semconv/v1.36.0` package. + The package contains semantic conventions from the `v1.36.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.36.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.34.0.`(#7032, #7041) +- Add support for configuring Prometheus name translation using `WithTranslationStrategy` option in `go.opentelemetry.io/otel/exporters/prometheus`. The current default translation strategy when UTF-8 mode is enabled is `NoUTF8EscapingWithSuffixes`, but a future release will change the default strategy to `UnderscoreEscapingWithSuffixes` for compliance with the specification. (#7111) +- Add experimental self-observability log metrics in `go.opentelemetry.io/otel/sdk/log`. + Check the `go.opentelemetry.io/otel/sdk/log/internal/x` package documentation for more information. (#7121) +- Add experimental self-observability trace exporter metrics in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. + Check the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x` package documentation for more information. (#7133) +- Support testing of [Go 1.25]. (#7187) +- The `go.opentelemetry.io/otel/semconv/v1.37.0` package. + The package contains semantic conventions from the `v1.37.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.37.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.36.0.`(#7254) + +### Changed + +- Optimize `TraceIDFromHex` and `SpanIDFromHex` in `go.opentelemetry.io/otel/sdk/trace`. (#6791) +- Change `AssertEqual` in `go.opentelemetry.io/otel/log/logtest` to accept `TestingT` in order to support benchmarks and fuzz tests. (#6908) +- Change `DefaultExemplarReservoirProviderSelector` in `go.opentelemetry.io/otel/sdk/metric` to use `runtime.GOMAXPROCS(0)` instead of `runtime.NumCPU()` for the `FixedSizeReservoirProvider` default size. (#7094) + +### Fixed + +- `SetBody` method of `Record` in `go.opentelemetry.io/otel/sdk/log` now deduplicates key-value collections (`log.Value` of `log.KindMap` from `go.opentelemetry.io/otel/log`). (#7002) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` to not append a suffix if it's already present in metric name. (#7088) +- Fix the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` self-observability component type and name. (#7195) +- Fix partial export count metric in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7199) + +### Deprecated + +- Deprecate `WithoutUnits` and `WithoutCounterSuffixes` options, preferring `WithTranslationStrategy` instead. (#7111) +- Deprecate support for `OTEL_GO_X_CARDINALITY_LIMIT` environment variable in `go.opentelemetry.io/otel/sdk/metric`. Use `WithCardinalityLimit` option instead. (#7166) + +## [0.59.1] 2025-07-21 + +### Changed + +- Retract `v0.59.0` release of `go.opentelemetry.io/otel/exporters/prometheus` module which appends incorrect unit suffixes. (#7046) +- Change `go.opentelemetry.io/otel/exporters/prometheus` to no longer deduplicate suffixes when UTF8 is enabled. 
+ It is recommended to disable unit and counter suffixes in the exporter, and manually add suffixes if you rely on the existing behavior. (#7044) + +### Fixed + +- Fix `go.opentelemetry.io/otel/exporters/prometheus` to properly handle unit suffixes when the unit is in brackets. + E.g. `{spans}`. (#7044) + ## [1.37.0/0.59.0/0.13.0] 2025-06-25 ### Added @@ -3343,7 +3430,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD +[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0 [1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 [0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 [0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 @@ -3439,6 +3527,7 @@ It contains api and sdk for trace and meter. +[Go 1.25]: https://go.dev/doc/go1.25 [Go 1.24]: https://go.dev/doc/go1.24 [Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 945a07d2..26a03aed 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @XSAM @dashpole @pellared @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu @flc1125 CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index f9ddc281..0b3ae855 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -192,6 +192,35 @@ should have `go test -bench` output in their description. should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) output in their description. +## Dependencies + +This project uses [Go Modules] for dependency management. All modules will use +`go.mod` to explicitly list all direct and indirect dependencies, ensuring a +clear dependency graph. The `go.sum` file for each module will be committed to +the repository and used to verify the integrity of downloaded modules, +preventing malicious tampering. + +This project uses automated dependency update tools (i.e. dependabot, +renovatebot) to manage updates to dependencies. This ensures that dependencies +are kept up-to-date with the latest security patches and features and are +reviewed before being merged. If you would like to propose a change to a +dependency it should be done through a pull request that updates the `go.mod` +file and includes a description of the change. + +See the [versioning and compatibility](./VERSIONING.md) policy for more details +about dependency compatibility. + +[Go Modules]: https://pkg.go.dev/cmd/go#hdr-Modules__module_versions__and_more + +### Environment Dependencies + +This project does not partition dependencies based on the environment (i.e. +`development`, `staging`, `production`). + +Only the dependencies explicitly included in the released modules have be +tested and verified to work with the released code. No other guarantee is made +about the compatibility of other dependencies. 
+ ## Documentation Each (non-internal, non-test) package must be documented using @@ -233,6 +262,10 @@ For a non-comprehensive but foundational overview of these best practices the [Effective Go](https://golang.org/doc/effective_go.html) documentation is an excellent starting place. +We also recommend following the +[Go Code Review Comments](https://go.dev/wiki/CodeReviewComments) +that collects common comments made during reviews of Go code. + As a convenience for developers building this project the `make precommit` will format, lint, validate, and in some cases fix the changes you plan to submit. This check will need to pass for your changes to be able to be @@ -586,6 +619,10 @@ See also: ### Testing +We allow using [`testify`](https://github.com/stretchr/testify) even though +it is seen as non-idiomatic according to +the [Go Test Comments](https://go.dev/wiki/TestComments#assert-libraries) page. + The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the @@ -640,13 +677,6 @@ should be canceled. ## Approvers and Maintainers -### Triagers - -- [Alex Kats](https://github.com/akats7), Capital One -- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent - -### Approvers - ### Maintainers - [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832)) @@ -655,6 +685,21 @@ should be canceled. - [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA)) - [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A)) +For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer). + +### Approvers + +- [Flc](https://github.com/flc1125), Independent + +For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver). + +### Triagers + +- [Alex Kats](https://github.com/akats7), Capital One +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent + +For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager). + ### Emeritus - [Aaron Clawson](https://github.com/MadVikingGod) @@ -665,6 +710,8 @@ should be canceled. - [Josh MacDonald](https://github.com/jmacd) - [Liz Fong-Jones](https://github.com/lizthegrey) +For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager). + ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/LICENSE +++ b/vendor/go.opentelemetry.io/otel/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 4fa423ca..bc0f1f92 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -34,9 +34,6 @@ $(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) MULTIMOD = $(TOOLS)/multimod $(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod -SEMCONVGEN = $(TOOLS)/semconvgen -$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen - CROSSLINK = $(TOOLS)/crosslink $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink @@ -71,7 +68,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -284,7 +281,7 @@ semconv-generate: $(SEMCONVKIT) docker run --rm \ -u $(DOCKER_USER) \ --env HOME=/tmp/weaver \ - --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \ + --mount 'type=bind,source=$(PWD)/semconv/templates,target=/home/weaver/templates,readonly' \ --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \ --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \ $(WEAVER_IMAGE) registry generate \ diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 5fa1b75c..6b7ab5f2 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -53,18 +53,25 @@ Currently, this project supports the following environments. 
| OS | Go Version | Architecture | |----------|------------|--------------| +| Ubuntu | 1.25 | amd64 | | Ubuntu | 1.24 | amd64 | | Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.25 | 386 | | Ubuntu | 1.24 | 386 | | Ubuntu | 1.23 | 386 | +| Ubuntu | 1.25 | arm64 | | Ubuntu | 1.24 | arm64 | | Ubuntu | 1.23 | arm64 | +| macOS 13 | 1.25 | amd64 | | macOS 13 | 1.24 | amd64 | | macOS 13 | 1.23 | amd64 | +| macOS | 1.25 | arm64 | | macOS | 1.24 | arm64 | | macOS | 1.23 | arm64 | +| Windows | 1.25 | amd64 | | Windows | 1.24 | amd64 | | Windows | 1.23 | amd64 | +| Windows | 1.25 | 386 | | Windows | 1.24 | 386 | | Windows | 1.23 | 386 | diff --git a/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml new file mode 100644 index 00000000..8041fc62 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml @@ -0,0 +1,203 @@ +header: + schema-version: "1.0.0" + expiration-date: "2026-08-04T00:00:00.000Z" + last-updated: "2025-08-04" + last-reviewed: "2025-08-04" + commit-hash: 69e81088ad40f45a0764597326722dea8f3f00a8 + project-url: https://github.com/open-telemetry/opentelemetry-go + project-release: "v1.37.0" + changelog: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CHANGELOG.md + license: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/LICENSE + +project-lifecycle: + status: active + bug-fixes-only: false + core-maintainers: + - https://github.com/dmathieu + - https://github.com/dashpole + - https://github.com/pellared + - https://github.com/XSAM + - https://github.com/MrAlias + release-process: | + See https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/RELEASING.md + +contribution-policy: + accepts-pull-requests: true + accepts-automated-pull-requests: true + automated-tools-list: + - automated-tool: dependabot + action: allowed + comment: Automated dependency updates are accepted. + - automated-tool: renovatebot + action: allowed + comment: Automated dependency updates are accepted. + - automated-tool: opentelemetrybot + action: allowed + comment: Automated OpenTelemetry actions are accepted. 
+ contributing-policy: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + code-of-conduct: https://github.com/open-telemetry/.github/blob/ffa15f76b65ec7bcc41f6a0b277edbb74f832206/CODE_OF_CONDUCT.md + +documentation: + - https://pkg.go.dev/go.opentelemetry.io/otel + - https://opentelemetry.io/docs/instrumentation/go/ + +distribution-points: + - pkg:golang/go.opentelemetry.io/otel + - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus + - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus/test + - pkg:golang/go.opentelemetry.io/otel/bridge/opentracing + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - pkg:golang/go.opentelemetry.io/otel/exporters/zipkin + - pkg:golang/go.opentelemetry.io/otel/metric + - pkg:golang/go.opentelemetry.io/otel/sdk + - pkg:golang/go.opentelemetry.io/otel/sdk/metric + - pkg:golang/go.opentelemetry.io/otel/trace + - pkg:golang/go.opentelemetry.io/otel/exporters/prometheus + - pkg:golang/go.opentelemetry.io/otel/log + - pkg:golang/go.opentelemetry.io/otel/log/logtest + - pkg:golang/go.opentelemetry.io/otel/sdk/log + - pkg:golang/go.opentelemetry.io/otel/sdk/log/logtest + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutlog + - pkg:golang/go.opentelemetry.io/otel/schema + +security-artifacts: + threat-model: + threat-model-created: false + comment: | + No formal threat model created yet. + self-assessment: + self-assessment-created: false + comment: | + No formal self-assessment yet. + +security-testing: + - tool-type: sca + tool-name: Dependabot + tool-version: latest + tool-url: https://github.com/dependabot + tool-rulesets: + - built-in + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + Automated dependency updates. + - tool-type: sast + tool-name: golangci-lint + tool-version: latest + tool-url: https://github.com/golangci/golangci-lint + tool-rulesets: + - built-in + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + Static analysis in CI. + - tool-type: fuzzing + tool-name: OSS-Fuzz + tool-version: latest + tool-url: https://github.com/google/oss-fuzz + tool-rulesets: + - default + integration: + ad-hoc: false + ci: false + before-release: false + comment: | + OpenTelemetry Go is integrated with OSS-Fuzz for continuous fuzz testing. See https://github.com/google/oss-fuzz/tree/f0f9b221190c6063a773bea606d192ebfc3d00cf/projects/opentelemetry-go for more details. + - tool-type: sast + tool-name: CodeQL + tool-version: latest + tool-url: https://github.com/github/codeql + tool-rulesets: + - default + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + CodeQL static analysis is run in CI for all commits and pull requests to detect security vulnerabilities in the Go source code. 
See https://github.com/open-telemetry/opentelemetry-go/blob/d5b5b059849720144a03ca5c87561bfbdb940119/.github/workflows/codeql-analysis.yml for workflow details. + - tool-type: sca + tool-name: govulncheck + tool-version: latest + tool-url: https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck + tool-rulesets: + - default + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + govulncheck is run in CI to detect known vulnerabilities in Go modules and code paths. See https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/.github/workflows/ci.yml for workflow configuration. + +security-assessments: + - auditor-name: 7ASecurity + auditor-url: https://7asecurity.com + auditor-report: https://7asecurity.com/reports/pentest-report-opentelemetry.pdf + report-year: 2023 + comment: | + This independent penetration test by 7ASecurity covered OpenTelemetry repositories including opentelemetry-go. The assessment focused on codebase review, threat modeling, and vulnerability identification. See the report for details of findings and recommendations applicable to opentelemetry-go. No critical vulnerabilities were found for this repository. + +security-contacts: + - type: email + value: cncf-opentelemetry-security@lists.cncf.io + primary: true + - type: website + value: https://github.com/open-telemetry/opentelemetry-go/security/policy + primary: false + +vulnerability-reporting: + accepts-vulnerability-reports: true + email-contact: cncf-opentelemetry-security@lists.cncf.io + security-policy: https://github.com/open-telemetry/opentelemetry-go/security/policy + comment: | + Security issues should be reported via email or GitHub security policy page. + +dependencies: + third-party-packages: true + dependencies-lists: + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/test/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opentracing/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploggrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploghttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracegrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracehttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/prometheus/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutlog/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutmetric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdouttrace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/zipkin/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/internal/tools/go.mod + - 
https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/logtest/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/metric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/schema/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/logtest/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/metric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/internal/telemetry/test/go.mod + dependencies-lifecycle: + policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + comment: | + Dependency lifecycle managed via go.mod and renovatebot. + env-dependencies-policy: + policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + comment: | + See contributing policy for environment usage. diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 318e42fc..6333d34b 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -78,7 +78,7 @@ func DefaultEncoder() Encoder { defaultEncoderOnce.Do(func() { defaultEncoderInstance = &defaultAttrEncoder{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return &bytes.Buffer{} }, }, @@ -96,11 +96,11 @@ func (d *defaultAttrEncoder) Encode(iter Iterator) string { for iter.Next() { i, keyValue := iter.IndexedAttribute() if i > 0 { - _, _ = buf.WriteRune(',') + _ = buf.WriteByte(',') } copyAndEscape(buf, string(keyValue.Key)) - _, _ = buf.WriteRune('=') + _ = buf.WriteByte('=') if keyValue.Value.Type() == STRING { copyAndEscape(buf, keyValue.Value.AsString()) @@ -122,14 +122,14 @@ func copyAndEscape(buf *bytes.Buffer, val string) { for _, ch := range val { switch ch { case '=', ',', escapeChar: - _, _ = buf.WriteRune(escapeChar) + _ = buf.WriteByte(escapeChar) } _, _ = buf.WriteRune(ch) } } -// Valid returns true if this encoder ID was allocated by -// `NewEncoderID`. Invalid encoder IDs will not be cached. +// Valid reports whether this encoder ID was allocated by +// [NewEncoderID]. Invalid encoder IDs will not be cached. func (id EncoderID) Valid() bool { return id.value != 0 } diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go index 3eeaa5d4..624ebbe3 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -15,8 +15,8 @@ type Filter func(KeyValue) bool // // If keys is empty a deny-all filter is returned. func NewAllowKeysFilter(keys ...Key) Filter { - if len(keys) <= 0 { - return func(kv KeyValue) bool { return false } + if len(keys) == 0 { + return func(KeyValue) bool { return false } } allowed := make(map[Key]struct{}, len(keys)) @@ -34,8 +34,8 @@ func NewAllowKeysFilter(keys ...Key) Filter { // // If keys is empty an allow-all filter is returned. 
func NewDenyKeysFilter(keys ...Key) Filter { - if len(keys) <= 0 { - return func(kv KeyValue) bool { return true } + if len(keys) == 0 { + return func(KeyValue) bool { return true } } forbid := make(map[Key]struct{}, len(keys)) diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go index b76d2bbf..08755043 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go +++ b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go @@ -12,7 +12,7 @@ import ( ) // BoolSliceValue converts a bool slice into an array with same elements as slice. -func BoolSliceValue(v []bool) interface{} { +func BoolSliceValue(v []bool) any { var zero bool cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -20,7 +20,7 @@ func BoolSliceValue(v []bool) interface{} { } // Int64SliceValue converts an int64 slice into an array with same elements as slice. -func Int64SliceValue(v []int64) interface{} { +func Int64SliceValue(v []int64) any { var zero int64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -28,7 +28,7 @@ func Int64SliceValue(v []int64) interface{} { } // Float64SliceValue converts a float64 slice into an array with same elements as slice. -func Float64SliceValue(v []float64) interface{} { +func Float64SliceValue(v []float64) any { var zero float64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -36,7 +36,7 @@ func Float64SliceValue(v []float64) interface{} { } // StringSliceValue converts a string slice into an array with same elements as slice. -func StringSliceValue(v []string) interface{} { +func StringSliceValue(v []string) any { var zero string cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -44,7 +44,7 @@ func StringSliceValue(v []string) interface{} { } // AsBoolSlice converts a bool array into a slice into with same elements as array. -func AsBoolSlice(v interface{}) []bool { +func AsBoolSlice(v any) []bool { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -57,7 +57,7 @@ func AsBoolSlice(v interface{}) []bool { } // AsInt64Slice converts an int64 array into a slice into with same elements as array. -func AsInt64Slice(v interface{}) []int64 { +func AsInt64Slice(v any) []int64 { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -70,7 +70,7 @@ func AsInt64Slice(v interface{}) []int64 { } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. -func AsFloat64Slice(v interface{}) []float64 { +func AsFloat64Slice(v any) []float64 { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -83,7 +83,7 @@ func AsFloat64Slice(v interface{}) []float64 { } // AsStringSlice converts a string array into a slice into with same elements as array. 
-func AsStringSlice(v interface{}) []string { +func AsStringSlice(v any) []string { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go index f2ba89ce..8df6249f 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -25,8 +25,8 @@ type oneIterator struct { attr KeyValue } -// Next moves the iterator to the next position. Returns false if there are no -// more attributes. +// Next moves the iterator to the next position. +// Next reports whether there are more attributes. func (i *Iterator) Next() bool { i.idx++ return i.idx < i.Len() @@ -106,7 +106,8 @@ func (oi *oneIterator) advance() { } } -// Next returns true if there is another attribute available. +// Next moves the iterator to the next position. +// Next reports whether there is another attribute available. func (m *MergeIterator) Next() bool { if m.one.done && m.two.done { return false diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go index d9a22c65..80a9e564 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -117,7 +117,7 @@ func (k Key) StringSlice(v []string) KeyValue { } } -// Defined returns true for non-empty keys. +// Defined reports whether the key is not empty. func (k Key) Defined() bool { return len(k) != 0 } diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go index 3028f9a4..8c6928ca 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -13,7 +13,7 @@ type KeyValue struct { Value Value } -// Valid returns if kv is a valid OpenTelemetry attribute. +// Valid reports whether kv is a valid OpenTelemetry attribute. func (kv KeyValue) Valid() bool { return kv.Key.Defined() && kv.Value.Type() != INVALID } diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index 6cbefcea..64735d38 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -31,11 +31,11 @@ type ( // Distinct is a unique identifier of a Set. // - // Distinct is designed to be ensures equivalence stability: comparisons - // will return the save value across versions. For this reason, Distinct - // should always be used as a map key instead of a Set. + // Distinct is designed to ensure equivalence stability: comparisons will + // return the same value across versions. For this reason, Distinct should + // always be used as a map key instead of a Set. Distinct struct { - iface interface{} + iface any } // Sortable implements sort.Interface, used for sorting KeyValue. @@ -70,7 +70,7 @@ func (d Distinct) reflectValue() reflect.Value { return reflect.ValueOf(d.iface) } -// Valid returns true if this value refers to a valid Set. +// Valid reports whether this value refers to a valid Set. func (d Distinct) Valid() bool { return d.iface != nil } @@ -120,7 +120,7 @@ func (l *Set) Value(k Key) (Value, bool) { return Value{}, false } -// HasValue tests whether a key is defined in this set. +// HasValue reports whether a key is defined in this set. 
func (l *Set) HasValue(k Key) bool { if l == nil { return false @@ -155,7 +155,7 @@ func (l *Set) Equivalent() Distinct { return l.equivalent } -// Equals returns true if the argument set is equivalent to this set. +// Equals reports whether the argument set is equivalent to this set. func (l *Set) Equals(o *Set) bool { return l.Equivalent() == o.Equivalent() } @@ -344,7 +344,7 @@ func computeDistinct(kvs []KeyValue) Distinct { // computeDistinctFixed computes a Distinct for small slices. It returns nil // if the input is too large for this code path. -func computeDistinctFixed(kvs []KeyValue) interface{} { +func computeDistinctFixed(kvs []KeyValue) any { switch len(kvs) { case 1: return [1]KeyValue(kvs) @@ -373,7 +373,7 @@ func computeDistinctFixed(kvs []KeyValue) interface{} { // computeDistinctReflect computes a Distinct using reflection, works for any // size input. -func computeDistinctReflect(kvs []KeyValue) interface{} { +func computeDistinctReflect(kvs []KeyValue) any { at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() for i, keyValue := range kvs { *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue @@ -387,7 +387,7 @@ func (l *Set) MarshalJSON() ([]byte, error) { } // MarshalLog is the marshaling function used by the logging system to represent this Set. -func (l Set) MarshalLog() interface{} { +func (l Set) MarshalLog() any { kvs := make(map[string]string) for _, kv := range l.ToSlice() { kvs[string(kv.Key)] = kv.Value.Emit() diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 817eecac..653c33a8 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -22,7 +22,7 @@ type Value struct { vtype Type numeric uint64 stringly string - slice interface{} + slice any } const ( @@ -199,8 +199,8 @@ func (v Value) asStringSlice() []string { type unknownValueType struct{} -// AsInterface returns Value's data as interface{}. -func (v Value) AsInterface() interface{} { +// AsInterface returns Value's data as any. +func (v Value) AsInterface() any { switch v.Type() { case BOOL: return v.AsBool() @@ -262,7 +262,7 @@ func (v Value) Emit() string { func (v Value) MarshalJSON() ([]byte, error) { var jsonVal struct { Type string - Value interface{} + Value any } jsonVal.Type = v.Type().String() jsonVal.Value = v.AsInterface() diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 0e1fe242..f83a448e 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -812,7 +812,7 @@ var safeKeyCharset = [utf8.RuneSelf]bool{ // validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. // Baggage name is a valid, non-empty UTF-8 string. func validateBaggageName(s string) bool { - if len(s) == 0 { + if s == "" { return false } @@ -828,7 +828,7 @@ func validateBaggageValue(s string) bool { // validateKey checks if the string is a valid W3C Baggage key. 
func validateKey(s string) bool { - if len(s) == 0 { + if s == "" { return false } diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 49a35b12..d48847ed 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -67,7 +67,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return errors.New("nil receiver passed to UnmarshalJSON") } - var x interface{} + var x any if err := json.Unmarshal(b, &x); err != nil { return err } @@ -102,5 +102,5 @@ func (c *Code) MarshalJSON() ([]byte, error) { if !ok { return nil, fmt.Errorf("invalid code: %d", *c) } - return []byte(fmt.Sprintf("%q", str)), nil + return fmt.Appendf(nil, "%q", str), nil } diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index 935bd487..a311fbb4 100644 --- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,4 +1,4 @@ # This is a renovate-friendly source of Docker images. -FROM python:3.13.5-slim-bullseye@sha256:5b9fc0d8ef79cfb5f300e61cb516e0c668067bbf77646762c38c94107e230dbc AS python -FROM otel/weaver:v0.15.2@sha256:b13acea09f721774daba36344861f689ac4bb8d6ecd94c4600b4d590c8fb34b9 AS weaver +FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python +FROM otel/weaver:v0.17.1@sha256:32523b5e44fb44418786347e9f7dde187d8797adb6d57a2ee99c245346c3cdfe AS weaver FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go index 82a4c2c2..492480f8 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go @@ -8,6 +8,8 @@ import ( "errors" "time" + colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" + metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -18,8 +20,6 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry" - colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) type client struct { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go index 3977c1f8..35cdf466 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go @@ -9,12 +9,13 @@ import ( "fmt" "sync" + metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) // Exporter is a OpenTelemetry metric Exporter using gRPC. @@ -91,7 +92,7 @@ func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) e // This method returns an error if the method is canceled by the passed context. // // This method is safe to call concurrently. -func (e *Exporter) ForceFlush(ctx context.Context) error { +func (*Exporter) ForceFlush(ctx context.Context) error { // The exporter and client hold no state, nothing to flush. return ctx.Err() } @@ -119,7 +120,7 @@ var errShutdown = errors.New("gRPC exporter is shutdown") type shutdownClient struct{} -func (c shutdownClient) err(ctx context.Context) error { +func (shutdownClient) err(ctx context.Context) error { if err := ctx.Err(); err != nil { return err } @@ -135,7 +136,7 @@ func (c shutdownClient) Shutdown(ctx context.Context) error { } // MarshalLog returns logging data about the Exporter. 
-func (e *Exporter) MarshalLog() interface{} { +func (*Exporter) MarshalLog() any { return struct{ Type string }{Type: "OTLP/gRPC"} } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go index b34d35b0..7909cac5 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go @@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. func Version() string { - return "1.37.0" + return "1.38.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go index 3f0a518a..30446bd2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go @@ -94,7 +94,7 @@ func NewUnstarted(client Client) *Exporter { } // MarshalLog is the marshaling function used by the logging system to represent this Exporter. 
-func (e *Exporter) MarshalLog() interface{} { +func (e *Exporter) MarshalLog() any { return struct { Type string Client Client diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go index ca4544f0..d9bfd6e1 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -6,9 +6,10 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( + commonpb "go.opentelemetry.io/proto/otlp/common/v1" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" - commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) // KeyValues transforms a slice of attribute KeyValues into OTLP key-values. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index 2e7690e4..43359c89 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -4,8 +4,9 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( - "go.opentelemetry.io/otel/sdk/instrumentation" commonpb "go.opentelemetry.io/proto/otlp/common/v1" + + "go.opentelemetry.io/otel/sdk/instrumentation" ) func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go index db7b698a..526bb5e0 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go @@ -4,8 +4,9 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( - "go.opentelemetry.io/otel/sdk/resource" resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" + + "go.opentelemetry.io/otel/sdk/resource" ) // Resource transforms a Resource into an OTLP Resource. 
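The import reshuffling repeated across these otlptrace and tracetransform files lines up with the goimports change earlier in this patch, which narrows local-prefixes from go.opentelemetry.io to go.opentelemetry.io/otel: packages under go.opentelemetry.io/proto/otlp now sort with the third-party group rather than the module-local group. A minimal sketch of the resulting layout, using a simplified analogue of the status helper these files contain (illustrative only, not part of the vendored patch):

package tracetransform

import (
	// Third-party group: the proto module no longer matches local-prefixes.
	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"

	// Module-local group (go.opentelemetry.io/otel).
	"go.opentelemetry.io/otel/codes"
)

// status maps an API status code onto the OTLP wire enum; unknown values
// fall back to UNSET.
func status(c codes.Code) tracepb.Status_StatusCode {
	switch c {
	case codes.Ok:
		return tracepb.Status_STATUS_CODE_OK
	case codes.Error:
		return tracepb.Status_STATUS_CODE_ERROR
	default:
		return tracepb.Status_STATUS_CODE_UNSET
	}
}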
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go index bf27ef02..379bc817 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -6,12 +6,13 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptr import ( "math" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" - tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) // Spans transforms a slice of OpenTelemetry spans into a slice of OTLP @@ -154,7 +155,6 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { for _, otLink := range links { // This redefinition is necessary to prevent otLink.*ID[:] copies // being reused -- in short we need a new otLink per iteration. - otLink := otLink tid := otLink.SpanContext.TraceID() sid := otLink.SpanContext.SpanID() @@ -189,7 +189,7 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { events := make([]*tracepb.Span_Event, len(es)) // Transform message events - for i := 0; i < len(es); i++ { + for i := range es { events[i] = &tracepb.Span_Event{ Name: es[i].Name, TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 8236c995..4b4cc76f 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -9,6 +9,8 @@ import ( "sync" "time" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -20,8 +22,6 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" - coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" - tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) type client struct { @@ -289,7 +289,7 @@ func throttleDelay(s *status.Status) (bool, time.Duration) { } // MarshalLog is the marshaling function used by the logging system to represent this Client. -func (c *client) MarshalLog() interface{} { +func (c *client) MarshalLog() any { return struct { Type string Endpoint string diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index ed2ddce7..3b79c1a0 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.37.0" + return "1.38.0" } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go index adbca7d3..86d7f4ba 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -41,22 +41,22 @@ func GetLogger() logr.Logger { // Info prints messages about the general state of the API or SDK. // This should usually be less than 5 messages a minute. -func Info(msg string, keysAndValues ...interface{}) { +func Info(msg string, keysAndValues ...any) { GetLogger().V(4).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. -func Error(err error, msg string, keysAndValues ...interface{}) { +func Error(err error, msg string, keysAndValues ...any) { GetLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. 
-func Debug(msg string, keysAndValues ...interface{}) { +func Debug(msg string, keysAndValues ...any) { GetLogger().V(8).Info(msg, keysAndValues...) } // Warn prints messages about warnings in the API or SDK. // Not an error but is likely more important than an informational event. -func Warn(msg string, keysAndValues ...interface{}) { +func Warn(msg string, keysAndValues ...any) { GetLogger().V(1).Info(msg, keysAndValues...) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 49e4ac4f..bf5cf311 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -26,6 +26,7 @@ import ( "sync/atomic" "go.opentelemetry.io/auto/sdk" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/metric/LICENSE +++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go index ebda5026..05188260 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -20,7 +20,7 @@ type Baggage struct{} var _ TextMapPropagator = Baggage{} // Inject sets baggage key-values from ctx into the carrier. 
-func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { +func (Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { bStr := baggage.FromContext(ctx).String() if bStr != "" { carrier.Set(baggageHeader, bStr) @@ -30,7 +30,7 @@ func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { // Extract returns a copy of parent with the baggage from the carrier added. // If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked // for multiple values extraction. Otherwise, Get is called. -func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { +func (Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { if multiCarrier, ok := carrier.(ValuesGetter); ok { return extractMultiBaggage(parent, multiCarrier) } @@ -38,7 +38,7 @@ func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context } // Fields returns the keys who's values are set with Inject. -func (b Baggage) Fields() []string { +func (Baggage) Fields() []string { return []string{baggageHeader} } diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go index 5c8c26ea..0a32c59a 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -20,7 +20,7 @@ type TextMapCarrier interface { // must never be done outside of a new major release. // Set stores the key-value pair. - Set(key string, value string) + Set(key, value string) // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. @@ -88,7 +88,7 @@ func (hc HeaderCarrier) Values(key string) []string { } // Set stores the key-value pair. -func (hc HeaderCarrier) Set(key string, value string) { +func (hc HeaderCarrier) Set(key, value string) { http.Header(hc).Set(key, value) } diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 6870e316..6692d266 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -36,7 +36,7 @@ var ( ) // Inject injects the trace context from ctx into carrier. -func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { +func (TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { sc := trace.SpanContextFromContext(ctx) if !sc.IsValid() { return @@ -77,7 +77,7 @@ func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) cont return trace.ContextWithRemoteSpanContext(ctx, sc) } -func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { +func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { h := carrier.Get(traceparentHeader) if h == "" { return trace.SpanContext{} @@ -151,6 +151,6 @@ func extractPart(dst []byte, h *string, n int) bool { } // Fields returns the keys who's values are set with Inject. -func (tc TraceContext) Fields() []string { +func (TraceContext) Fields() []string { return []string{traceparentHeader, tracestateHeader} } diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/LICENSE +++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go index 68d296cb..1be472e9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go @@ -19,7 +19,7 @@ import ( // to the case-insensitive string value of "true" (i.e. "True" and "TRUE" // will also enable this). var Resource = newFeature("RESOURCE", func(v string) (string, bool) { - if strings.ToLower(v) == "true" { + if strings.EqualFold(v, "true") { return v, true } return "", false @@ -59,7 +59,7 @@ func (f Feature[T]) Lookup() (v T, ok bool) { return f.parse(vRaw) } -// Enabled returns if the feature is enabled. +// Enabled reports whether the feature is enabled. func (f Feature[T]) Enabled() bool { _, ok := f.Lookup() return ok diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go index 203cd9d6..c6440a13 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go @@ -7,6 +7,7 @@ import ( "context" "errors" "os" + "strconv" "strings" "sync" @@ -17,12 +18,15 @@ import ( // config contains configuration options for a MeterProvider. type config struct { - res *resource.Resource - readers []Reader - views []View - exemplarFilter exemplar.Filter + res *resource.Resource + readers []Reader + views []View + exemplarFilter exemplar.Filter + cardinalityLimit int } +const defaultCardinalityLimit = 0 + // readerSignals returns a force-flush and shutdown function for a // MeterProvider to call in their respective options. All Readers c contains // will have their force-flush and shutdown methods unified into returned @@ -69,8 +73,9 @@ func unifyShutdown(funcs []func(context.Context) error) func(context.Context) er // newConfig returns a config configured with options. func newConfig(options []Option) config { conf := config{ - res: resource.Default(), - exemplarFilter: exemplar.TraceBasedFilter, + res: resource.Default(), + exemplarFilter: exemplar.TraceBasedFilter, + cardinalityLimit: cardinalityLimitFromEnv(), } for _, o := range meterProviderOptionsFromEnv() { conf = o.apply(conf) @@ -155,6 +160,21 @@ func WithExemplarFilter(filter exemplar.Filter) Option { }) } +// WithCardinalityLimit sets the cardinality limit for the MeterProvider. +// +// The cardinality limit is the hard limit on the number of metric datapoints +// that can be collected for a single instrument in a single collect cycle. +// +// Setting this to a zero or negative value means no limit is applied. +func WithCardinalityLimit(limit int) Option { + // For backward compatibility, the environment variable `OTEL_GO_X_CARDINALITY_LIMIT` + // can also be used to set this value. 
+ return optionFunc(func(cfg config) config { + cfg.cardinalityLimit = limit + return cfg + }) +} + func meterProviderOptionsFromEnv() []Option { var opts []Option // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar @@ -170,3 +190,17 @@ func meterProviderOptionsFromEnv() []Option { } return opts } + +func cardinalityLimitFromEnv() int { + const cardinalityLimitKey = "OTEL_GO_X_CARDINALITY_LIMIT" + v := strings.TrimSpace(os.Getenv(cardinalityLimitKey)) + if v == "" { + return defaultCardinalityLimit + } + n, err := strconv.Atoi(v) + if err != nil { + otel.Handle(err) + return defaultCardinalityLimit + } + return n +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go index 90a4ae16..0f3b9d62 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go @@ -39,6 +39,30 @@ // Meter.RegisterCallback and Registration.Unregister to add and remove // callbacks without leaking memory. // +// # Cardinality Limits +// +// Cardinality refers to the number of unique attributes collected. High cardinality can lead to +// excessive memory usage, increased storage costs, and backend performance issues. +// +// Currently, the OpenTelemetry Go Metric SDK does not enforce a cardinality limit by default +// (note that this may change in a future release). Use [WithCardinalityLimit] to set the +// cardinality limit as desired. +// +// New attribute sets are dropped when the cardinality limit is reached. The measurement of +// these sets are aggregated into +// a special attribute set containing attribute.Bool("otel.metric.overflow", true). +// This ensures total metric values (e.g., Sum, Count) remain correct for the +// collection cycle, but information about the specific dropped sets +// is not preserved. +// +// Recommendations: +// +// - Set the limit based on the theoretical maximum combinations or expected +// active combinations. The OpenTelemetry Specification recommends a default of 2000. +// - A too high of a limit increases worst-case memory overhead in the SDK and may cause downstream +// issues for databases that cannot handle high cardinality. +// - A too low of a limit causes loss of attribute detail as more data falls into overflow. +// // See [go.opentelemetry.io/otel/metric] for more information about // the metric API. // diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go index 549d3bd5..38b8745e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go @@ -58,10 +58,7 @@ func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.Reservoi // SimpleFixedSizeExemplarReservoir with a reservoir equal to the // smaller of the maximum number of buckets configured on the // aggregation or twenty (e.g. min(20, max_buckets)). 
- n = int(a.MaxSize) - if n > 20 { - n = 20 - } + n = min(int(a.MaxSize), 20) } else { // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir // This Exemplar reservoir MAY take a configuration parameter for @@ -69,11 +66,11 @@ func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.Reservoi // provided, the default size MAY be the number of possible // concurrent threads (e.g. number of CPUs) to help reduce // contention. Otherwise, a default size of 1 SHOULD be used. - n = runtime.NumCPU() - if n < 1 { - // Should never be the case, but be defensive. - n = 1 - } + // + // Use runtime.GOMAXPROCS instead of runtime.NumCPU to support + // containerized environments that may have less than the total number + // of logical CPUs available on the local machine allocated to it. + n = max(runtime.GOMAXPROCS(0), 1) } return exemplar.FixedSizeReservoirProvider(n) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go index b595e2ac..b50f5c15 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go @@ -24,11 +24,11 @@ func TraceBasedFilter(ctx context.Context) bool { } // AlwaysOnFilter is a [Filter] that always offers measurements. -func AlwaysOnFilter(ctx context.Context) bool { +func AlwaysOnFilter(context.Context) bool { return true } // AlwaysOffFilter is a [Filter] that never offers measurements. -func AlwaysOffFilter(ctx context.Context) bool { +func AlwaysOffFilter(context.Context) bool { return false } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go index 1fb1e009..08e8f68f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go @@ -14,7 +14,7 @@ import ( // FixedSizeReservoirProvider returns a provider of [FixedSizeReservoir]. func FixedSizeReservoirProvider(k int) ReservoirProvider { - return func(_ attribute.Set) Reservoir { + return func(attribute.Set) Reservoir { return NewFixedSizeReservoir(k) } } @@ -56,7 +56,7 @@ func newFixedSizeReservoir(s *storage) *FixedSizeReservoir { // randomFloat64 returns, as a float64, a uniform pseudo-random number in the // open interval (0.0,1.0). -func (r *FixedSizeReservoir) randomFloat64() float64 { +func (*FixedSizeReservoir) randomFloat64() float64 { // TODO: Use an algorithm that avoids rejection sampling. For example: // // const precision = 1 << 53 // 2^53 @@ -125,13 +125,11 @@ func (r *FixedSizeReservoir) Offer(ctx context.Context, t time.Time, n Value, a if int(r.count) < cap(r.store) { r.store[r.count] = newMeasurement(ctx, t, n, a) - } else { - if r.count == r.next { - // Overwrite a random existing measurement with the one offered. - idx := int(rand.Int64N(int64(cap(r.store)))) - r.store[idx] = newMeasurement(ctx, t, n, a) - r.advance() - } + } else if r.count == r.next { + // Overwrite a random existing measurement with the one offered. 
+ idx := int(rand.Int64N(int64(cap(r.store)))) + r.store[idx] = newMeasurement(ctx, t, n, a) + r.advance() } r.count++ } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go index 3b76cf30..decab613 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go @@ -16,7 +16,7 @@ import ( func HistogramReservoirProvider(bounds []float64) ReservoirProvider { cp := slices.Clone(bounds) slices.Sort(cp) - return func(_ attribute.Set) Reservoir { + return func(attribute.Set) Reservoir { return NewHistogramReservoir(cp) } } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go index 18891ed5..63cccc50 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go @@ -75,7 +75,7 @@ type Instrument struct { nonComparable // nolint: unused } -// IsEmpty returns if all Instrument fields are their zero-value. +// IsEmpty reports whether all Instrument fields are their zero-value. func (i Instrument) IsEmpty() bool { return i.Name == "" && i.Description == "" && @@ -204,7 +204,7 @@ func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.Record i.aggregate(ctx, val, c.Attributes()) } -func (i *int64Inst) Enabled(_ context.Context) bool { +func (i *int64Inst) Enabled(context.Context) bool { return len(i.measures) != 0 } @@ -245,7 +245,7 @@ func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.Re i.aggregate(ctx, val, c.Attributes()) } -func (i *float64Inst) Enabled(_ context.Context) bool { +func (i *float64Inst) Enabled(context.Context) bool { return len(i.measures) != 0 } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go index 8396faaa..129920cb 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go @@ -18,10 +18,10 @@ func dropReservoir[N int64 | float64](attribute.Set) FilteredExemplarReservoir[N type dropRes[N int64 | float64] struct{} // Offer does nothing, all measurements offered will be dropped. -func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} +func (*dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} // Collect resets dest. No exemplars will ever be returned. 
-func (r *dropRes[N]) Collect(dest *[]exemplar.Exemplar) { +func (*dropRes[N]) Collect(dest *[]exemplar.Exemplar) { clear(*dest) // Erase elements to let GC collect objects *dest = (*dest)[:0] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go index ae1f5934..857eddf3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -183,8 +183,8 @@ func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin int32, length int) var count int32 for high-low >= p.maxSize { - low = low >> 1 - high = high >> 1 + low >>= 1 + high >>= 1 count++ if count > expoMaxScale-expoMinScale { return count @@ -225,7 +225,7 @@ func (b *expoBuckets) record(bin int32) { b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...) } - copy(b.counts[shift:origLen+int(shift)], b.counts[:]) + copy(b.counts[shift:origLen+int(shift)], b.counts) b.counts = b.counts[:newLength] for i := 1; i < int(shift); i++ { b.counts[i] = 0 @@ -264,7 +264,7 @@ func (b *expoBuckets) downscale(delta int32) { // new Counts: [4, 14, 30, 10] if len(b.counts) <= 1 || delta < 1 { - b.startBin = b.startBin >> delta + b.startBin >>= delta return } @@ -282,7 +282,7 @@ func (b *expoBuckets) downscale(delta int32) { lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps) b.counts = b.counts[:lastIdx+1] - b.startBin = b.startBin >> delta + b.startBin >>= delta } // newExponentialHistogram returns an Aggregator that summarizes a set of @@ -350,7 +350,9 @@ func (e *expoHistogram[N]) measure( v.res.Offer(ctx, value, droppedAttr) } -func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { +func (e *expoHistogram[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. @@ -411,7 +413,9 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { return n } -func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { +func (e *expoHistogram[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go index d3068484..736287e7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -140,7 +140,9 @@ type histogram[N int64 | float64] struct { start time.Time } -func (s *histogram[N]) delta(dest *metricdata.Aggregation) int { +func (s *histogram[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.Histogram, memory reuse is missed. 
In that @@ -190,7 +192,9 @@ func (s *histogram[N]) delta(dest *metricdata.Aggregation) int { return n } -func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int { +func (s *histogram[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.Histogram, memory reuse is missed. In that diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go index 350ccebd..4bbe624c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -55,7 +55,9 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute. s.values[attr.Equivalent()] = d } -func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { +func (s *lastValue[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). @@ -75,7 +77,9 @@ func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { return n } -func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int { +func (s *lastValue[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). @@ -126,7 +130,9 @@ type precomputedLastValue[N int64 | float64] struct { *lastValue[N] } -func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { +func (s *precomputedLastValue[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). @@ -146,7 +152,9 @@ func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { return n } -func (s *precomputedLastValue[N]) cumulative(dest *metricdata.Aggregation) int { +func (s *precomputedLastValue[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go index 612cde43..1b4b2304 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -70,7 +70,9 @@ type sum[N int64 | float64] struct { start time.Time } -func (s *sum[N]) delta(dest *metricdata.Aggregation) int { +func (s *sum[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.Sum, memory reuse is missed. 
In that case, @@ -105,7 +107,9 @@ func (s *sum[N]) delta(dest *metricdata.Aggregation) int { return n } -func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { +func (s *sum[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, @@ -165,7 +169,9 @@ type precomputedSum[N int64 | float64] struct { reported map[attribute.Distinct]N } -func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int { +func (s *precomputedSum[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() newReported := make(map[attribute.Distinct]N) @@ -206,7 +212,9 @@ func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int { return n } -func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int { +func (s *precomputedSum[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md index 59f736b7..be0714a5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md @@ -1,47 +1,16 @@ # Experimental Features -The metric SDK contains features that have not yet stabilized in the OpenTelemetry specification. -These features are added to the OpenTelemetry Go metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. +The Metric SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go Metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. These feature may change in backwards incompatible ways as feedback is applied. See the [Compatibility and Stability](#compatibility-and-stability) section for more information. ## Features -- [Cardinality Limit](#cardinality-limit) - [Exemplars](#exemplars) - [Instrument Enabled](#instrument-enabled) -### Cardinality Limit - -The cardinality limit is the hard limit on the number of metric streams that can be collected for a single instrument. - -This experimental feature can be enabled by setting the `OTEL_GO_X_CARDINALITY_LIMIT` environment value. -The value must be an integer value. -All other values are ignored. - -If the value set is less than or equal to `0`, no limit will be applied. - -#### Examples - -Set the cardinality limit to 2000. - -```console -export OTEL_GO_X_CARDINALITY_LIMIT=2000 -``` - -Set an infinite cardinality limit (functionally equivalent to disabling the feature). - -```console -export OTEL_GO_X_CARDINALITY_LIMIT=-1 -``` - -Disable the cardinality limit. - -```console -unset OTEL_GO_X_CARDINALITY_LIMIT -``` - ### Exemplars A sample of measurements made may be exported directly as a set of exemplars. 
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go index a9860623..294dcf84 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go @@ -10,25 +10,8 @@ package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x" import ( "context" "os" - "strconv" ) -// CardinalityLimit is an experimental feature flag that defines if -// cardinality limits should be applied to the recorded metric data-points. -// -// To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment -// variable to the integer limit value you want to use. -// -// Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0 -// will disable the cardinality limits. -var CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) { - n, err := strconv.Atoi(v) - if err != nil { - return 0, false - } - return n, true -}) - // Feature is an experimental feature control flag. It provides a uniform way // to interact with these feature flags and parse their values. type Feature[T any] struct { @@ -36,6 +19,7 @@ type Feature[T any] struct { parse func(v string) (T, bool) } +//nolint:unused func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { const envKeyRoot = "OTEL_GO_X_" return Feature[T]{ @@ -63,7 +47,7 @@ func (f Feature[T]) Lookup() (v T, ok bool) { return f.parse(vRaw) } -// Enabled returns if the feature is enabled. +// Enabled reports whether the feature is enabled. func (f Feature[T]) Enabled() bool { _, ok := f.Lookup() return ok @@ -73,7 +57,7 @@ func (f Feature[T]) Enabled() bool { // // EnabledInstrument interface is implemented by synchronous instruments. type EnabledInstrument interface { - // Enabled returns whether the instrument will process measurements for the given context. + // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go index 96e77908..85d3dc20 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go @@ -129,7 +129,7 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr } // MarshalLog returns logging data about the ManualReader. 
-func (r *ManualReader) MarshalLog() interface{} { +func (r *ManualReader) MarshalLog() any { r.mu.Lock() down := r.isShutdown r.mu.Unlock() diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go index c500fd9f..e0a1e90e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go @@ -12,7 +12,6 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" ) @@ -423,7 +422,7 @@ func (m *meter) Float64ObservableGauge( } func validateInstrumentName(name string) error { - if len(name) == 0 { + if name == "" { return fmt.Errorf("%w: %s: is empty", ErrInstrumentName, name) } if len(name) > 255 { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go index 0a48aed7..f08c771a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -114,7 +114,7 @@ func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *Peri cancel: cancel, done: make(chan struct{}), rmPool: sync.Pool{ - New: func() interface{} { + New: func() any { return &metricdata.ResourceMetrics{} }, }, @@ -234,7 +234,7 @@ func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMet } // collect unwraps p as a produceHolder and returns its produce results. -func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricdata.ResourceMetrics) error { +func (r *PeriodicReader) collect(ctx context.Context, p any, rm *metricdata.ResourceMetrics) error { if p == nil { return ErrReaderNotRegistered } @@ -349,7 +349,7 @@ func (r *PeriodicReader) Shutdown(ctx context.Context) error { } // MarshalLog returns logging data about the PeriodicReader. 
-func (r *PeriodicReader) MarshalLog() interface{} { +func (r *PeriodicReader) MarshalLog() any { r.mu.Lock() down := r.isShutdown r.mu.Unlock() diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go index 7bdb699c..408fddc8 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -17,7 +17,6 @@ import ( "go.opentelemetry.io/otel/sdk/metric/exemplar" "go.opentelemetry.io/otel/sdk/metric/internal" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" - "go.opentelemetry.io/otel/sdk/metric/internal/x" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" ) @@ -37,17 +36,24 @@ type instrumentSync struct { compAgg aggregate.ComputeAggregation } -func newPipeline(res *resource.Resource, reader Reader, views []View, exemplarFilter exemplar.Filter) *pipeline { +func newPipeline( + res *resource.Resource, + reader Reader, + views []View, + exemplarFilter exemplar.Filter, + cardinalityLimit int, +) *pipeline { if res == nil { res = resource.Empty() } return &pipeline{ - resource: res, - reader: reader, - views: views, - int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{}, - float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{}, - exemplarFilter: exemplarFilter, + resource: res, + reader: reader, + views: views, + int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{}, + float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{}, + exemplarFilter: exemplarFilter, + cardinalityLimit: cardinalityLimit, // aggregations is lazy allocated when needed. } } @@ -65,12 +71,13 @@ type pipeline struct { views []View sync.Mutex - int64Measures map[observableID[int64]][]aggregate.Measure[int64] - float64Measures map[observableID[float64]][]aggregate.Measure[float64] - aggregations map[instrumentation.Scope][]instrumentSync - callbacks []func(context.Context) error - multiCallbacks list.List - exemplarFilter exemplar.Filter + int64Measures map[observableID[int64]][]aggregate.Measure[int64] + float64Measures map[observableID[float64]][]aggregate.Measure[float64] + aggregations map[instrumentation.Scope][]instrumentSync + callbacks []func(context.Context) error + multiCallbacks list.List + exemplarFilter exemplar.Filter + cardinalityLimit int } // addInt64Measure adds a new int64 measure to the pipeline for each observer. @@ -388,10 +395,9 @@ func (i *inserter[N]) cachedAggregator( b.Filter = stream.AttributeFilter // A value less than or equal to zero will disable the aggregation // limits for the builder (an all the created aggregates). - // CardinalityLimit.Lookup returns 0 by default if unset (or + // cardinalityLimit will be 0 by default if unset (or // unrecognized input). Use that value directly. - b.AggregationLimit, _ = x.CardinalityLimit.Lookup() - + b.AggregationLimit = i.pipeline.cardinalityLimit in, out, err := i.aggregateFunc(b, stream.Aggregation, kind) if err != nil { return aggVal[N]{0, nil, err} @@ -426,7 +432,7 @@ func (i *inserter[N]) logConflict(id instID) { } const msg = "duplicate metric stream definitions" - args := []interface{}{ + args := []any{ "names", fmt.Sprintf("%q, %q", existing.Name, id.Name), "descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description), "kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind), @@ -460,7 +466,7 @@ func (i *inserter[N]) logConflict(id instID) { global.Warn(msg, args...) 
} -func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID { +func (*inserter[N]) instID(kind InstrumentKind, stream Stream) instID { var zero N return instID{ Name: stream.Name, @@ -590,10 +596,16 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { // measurement. type pipelines []*pipeline -func newPipelines(res *resource.Resource, readers []Reader, views []View, exemplarFilter exemplar.Filter) pipelines { +func newPipelines( + res *resource.Resource, + readers []Reader, + views []View, + exemplarFilter exemplar.Filter, + cardinalityLimit int, +) pipelines { pipes := make([]*pipeline, 0, len(readers)) for _, r := range readers { - p := newPipeline(res, r, views, exemplarFilter) + p := newPipeline(res, r, views, exemplarFilter, cardinalityLimit) r.register(p) pipes = append(pipes, p) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go index 2fca89e5..b0a6ec58 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go @@ -42,7 +42,7 @@ func NewMeterProvider(options ...Option) *MeterProvider { flush, sdown := conf.readerSignals() mp := &MeterProvider{ - pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter), + pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter, conf.cardinalityLimit), forceFlush: flush, shutdown: sdown, } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go index c96e500a..5c1cea82 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go @@ -117,7 +117,7 @@ type produceHolder struct { type shutdownProducer struct{} // produce returns an ErrReaderShutdown error. -func (p shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error { +func (shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error { return ErrReaderShutdown } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index 0e5adc1a..dd9051a7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. func version() string { - return "1.37.0" + return "1.38.0" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index cefe4ab9..3f20eb7a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type ( @@ -72,7 +72,7 @@ func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) // Detect returns a *Resource that describes the string as a value // corresponding to attribute.Key as well as the specific schemaURL. 
-func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { +func (sd stringDetector) Detect(context.Context) (*Resource, error) { value, err := sd.F() if err != nil { return nil, fmt.Errorf("%s: %w", string(sd.K), err) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index 0d861971..bbe142d2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -11,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type containerIDProvider func() (string, error) @@ -27,7 +27,7 @@ const cgroupPath = "/proc/self/cgroup" // Detect returns a *Resource that describes the id of the container. // If no container id found, an empty resource will be returned. -func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (cgroupContainerIDDetector) Detect(context.Context) (*Resource, error) { containerID, err := containerID() if err != nil { return nil, err diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index 16a062ad..4a1b017e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go index 78190392..5fed33d4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -8,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type hostIDProvider func() (string, error) @@ -96,7 +96,7 @@ func (r *hostIDReaderLinux) read() (string, error) { type hostIDDetector struct{} // Detect returns a *Resource containing the platform specific host id. -func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (hostIDDetector) Detect(context.Context) (*Resource, error) { hostID, err := hostID() if err != nil { return nil, err diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 01b4d27a..51da76e8 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type osDescriptionProvider func() (string, error) @@ -32,7 +32,7 @@ type ( // Detect returns a *Resource that describes the operating system type the // service is running on. -func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { +func (osTypeDetector) Detect(context.Context) (*Resource, error) { osType := runtimeOS() osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) @@ -45,7 +45,7 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { // Detect returns a *Resource that describes the operating system the // service is running on. 
-func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { +func (osDescriptionDetector) Detect(context.Context) (*Resource, error) { description, err := osDescription() if err != nil { return nil, err diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go index f537e5ca..7252af79 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -63,12 +63,12 @@ func parseOSReleaseFile(file io.Reader) map[string]string { return values } -// skip returns true if the line is blank or starts with a '#' character, and +// skip reports whether the line is blank or starts with a '#' character, and // therefore should be skipped from processing. func skip(line string) bool { line = strings.TrimSpace(line) - return len(line) == 0 || strings.HasPrefix(line, "#") + return line == "" || strings.HasPrefix(line, "#") } // parse attempts to split the provided line on the first '=' character, and then @@ -76,7 +76,7 @@ func skip(line string) bool { func parse(line string) (string, string, bool) { k, v, found := strings.Cut(line, "=") - if !found || len(k) == 0 { + if !found || k == "" { return "", "", false } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index 6712ce80..138e5772 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type ( @@ -112,19 +112,19 @@ type ( // Detect returns a *Resource that describes the process identifier (PID) of the // executing process. -func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (processPIDDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil } // Detect returns a *Resource that describes the name of the process executable. -func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { +func (processExecutableNameDetector) Detect(context.Context) (*Resource, error) { executableName := filepath.Base(commandArgs()[0]) return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil } // Detect returns a *Resource that describes the full path of the process executable. -func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { +func (processExecutablePathDetector) Detect(context.Context) (*Resource, error) { executablePath, err := executablePath() if err != nil { return nil, err @@ -135,13 +135,13 @@ func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, err // Detect returns a *Resource that describes all the command arguments as received // by the process. -func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { +func (processCommandArgsDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil } // Detect returns a *Resource that describes the username of the user that owns the // process. 
-func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { +func (processOwnerDetector) Detect(context.Context) (*Resource, error) { owner, err := owner() if err != nil { return nil, err @@ -152,17 +152,17 @@ func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { // Detect returns a *Resource that describes the name of the compiler used to compile // this process image. -func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeNameDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil } // Detect returns a *Resource that describes the version of the runtime of this process. -func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeVersionDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil } // Detect returns a *Resource that describes the runtime of this process. -func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeDescriptionDetector) Detect(context.Context) (*Resource, error) { runtimeDescription := fmt.Sprintf( "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index 09b91e1e..28e1e4f7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -112,7 +112,7 @@ func (r *Resource) String() string { } // MarshalLog is the marshaling function used by the logging system to represent this Resource. -func (r *Resource) MarshalLog() interface{} { +func (r *Resource) MarshalLog() any { return struct { Attributes attribute.Set SchemaURL string @@ -148,7 +148,7 @@ func (r *Resource) Iter() attribute.Iterator { return r.attrs.Iter() } -// Equal returns whether r and o represent the same resource. Two resources can +// Equal reports whether r and o represent the same resource. Two resources can // be equal even if they have different schema URLs. // // See the documentation on the [Resource] type for the pitfalls of using == diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 6966ed86..9bc3e525 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -6,24 +6,35 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "errors" + "fmt" "sync" "sync/atomic" "time" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/env" + "go.opentelemetry.io/otel/sdk/trace/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" ) // Defaults for BatchSpanProcessorOptions. const ( - DefaultMaxQueueSize = 2048 - DefaultScheduleDelay = 5000 + DefaultMaxQueueSize = 2048 + // DefaultScheduleDelay is the delay interval between two consecutive exports, in milliseconds. 
+ DefaultScheduleDelay = 5000 + // DefaultExportTimeout is the duration after which an export is cancelled, in milliseconds. DefaultExportTimeout = 30000 DefaultMaxExportBatchSize = 512 ) +var queueFull = otelconv.ErrorTypeAttr("queue_full") + // BatchSpanProcessorOption configures a BatchSpanProcessor. type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) @@ -67,6 +78,11 @@ type batchSpanProcessor struct { queue chan ReadOnlySpan dropped uint32 + selfObservabilityEnabled bool + callbackRegistration metric.Registration + spansProcessedCounter otelconv.SDKProcessorSpanProcessed + componentNameAttr attribute.KeyValue + batch []ReadOnlySpan batchMutex sync.Mutex timer *time.Timer @@ -87,11 +103,7 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) if maxExportBatchSize > maxQueueSize { - if DefaultMaxExportBatchSize > maxQueueSize { - maxExportBatchSize = maxQueueSize - } else { - maxExportBatchSize = DefaultMaxExportBatchSize - } + maxExportBatchSize = min(DefaultMaxExportBatchSize, maxQueueSize) } o := BatchSpanProcessorOptions{ @@ -112,6 +124,21 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO stopCh: make(chan struct{}), } + if x.SelfObservability.Enabled() { + bsp.selfObservabilityEnabled = true + bsp.componentNameAttr = componentName() + + var err error + bsp.spansProcessedCounter, bsp.callbackRegistration, err = newBSPObs( + bsp.componentNameAttr, + func() int64 { return int64(len(bsp.queue)) }, + int64(bsp.o.MaxQueueSize), + ) + if err != nil { + otel.Handle(err) + } + } + bsp.stopWait.Add(1) go func() { defer bsp.stopWait.Done() @@ -122,8 +149,61 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO return bsp } +var processorIDCounter atomic.Int64 + +// nextProcessorID returns an identifier for this batch span processor, +// starting with 0 and incrementing by 1 each time it is called. +func nextProcessorID() int64 { + return processorIDCounter.Add(1) - 1 +} + +func componentName() attribute.KeyValue { + id := nextProcessorID() + name := fmt.Sprintf("%s/%d", otelconv.ComponentTypeBatchingSpanProcessor, id) + return semconv.OTelComponentName(name) +} + +// newBSPObs creates and returns a new set of metrics instruments and a +// registration for a BatchSpanProcessor. It is the caller's responsibility +// to unregister the registration when it is no longer needed. +func newBSPObs( + cmpnt attribute.KeyValue, + qLen func() int64, + qMax int64, +) (otelconv.SDKProcessorSpanProcessed, metric.Registration, error) { + meter := otel.GetMeterProvider().Meter( + selfObsScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter) + + qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter) + err = errors.Join(err, e) + + spansProcessed, e := otelconv.NewSDKProcessorSpanProcessed(meter) + err = errors.Join(err, e) + + cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor + attrs := metric.WithAttributes(cmpnt, cmpntT) + + reg, e := meter.RegisterCallback( + func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(qSize.Inst(), qLen(), attrs) + o.ObserveInt64(qCap.Inst(), qMax, attrs) + return nil + }, + qSize.Inst(), + qCap.Inst(), + ) + err = errors.Join(err, e) + + return spansProcessed, reg, err +} + // OnStart method does nothing. 
-func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {} +func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} // OnEnd method enqueues a ReadOnlySpan for later processing. func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { @@ -162,6 +242,9 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { case <-ctx.Done(): err = ctx.Err() } + if bsp.selfObservabilityEnabled { + err = errors.Join(err, bsp.callbackRegistration.Unregister()) + } }) return err } @@ -171,7 +254,7 @@ type forceFlushSpan struct { flushed chan struct{} } -func (f forceFlushSpan) SpanContext() trace.SpanContext { +func (forceFlushSpan) SpanContext() trace.SpanContext { return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled}) } @@ -274,6 +357,11 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { if l := len(bsp.batch); l > 0 { global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) + if bsp.selfObservabilityEnabled { + bsp.spansProcessedCounter.Add(ctx, int64(l), + bsp.componentNameAttr, + bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor)) + } err := bsp.e.ExportSpans(ctx, bsp.batch) // A new batch is always created after exporting, even if the batch failed to be exported. @@ -382,11 +470,17 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R case bsp.queue <- sd: return true case <-ctx.Done(): + if bsp.selfObservabilityEnabled { + bsp.spansProcessedCounter.Add(ctx, 1, + bsp.componentNameAttr, + bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor), + bsp.spansProcessedCounter.AttrErrorType(queueFull)) + } return false } } -func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool { +func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { if !sd.SpanContext().IsSampled() { return false } @@ -396,12 +490,18 @@ func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) b return true default: atomic.AddUint32(&bsp.dropped, 1) + if bsp.selfObservabilityEnabled { + bsp.spansProcessedCounter.Add(ctx, 1, + bsp.componentNameAttr, + bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor), + bsp.spansProcessedCounter.AttrErrorType(queueFull)) + } } return false } // MarshalLog is the marshaling function used by the logging system to represent this Span Processor. -func (bsp *batchSpanProcessor) MarshalLog() interface{} { +func (bsp *batchSpanProcessor) MarshalLog() any { return struct { Type string SpanExporter SpanExporter diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go index 1f60524e..e58e7f6e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go @@ -6,5 +6,8 @@ Package trace contains support for OpenTelemetry distributed tracing. The following assumes a basic familiarity with OpenTelemetry concepts. See https://opentelemetry.io. + +See [go.opentelemetry.io/otel/sdk/trace/internal/x] for information about +the experimental features. 
*/ package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go index c8d3fb7e..3649322a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -32,7 +32,7 @@ type randomIDGenerator struct{} var _ IDGenerator = &randomIDGenerator{} // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. -func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID { +func (*randomIDGenerator) NewSpanID(context.Context, trace.TraceID) trace.SpanID { sid := trace.SpanID{} for { binary.NativeEndian.PutUint64(sid[:], rand.Uint64()) @@ -45,7 +45,7 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace // NewIDs returns a non-zero trace ID and a non-zero span ID from a // randomly-chosen sequence. -func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { +func (*randomIDGenerator) NewIDs(context.Context) (trace.TraceID, trace.SpanID) { tid := trace.TraceID{} sid := trace.SpanID{} for { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md new file mode 100644 index 00000000..feec16fa --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md @@ -0,0 +1,35 @@ +# Experimental Features + +The Trace SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go Trace SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These features may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Self-Observability](#self-observability) + +### Self-Observability + +The SDK provides a self-observability feature that allows you to monitor the SDK itself. + +To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`. + +When enabled, the SDK will create the following metrics using the global `MeterProvider`: + +- `otel.sdk.span.live` +- `otel.sdk.span.started` + +Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. + +[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. 
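The README above only documents the opt-in switch. As a rough illustration (not part of the vendored change), the sketch below shows how an application consuming this updated SDK might enable the experimental self-observability metrics: the `OTEL_GO_X_SELF_OBSERVABILITY` variable has to be set before tracers are created, and a global `MeterProvider` must be registered for the metrics to go anywhere. The stdoutmetric exporter, the 5-second reader interval, and the tracer/span names are illustrative assumptions only.

```go
// Illustrative sketch only: opting in to the experimental
// OTEL_GO_X_SELF_OBSERVABILITY feature described in the README above.
// The exporter choice and interval are assumptions, not part of this diff.
package main

import (
	"context"
	"log"
	"os"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// The flag is read when tracers and span processors are constructed,
	// so it must be set before the TracerProvider below is used.
	os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true")

	// Self-observability metrics are reported through the global
	// MeterProvider, so register one that actually exports somewhere.
	exp, err := stdoutmetric.New()
	if err != nil {
		log.Fatal(err)
	}
	mp := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(
			sdkmetric.NewPeriodicReader(exp, sdkmetric.WithInterval(5*time.Second)),
		),
	)
	otel.SetMeterProvider(mp)
	defer func() { _ = mp.Shutdown(context.Background()) }()

	tp := sdktrace.NewTracerProvider()
	defer func() { _ = tp.Shutdown(context.Background()) }()

	// Starting and ending a span should be reflected in the
	// otel.sdk.span.started and otel.sdk.span.live instruments
	// listed in the README above.
	_, span := tp.Tracer("example").Start(context.Background(), "operation")
	span.End()
}
```

When a batch span processor is in use, the same flag also registers the processor-level instruments added elsewhere in this diff (queue size, queue capacity, and spans processed), so no extra application code should be needed beyond the setup sketched here.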
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go new file mode 100644 index 00000000..2fcbbcc6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/trace]. +package x // import "go.opentelemetry.io/otel/sdk/trace/internal/x" + +import ( + "os" + "strings" +) + +// SelfObservability is an experimental feature flag that determines if SDK +// self-observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled reports whether the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 0e2a2e7c..37ce2ac8 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -5,14 +5,20 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "errors" "fmt" "sync" "sync/atomic" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/sdk/trace/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" @@ -20,6 +26,7 @@ import ( const ( defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" + selfObsScopeName = "go.opentelemetry.io/otel/sdk/trace" ) // tracerProviderConfig. 
@@ -45,7 +52,7 @@ type tracerProviderConfig struct { } // MarshalLog is the marshaling function used by the logging system to represent this Provider. -func (cfg tracerProviderConfig) MarshalLog() interface{} { +func (cfg tracerProviderConfig) MarshalLog() any { return struct { SpanProcessors []SpanProcessor SamplerType string @@ -156,8 +163,18 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T t, ok := p.namedTracer[is] if !ok { t = &tracer{ - provider: p, - instrumentationScope: is, + provider: p, + instrumentationScope: is, + selfObservabilityEnabled: x.SelfObservability.Enabled(), + } + if t.selfObservabilityEnabled { + var err error + t.spanLiveMetric, t.spanStartedMetric, err = newInst() + if err != nil { + msg := "failed to create self-observability metrics for tracer: %w" + err := fmt.Errorf(msg, err) + otel.Handle(err) + } } p.namedTracer[is] = t } @@ -184,6 +201,23 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } +func newInst() (otelconv.SDKSpanLive, otelconv.SDKSpanStarted, error) { + m := otel.GetMeterProvider().Meter( + selfObsScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + var err error + spanLiveMetric, e := otelconv.NewSDKSpanLive(m) + err = errors.Join(err, e) + + spanStartedMetric, e := otelconv.NewSDKSpanStarted(m) + err = errors.Join(err, e) + + return spanLiveMetric, spanStartedMetric, err +} + // RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { // This check prevents calls during a shutdown. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index aa7b262d..689663d4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -110,14 +110,14 @@ func TraceIDRatioBased(fraction float64) Sampler { type alwaysOnSampler struct{} -func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { +func (alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { return SamplingResult{ Decision: RecordAndSample, Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), } } -func (as alwaysOnSampler) Description() string { +func (alwaysOnSampler) Description() string { return "AlwaysOnSampler" } @@ -131,14 +131,14 @@ func AlwaysSample() Sampler { type alwaysOffSampler struct{} -func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { +func (alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { return SamplingResult{ Decision: Drop, Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), } } -func (as alwaysOffSampler) Description() string { +func (alwaysOffSampler) Description() string { return "AlwaysOffSampler" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go index 664e13e0..411d9ccd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -39,7 +39,7 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { } // OnStart does nothing. 
-func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} +func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} // OnEnd immediately exports a ReadOnlySpan. func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { @@ -104,13 +104,13 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { } // ForceFlush does nothing as there is no data to flush. -func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error { +func (*simpleSpanProcessor) ForceFlush(context.Context) error { return nil } // MarshalLog is the marshaling function used by the logging system to represent // this Span Processor. -func (ssp *simpleSpanProcessor) MarshalLog() interface{} { +func (ssp *simpleSpanProcessor) MarshalLog() any { return struct { Type string Exporter SpanExporter diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go index d511d0f2..63aa3378 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go @@ -35,7 +35,7 @@ type snapshot struct { var _ ReadOnlySpan = snapshot{} -func (s snapshot) private() {} +func (snapshot) private() {} // Name returns the name of the span. func (s snapshot) Name() string { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 1785a4bb..b376051f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -61,6 +61,7 @@ type ReadOnlySpan interface { InstrumentationScope() instrumentation.Scope // InstrumentationLibrary returns information about the instrumentation // library that created the span. + // // Deprecated: please use InstrumentationScope instead. InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility // Resource returns information about the entity that produced the span. @@ -165,7 +166,7 @@ func (s *recordingSpan) SpanContext() trace.SpanContext { return s.spanContext } -// IsRecording returns if this span is being recorded. If this span has ended +// IsRecording reports whether this span is being recorded. If this span has ended // this will return false. func (s *recordingSpan) IsRecording() bool { if s == nil { @@ -177,7 +178,7 @@ func (s *recordingSpan) IsRecording() bool { return s.isRecording() } -// isRecording returns if this span is being recorded. If this span has ended +// isRecording reports whether this span is being recorded. If this span has ended // this will return false. // // This method assumes s.mu.Lock is held by the caller. @@ -495,6 +496,16 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } s.mu.Unlock() + if s.tracer.selfObservabilityEnabled { + defer func() { + // Add the span to the context to ensure the metric is recorded + // with the correct span context. 
+ ctx := trace.ContextWithSpan(context.Background(), s) + set := spanLiveSet(s.spanContext.IsSampled()) + s.tracer.spanLiveMetric.AddSet(ctx, -1, set) + }() + } + sps := s.tracer.provider.getSpanProcessors() if len(sps) == 0 { return @@ -545,7 +556,7 @@ func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { s.addEvent(semconv.ExceptionEventName, opts...) } -func typeStr(i interface{}) string { +func typeStr(i any) string { t := reflect.TypeOf(i) if t.PkgPath() == "" && t.Name() == "" { // Likely a builtin type. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index 0b65ae9a..e965c4cc 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -7,7 +7,9 @@ import ( "context" "time" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -17,6 +19,10 @@ type tracer struct { provider *TracerProvider instrumentationScope instrumentation.Scope + + selfObservabilityEnabled bool + spanLiveMetric otelconv.SDKSpanLive + spanStartedMetric otelconv.SDKSpanStarted } var _ trace.Tracer = &tracer{} @@ -46,17 +52,25 @@ func (tr *tracer) Start( } s := tr.newSpan(ctx, name, &config) + newCtx := trace.ContextWithSpan(ctx, s) + if tr.selfObservabilityEnabled { + psc := trace.SpanContextFromContext(ctx) + set := spanStartedSet(psc, s) + tr.spanStartedMetric.AddSet(newCtx, 1, set) + } + if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { sps := tr.provider.getSpanProcessors() for _, sp := range sps { + // Use original context. sp.sp.OnStart(ctx, rw) } } if rtt, ok := s.(runtimeTracer); ok { - ctx = rtt.runtimeTrace(ctx) + newCtx = rtt.runtimeTrace(newCtx) } - return trace.ContextWithSpan(ctx, s), s + return newCtx, s } type runtimeTracer interface { @@ -112,11 +126,12 @@ func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanCo if !isRecording(samplingResult) { return tr.newNonRecordingSpan(sc) } - return tr.newRecordingSpan(psc, sc, name, samplingResult, config) + return tr.newRecordingSpan(ctx, psc, sc, name, samplingResult, config) } // newRecordingSpan returns a new configured recordingSpan. func (tr *tracer) newRecordingSpan( + ctx context.Context, psc, sc trace.SpanContext, name string, sr SamplingResult, @@ -153,6 +168,14 @@ func (tr *tracer) newRecordingSpan( s.SetAttributes(sr.Attributes...) s.SetAttributes(config.Attributes()...) + if tr.selfObservabilityEnabled { + // Propagate any existing values from the context with the new span to + // the measurement context. 
+ ctx = trace.ContextWithSpan(ctx, s) + set := spanLiveSet(s.spanContext.IsSampled()) + tr.spanLiveMetric.AddSet(ctx, 1, set) + } + return s } @@ -160,3 +183,112 @@ func (tr *tracer) newRecordingSpan( func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan { return nonRecordingSpan{tracer: tr, sc: sc} } + +type parentState int + +const ( + parentStateNoParent parentState = iota + parentStateLocalParent + parentStateRemoteParent +) + +type samplingState int + +const ( + samplingStateDrop samplingState = iota + samplingStateRecordOnly + samplingStateRecordAndSample +) + +type spanStartedSetKey struct { + parent parentState + sampling samplingState +} + +var spanStartedSetCache = map[spanStartedSetKey]attribute.Set{ + {parentStateNoParent, samplingStateDrop}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + ), + {parentStateLocalParent, samplingStateDrop}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + ), + {parentStateRemoteParent, samplingStateDrop}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + ), + + {parentStateNoParent, samplingStateRecordOnly}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + ), + {parentStateLocalParent, samplingStateRecordOnly}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + ), + {parentStateRemoteParent, samplingStateRecordOnly}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + ), + + {parentStateNoParent, samplingStateRecordAndSample}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + ), + {parentStateLocalParent, samplingStateRecordAndSample}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + ), + {parentStateRemoteParent, samplingStateRecordAndSample}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + ), +} + +func spanStartedSet(psc trace.SpanContext, span trace.Span) attribute.Set { + key := spanStartedSetKey{ + parent: parentStateNoParent, + sampling: samplingStateDrop, + } + + if psc.IsValid() { + if psc.IsRemote() { + key.parent = parentStateRemoteParent + } else { + key.parent = parentStateLocalParent + } + } + + if span.IsRecording() { + if span.SpanContext().IsSampled() { + key.sampling = samplingStateRecordAndSample + } else { + key.sampling = samplingStateRecordOnly + } + } + + return spanStartedSetCache[key] +} + +type spanLiveSetKey struct { + sampled bool +} 
+ +var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{ + {true}: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + {false}: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordOnly, + ), + ), +} + +func spanLiveSet(sampled bool) attribute.Set { + key := spanLiveSetKey{sampled: sampled} + return spanLiveSetCache[key] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go deleted file mode 100644 index b84dd2c5..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -// version is the current release version of the metric SDK in use. -func version() string { - return "1.16.0-rc.1" -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index c0217af6..7f97cc31 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.37.0" + return "1.38.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md deleted file mode 100644 index 82e1f46b..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.20.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go deleted file mode 100644 index 6685c392..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go +++ /dev/null @@ -1,1198 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -import "go.opentelemetry.io/otel/attribute" - -// Describes HTTP attributes. -const ( - // HTTPMethodKey is the attribute Key conforming to the "http.method" - // semantic conventions. It represents the hTTP request method. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - HTTPMethodKey = attribute.Key("http.method") - - // HTTPStatusCodeKey is the attribute Key conforming to the - // "http.status_code" semantic conventions. It represents the [HTTP - // response status code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // RequirementLevel: ConditionallyRequired (If and only if one was - // received/sent.) - // Stability: stable - // Examples: 200 - HTTPStatusCodeKey = attribute.Key("http.status_code") -) - -// HTTPMethod returns an attribute KeyValue conforming to the "http.method" -// semantic conventions. It represents the hTTP request method. -func HTTPMethod(val string) attribute.KeyValue { - return HTTPMethodKey.String(val) -} - -// HTTPStatusCode returns an attribute KeyValue conforming to the -// "http.status_code" semantic conventions. 
It represents the [HTTP response -// status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPStatusCode(val int) attribute.KeyValue { - return HTTPStatusCodeKey.Int(val) -} - -// HTTP Server spans attributes -const ( - // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" - // semantic conventions. It represents the URI scheme identifying the used - // protocol. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'http', 'https' - HTTPSchemeKey = attribute.Key("http.scheme") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" - // semantic conventions. It represents the matched route (path template in - // the format used by the respective server framework). See note below - // - // Type: string - // RequirementLevel: ConditionallyRequired (If and only if it's available) - // Stability: stable - // Examples: '/users/:userID?', '{controller}/{action}/{id?}' - // Note: MUST NOT be populated when this is not supported by the HTTP - // server framework as the route attribute should have low-cardinality and - // the URI path can NOT substitute it. - // SHOULD include the [application - // root](/specification/trace/semantic_conventions/http.md#http-server-definitions) - // if there is one. - HTTPRouteKey = attribute.Key("http.route") -) - -// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" -// semantic conventions. It represents the URI scheme identifying the used -// protocol. -func HTTPScheme(val string) attribute.KeyValue { - return HTTPSchemeKey.String(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route (path template in the -// format used by the respective server framework). See note below -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It represents the name identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'click', 'exception' - EventNameKey = attribute.Key("event.name") - - // EventDomainKey is the attribute Key conforming to the "event.domain" - // semantic conventions. It represents the domain identifies the business - // context for the events. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: Events across different domains may have same `event.name`, yet be - // unrelated events. - EventDomainKey = attribute.Key("event.domain") -) - -var ( - // Events from browser apps - EventDomainBrowser = EventDomainKey.String("browser") - // Events from mobile apps - EventDomainDevice = EventDomainKey.String("device") - // Events from Kubernetes - EventDomainK8S = EventDomainKey.String("k8s") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It represents the name identifies the event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetTransportKey is the attribute Key conforming to the "net.transport" - // semantic conventions. It represents the transport protocol used. See - // note below. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - NetTransportKey = attribute.Key("net.transport") - - // NetProtocolNameKey is the attribute Key conforming to the - // "net.protocol.name" semantic conventions. It represents the application - // layer protocol used. The value SHOULD be normalized to lowercase. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - NetProtocolNameKey = attribute.Key("net.protocol.name") - - // NetProtocolVersionKey is the attribute Key conforming to the - // "net.protocol.version" semantic conventions. It represents the version - // of the application layer protocol used. See note below. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3.1.1' - // Note: `net.protocol.version` refers to the version of the protocol used - // and might be different from the protocol client's version. If the HTTP - // client used has a version of `0.27.2`, but sends HTTP version `1.1`, - // this attribute should be set to `1.1`. - NetProtocolVersionKey = attribute.Key("net.protocol.version") - - // NetSockPeerNameKey is the attribute Key conforming to the - // "net.sock.peer.name" semantic conventions. It represents the remote - // socket peer name. - // - // Type: string - // RequirementLevel: Recommended (If available and different from - // `net.peer.name` and if `net.sock.peer.addr` is set.) - // Stability: stable - // Examples: 'proxy.example.com' - NetSockPeerNameKey = attribute.Key("net.sock.peer.name") - - // NetSockPeerAddrKey is the attribute Key conforming to the - // "net.sock.peer.addr" semantic conventions. It represents the remote - // socket peer address: IPv4 or IPv6 for internet protocols, path for local - // communication, - // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '127.0.0.1', '/tmp/mysql.sock' - NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") - - // NetSockPeerPortKey is the attribute Key conforming to the - // "net.sock.peer.port" semantic conventions. It represents the remote - // socket peer port. - // - // Type: int - // RequirementLevel: Recommended (If defined for the address family and if - // different than `net.peer.port` and if `net.sock.peer.addr` is set.) - // Stability: stable - // Examples: 16456 - NetSockPeerPortKey = attribute.Key("net.sock.peer.port") - - // NetSockFamilyKey is the attribute Key conforming to the - // "net.sock.family" semantic conventions. It represents the protocol - // [address - // family](https://man7.org/linux/man-pages/man7/address_families.7.html) - // which is used for communication. - // - // Type: Enum - // RequirementLevel: ConditionallyRequired (If different than `inet` and if - // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers - // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in - // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support - // instrumentations that follow previous versions of this document.) - // Stability: stable - // Examples: 'inet6', 'bluetooth' - NetSockFamilyKey = attribute.Key("net.sock.family") - - // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" - // semantic conventions. It represents the logical remote hostname, see - // note below. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com' - // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an - // extra DNS lookup. - NetPeerNameKey = attribute.Key("net.peer.name") - - // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" - // semantic conventions. It represents the logical remote port number - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - NetPeerPortKey = attribute.Key("net.peer.port") - - // NetHostNameKey is the attribute Key conforming to the "net.host.name" - // semantic conventions. It represents the logical local hostname or - // similar, see note below. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'localhost' - NetHostNameKey = attribute.Key("net.host.name") - - // NetHostPortKey is the attribute Key conforming to the "net.host.port" - // semantic conventions. It represents the logical local port number, - // preferably the one that the peer used to connect - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 8080 - NetHostPortKey = attribute.Key("net.host.port") - - // NetSockHostAddrKey is the attribute Key conforming to the - // "net.sock.host.addr" semantic conventions. It represents the local - // socket address. Useful in case of a multi-IP host. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '192.168.0.1' - NetSockHostAddrKey = attribute.Key("net.sock.host.addr") - - // NetSockHostPortKey is the attribute Key conforming to the - // "net.sock.host.port" semantic conventions. It represents the local - // socket port number. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If defined for the address - // family and if different than `net.host.port` and if `net.sock.host.addr` - // is set. In other cases, it is still recommended to set this.) - // Stability: stable - // Examples: 35555 - NetSockHostPortKey = attribute.Key("net.sock.host.port") -) - -var ( - // ip_tcp - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - NetTransportUDP = NetTransportKey.String("ip_udp") - // Named or anonymous pipe. See note below - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - NetTransportOther = NetTransportKey.String("other") -) - -var ( - // IPv4 address - NetSockFamilyInet = NetSockFamilyKey.String("inet") - // IPv6 address - NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") - // Unix domain socket path - NetSockFamilyUnix = NetSockFamilyKey.String("unix") -) - -// NetProtocolName returns an attribute KeyValue conforming to the -// "net.protocol.name" semantic conventions. It represents the application -// layer protocol used. The value SHOULD be normalized to lowercase. -func NetProtocolName(val string) attribute.KeyValue { - return NetProtocolNameKey.String(val) -} - -// NetProtocolVersion returns an attribute KeyValue conforming to the -// "net.protocol.version" semantic conventions. It represents the version of -// the application layer protocol used. See note below. -func NetProtocolVersion(val string) attribute.KeyValue { - return NetProtocolVersionKey.String(val) -} - -// NetSockPeerName returns an attribute KeyValue conforming to the -// "net.sock.peer.name" semantic conventions. It represents the remote socket -// peer name. 
-func NetSockPeerName(val string) attribute.KeyValue { - return NetSockPeerNameKey.String(val) -} - -// NetSockPeerAddr returns an attribute KeyValue conforming to the -// "net.sock.peer.addr" semantic conventions. It represents the remote socket -// peer address: IPv4 or IPv6 for internet protocols, path for local -// communication, -// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). -func NetSockPeerAddr(val string) attribute.KeyValue { - return NetSockPeerAddrKey.String(val) -} - -// NetSockPeerPort returns an attribute KeyValue conforming to the -// "net.sock.peer.port" semantic conventions. It represents the remote socket -// peer port. -func NetSockPeerPort(val int) attribute.KeyValue { - return NetSockPeerPortKey.Int(val) -} - -// NetPeerName returns an attribute KeyValue conforming to the -// "net.peer.name" semantic conventions. It represents the logical remote -// hostname, see note below. -func NetPeerName(val string) attribute.KeyValue { - return NetPeerNameKey.String(val) -} - -// NetPeerPort returns an attribute KeyValue conforming to the -// "net.peer.port" semantic conventions. It represents the logical remote port -// number -func NetPeerPort(val int) attribute.KeyValue { - return NetPeerPortKey.Int(val) -} - -// NetHostName returns an attribute KeyValue conforming to the -// "net.host.name" semantic conventions. It represents the logical local -// hostname or similar, see note below. -func NetHostName(val string) attribute.KeyValue { - return NetHostNameKey.String(val) -} - -// NetHostPort returns an attribute KeyValue conforming to the -// "net.host.port" semantic conventions. It represents the logical local port -// number, preferably the one that the peer used to connect -func NetHostPort(val int) attribute.KeyValue { - return NetHostPortKey.Int(val) -} - -// NetSockHostAddr returns an attribute KeyValue conforming to the -// "net.sock.host.addr" semantic conventions. It represents the local socket -// address. Useful in case of a multi-IP host. -func NetSockHostAddr(val string) attribute.KeyValue { - return NetSockHostAddrKey.String(val) -} - -// NetSockHostPort returns an attribute KeyValue conforming to the -// "net.sock.host.port" semantic conventions. It represents the local socket -// port number. -func NetSockHostPort(val int) attribute.KeyValue { - return NetSockHostPortKey.Int(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetHostConnectionTypeKey is the attribute Key conforming to the - // "net.host.connection.type" semantic conventions. It represents the - // internet connection type currently being used by the host. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'wifi' - NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") - - // NetHostConnectionSubtypeKey is the attribute Key conforming to the - // "net.host.connection.subtype" semantic conventions. It represents the - // this describes more details regarding the connection.type. It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'LTE' - NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") - - // NetHostCarrierNameKey is the attribute Key conforming to the - // "net.host.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'sprint' - NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") - - // NetHostCarrierMccKey is the attribute Key conforming to the - // "net.host.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '310' - NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") - - // NetHostCarrierMncKey is the attribute Key conforming to the - // "net.host.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '001' - NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") - - // NetHostCarrierIccKey is the attribute Key conforming to the - // "net.host.carrier.icc" semantic conventions. It represents the ISO - // 3166-1 alpha-2 2-character country code associated with the mobile - // carrier network. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'DE' - NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") -) - -var ( - // wifi - NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") - // wired - NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") - // cell - NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") - // unavailable - NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") - // unknown - NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") -) - -var ( - // GPRS - NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") - // EDGE - NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") - // UMTS - NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") - // CDMA - NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") - // HSPA - NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") - // IDEN - NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") - // LTE - NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") - // EHRPD - NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") - // GSM - NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") -) - -// NetHostCarrierName returns an attribute KeyValue conforming to the -// "net.host.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetHostCarrierName(val string) attribute.KeyValue { - return NetHostCarrierNameKey.String(val) -} - -// NetHostCarrierMcc returns an attribute KeyValue conforming to the -// "net.host.carrier.mcc" semantic conventions. It represents the mobile -// carrier country code. -func NetHostCarrierMcc(val string) attribute.KeyValue { - return NetHostCarrierMccKey.String(val) -} - -// NetHostCarrierMnc returns an attribute KeyValue conforming to the -// "net.host.carrier.mnc" semantic conventions. It represents the mobile -// carrier network code. -func NetHostCarrierMnc(val string) attribute.KeyValue { - return NetHostCarrierMncKey.String(val) -} - -// NetHostCarrierIcc returns an attribute KeyValue conforming to the -// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetHostCarrierIcc(val string) attribute.KeyValue { - return NetHostCarrierIccKey.String(val) -} - -// Semantic conventions for HTTP client and server Spans. -const ( - // HTTPRequestContentLengthKey is the attribute Key conforming to the - // "http.request_content_length" semantic conventions. It represents the - // size of the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3495 - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - - // HTTPResponseContentLengthKey is the attribute Key conforming to the - // "http.response_content_length" semantic conventions. It represents the - // size of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3495 - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") -) - -// HTTPRequestContentLength returns an attribute KeyValue conforming to the -// "http.request_content_length" semantic conventions. It represents the size -// of the request payload body in bytes. This is the number of bytes -// transferred excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestContentLength(val int) attribute.KeyValue { - return HTTPRequestContentLengthKey.Int(val) -} - -// HTTPResponseContentLength returns an attribute KeyValue conforming to the -// "http.response_content_length" semantic conventions. It represents the size -// of the response payload body in bytes. This is the number of bytes -// transferred excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseContentLength(val int) attribute.KeyValue { - return HTTPResponseContentLengthKey.Int(val) -} - -// Semantic convention describing per-message attributes populated on messaging -// spans or links. -const ( - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the [conversation ID](#conversations) identifying the conversation to - // which the message belongs, represented as a string. Sometimes called - // "Correlation ID". - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to - // the "messaging.message.payload_size_bytes" semantic conventions. It - // represents the (uncompressed) size of the message payload in bytes. Also - // use this attribute if it is unknown whether the compressed or - // uncompressed payload size is reported. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2738 - MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") - - // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key - // conforming to the "messaging.message.payload_compressed_size_bytes" - // semantic conventions. It represents the compressed size of the message - // payload in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2048 - MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") -) - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. 
It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the [conversation ID](#conversations) identifying the -// conversation to which the message belongs, represented as a string. -// Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming -// to the "messaging.message.payload_size_bytes" semantic conventions. It -// represents the (uncompressed) size of the message payload in bytes. Also use -// this attribute if it is unknown whether the compressed or uncompressed -// payload size is reported. -func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { - return MessagingMessagePayloadSizeBytesKey.Int(val) -} - -// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue -// conforming to the "messaging.message.payload_compressed_size_bytes" semantic -// conventions. It represents the compressed size of the message payload in -// bytes. -func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { - return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) -} - -// Semantic convention for attributes that describe messaging destination on -// broker -const ( - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker does not have such notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. 
- // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") -) - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. It represents the -// low cardinality representation of the messaging destination name -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). -func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// Semantic convention for attributes that describe messaging source on broker -const ( - // MessagingSourceNameKey is the attribute Key conforming to the - // "messaging.source.name" semantic conventions. It represents the message - // source name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - // Note: Source name SHOULD uniquely identify a specific queue, topic, or - // other entity within the broker. If - // the broker does not have such notion, the source name SHOULD uniquely - // identify the broker. - MessagingSourceNameKey = attribute.Key("messaging.source.name") - - // MessagingSourceTemplateKey is the attribute Key conforming to the - // "messaging.source.template" semantic conventions. It represents the low - // cardinality representation of the messaging source name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/customers/{customerID}' - // Note: Source names could be constructed from templates. An example would - // be a source name involving a user name or product id. Although the - // source name in this case is of high cardinality, the underlying template - // is of low cardinality and can be effectively used for grouping and - // aggregation. 
- MessagingSourceTemplateKey = attribute.Key("messaging.source.template") - - // MessagingSourceTemporaryKey is the attribute Key conforming to the - // "messaging.source.temporary" semantic conventions. It represents a - // boolean that is true if the message source is temporary and might not - // exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") - - // MessagingSourceAnonymousKey is the attribute Key conforming to the - // "messaging.source.anonymous" semantic conventions. It represents a - // boolean that is true if the message source is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") -) - -// MessagingSourceName returns an attribute KeyValue conforming to the -// "messaging.source.name" semantic conventions. It represents the message -// source name -func MessagingSourceName(val string) attribute.KeyValue { - return MessagingSourceNameKey.String(val) -} - -// MessagingSourceTemplate returns an attribute KeyValue conforming to the -// "messaging.source.template" semantic conventions. It represents the low -// cardinality representation of the messaging source name -func MessagingSourceTemplate(val string) attribute.KeyValue { - return MessagingSourceTemplateKey.String(val) -} - -// MessagingSourceTemporary returns an attribute KeyValue conforming to the -// "messaging.source.temporary" semantic conventions. It represents a boolean -// that is true if the message source is temporary and might not exist anymore -// after messages are processed. -func MessagingSourceTemporary(val bool) attribute.KeyValue { - return MessagingSourceTemporaryKey.Bool(val) -} - -// MessagingSourceAnonymous returns an attribute KeyValue conforming to the -// "messaging.source.anonymous" semantic conventions. It represents a boolean -// that is true if the message source is anonymous (could be unnamed or have -// auto-generated name). -func MessagingSourceAnonymous(val bool) attribute.KeyValue { - return MessagingSourceAnonymousKey.Bool(val) -} - -// Attributes for RabbitMQ -const ( - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the rabbitMQ message routing key. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If not empty.) - // Stability: stable - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") -) - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// Attributes for Apache Kafka -const ( - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. 
If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaClientIDKey is the attribute Key conforming to the - // "messaging.kafka.client_id" semantic conventions. It represents the - // client ID for the Consumer or Producer that is handling the message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client-5' - MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") - - // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to - // the "messaging.kafka.destination.partition" semantic conventions. It - // represents the partition the message is sent to. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2 - MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") - - // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the - // "messaging.kafka.source.partition" semantic conventions. It represents - // the partition the message is received from. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2 - MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (If value is `true`. When - // missing, the value is assumed to be `false`.) - // Stability: stable - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") -) - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka are used for grouping alike messages to ensure they're -// processed on the same partition. They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. 
-func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaClientID returns an attribute KeyValue conforming to the -// "messaging.kafka.client_id" semantic conventions. It represents the client -// ID for the Consumer or Producer that is handling the message. -func MessagingKafkaClientID(val string) attribute.KeyValue { - return MessagingKafkaClientIDKey.String(val) -} - -// MessagingKafkaDestinationPartition returns an attribute KeyValue -// conforming to the "messaging.kafka.destination.partition" semantic -// conventions. It represents the partition the message is sent to. -func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { - return MessagingKafkaDestinationPartitionKey.Int(val) -} - -// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to -// the "messaging.kafka.source.partition" semantic conventions. It represents -// the partition the message is received from. -func MessagingKafkaSourcePartition(val int) attribute.KeyValue { - return MessagingKafkaSourcePartitionKey.Int(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition. -func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// Attributes for Apache RocketMQ -const ( - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqClientIDKey is the attribute Key conforming to the - // "messaging.rocketmq.client_id" semantic conventions. It represents the - // unique identifier for each client. 
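Context for this removal: the Kafka helpers deleted above are thin typed wrappers over attribute.KeyValue. A minimal sketch, assuming the standard go.opentelemetry.io/otel tracer APIs, of how they would be attached to a producer span; the tracer name, topic, key, partition and offset below are illustrative placeholders (the key/partition/offset values are the examples from the removed comments), not code from this repo:

    package example

    import (
        "context"

        "go.opentelemetry.io/otel"
        semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
        "go.opentelemetry.io/otel/trace"
    )

    // publishWithSpan starts a producer span and stamps it with the (now
    // removed) v1.20.0 messaging/Kafka attribute constructors.
    func publishWithSpan(ctx context.Context) {
        tracer := otel.Tracer("example/kafka") // placeholder instrumentation name
        _, span := tracer.Start(ctx, "MyTopic publish",
            trace.WithSpanKind(trace.SpanKindProducer))
        defer span.End()

        span.SetAttributes(
            semconv.MessagingDestinationName("MyTopic"), // placeholder topic
            semconv.MessagingKafkaMessageKey("myKey"),
            semconv.MessagingKafkaDestinationPartition(2),
            semconv.MessagingKafkaMessageOffset(42),
        )
    }

Each helper only returns an attribute.KeyValue, so callers that still need these keys after the upgrade can fall back to attribute.String / attribute.Int with the same attribute names.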
- // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myhost@8742@s8083jm' - MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the message type is delay - // and delay time level is not specified.) - // Stability: stable - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the message type is delay - // and delivery timestamp is not specified.) - // Stability: stable - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the it is essential for FIFO message. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) - // Stability: stable - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions. It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqClientID returns an attribute KeyValue conforming to the -// "messaging.rocketmq.client_id" semantic conventions. It represents the -// unique identifier for each client. -func MessagingRocketmqClientID(val string) attribute.KeyValue { - return MessagingRocketmqClientIDKey.String(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. -func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the it is essential for FIFO message. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. 
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of message, another way to mark message besides message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' - UserAgentOriginalKey = attribute.Key("user_agent.original") -) - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go deleted file mode 100644 index 0d1f55a8..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the conventions -// as of the v1.20.0 version of the OpenTelemetry specification. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go deleted file mode 100644 index 63776393..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -import "go.opentelemetry.io/otel/attribute" - -// This semantic convention defines the attributes used to represent a feature -// flag evaluation as an event. -const ( - // FeatureFlagKeyKey is the attribute Key conforming to the - // "feature_flag.key" semantic conventions. It represents the unique - // identifier of the feature flag. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'logo-color' - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider_name" semantic conventions. It represents the - // name of the service provider that performs the flag evaluation. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'Flag Manager' - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") - - // FeatureFlagVariantKey is the attribute Key conforming to the - // "feature_flag.variant" semantic conventions. It represents the sHOULD be - // a semantic identifier for a value. If one is unavailable, a stringified - // version of the value can be used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'red', 'true', 'on' - // Note: A semantic identifier, commonly referred to as a variant, provides - // a means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. - // - // A stringified version of the value can be used in situations where a - // semantic identifier is unavailable. String representation of the value - // should be determined by the implementer. - FeatureFlagVariantKey = attribute.Key("feature_flag.variant") -) - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the unique identifier -// of the feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider_name" semantic conventions. It represents the name of -// the service provider that performs the flag evaluation. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagVariant returns an attribute KeyValue conforming to the -// "feature_flag.variant" semantic conventions. It represents the sHOULD be a -// semantic identifier for a value. If one is unavailable, a stringified -// version of the value can be used. -func FeatureFlagVariant(val string) attribute.KeyValue { - return FeatureFlagVariantKey.String(val) -} - -// RPC received/sent message. -const ( - // MessageTypeKey is the attribute Key conforming to the "message.type" - // semantic conventions. It represents the whether this is a received or - // sent message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessageTypeKey = attribute.Key("message.type") - - // MessageIDKey is the attribute Key conforming to the "message.id" - // semantic conventions. It represents the mUST be calculated as two - // different counters starting from `1` one for sent messages and one for - // received message. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Note: This way we guarantee that the values will be consistent between - // different implementations. - MessageIDKey = attribute.Key("message.id") - - // MessageCompressedSizeKey is the attribute Key conforming to the - // "message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - MessageCompressedSizeKey = attribute.Key("message.compressed_size") - - // MessageUncompressedSizeKey is the attribute Key conforming to the - // "message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") -) - -var ( - // sent - MessageTypeSent = MessageTypeKey.String("SENT") - // received - MessageTypeReceived = MessageTypeKey.String("RECEIVED") -) - -// MessageID returns an attribute KeyValue conforming to the "message.id" -// semantic conventions. It represents the mUST be calculated as two different -// counters starting from `1` one for sent messages and one for received -// message. -func MessageID(val int) attribute.KeyValue { - return MessageIDKey.Int(val) -} - -// MessageCompressedSize returns an attribute KeyValue conforming to the -// "message.compressed_size" semantic conventions. It represents the compressed -// size of the message in bytes. -func MessageCompressedSize(val int) attribute.KeyValue { - return MessageCompressedSizeKey.Int(val) -} - -// MessageUncompressedSize returns an attribute KeyValue conforming to the -// "message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. -func MessageUncompressedSize(val int) attribute.KeyValue { - return MessageUncompressedSizeKey.Int(val) -} - -// The attributes used to report a single exception associated with a span. -const ( - // ExceptionEscapedKey is the attribute Key conforming to the - // "exception.escaped" semantic conventions. It represents the sHOULD be - // set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of - // a span, - // if that span is ended while the exception is still logically "in - // flight". - // This may be actually "in flight" in some languages (e.g. if the - // exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most - // languages. - // - // It is usually not possible to determine at the point where an exception - // is thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending - // the span, - // as done in the [example above](#recording-an-exception). - // - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") -) - -// ExceptionEscaped returns an attribute KeyValue conforming to the -// "exception.escaped" semantic conventions. It represents the sHOULD be set to -// true if the exception event is recorded at a point where it is known that -// the exception is escaping the scope of the span. 
-func ExceptionEscaped(val bool) attribute.KeyValue { - return ExceptionEscapedKey.Bool(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go deleted file mode 100644 index f40c9782..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go deleted file mode 100644 index 9c184063..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -// HTTP scheme attributes. -var ( - HTTPSchemeHTTP = HTTPSchemeKey.String("http") - HTTPSchemeHTTPS = HTTPSchemeKey.String("https") -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go deleted file mode 100644 index 3d44dae2..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go +++ /dev/null @@ -1,2060 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -import "go.opentelemetry.io/otel/attribute" - -// The web browser in which the application represented by the resource is -// running. The `browser.*` attributes MUST be used only for resources that -// represent applications running in a web browser (regardless of whether -// running on a mobile or desktop device). -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). 
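The event-related identifiers removed here (the message.* attributes and the exception event name) are normally recorded via span.AddEvent rather than SetAttributes. A small sketch under the same assumptions; the payload, error, and the hand-written "exception.message" key are stand-ins for illustration:

    package example

    import (
        "context"
        "errors"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
        "go.opentelemetry.io/otel/trace"
    )

    // recordEvents shows the two event shapes these removed constants were
    // used for: an RPC "message" event and an "exception" event marked as
    // escaping the span.
    func recordEvents(ctx context.Context, payload []byte) error {
        _, span := otel.Tracer("example/rpc").Start(ctx, "Service/Call")
        defer span.End()

        // One event per sent message, with a per-direction counter from 1.
        span.AddEvent("message", trace.WithAttributes(
            semconv.MessageTypeSent,
            semconv.MessageID(1),
            semconv.MessageUncompressedSize(len(payload)),
        ))

        err := errors.New("upstream unavailable") // stand-in failure
        // exception.escaped marks that the error is still in flight when
        // the span ends; the message key is written out by hand here.
        span.AddEvent(semconv.ExceptionEventName, trace.WithAttributes(
            semconv.ExceptionEscaped(true),
            attribute.String("exception.message", err.Error()),
        ))
        return err
    }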
However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// A cloud environment (e.g. GCP, Azure, AWS) -const ( - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. - CloudResourceIDKey = attribute.Key("cloud.resource_id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. 
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. - CloudPlatformKey = attribute.Key("cloud.platform") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - // IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - 
CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) -// on Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. 
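The cloud attributes above are resource-level, so they are typically bundled once into an SDK Resource rather than set per span. A hedged sketch using resource.NewWithAttributes and the package's SchemaURL constant; the account, region and zone values are simply the examples quoted in the removed comments:

    package example

    import (
        "go.opentelemetry.io/otel/sdk/resource"
        semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
    )

    // cloudResource builds a Resource carrying the cloud attributes that the
    // deleted v1.20.0 constructors produced. All values are placeholders.
    func cloudResource() *resource.Resource {
        return resource.NewWithAttributes(
            semconv.SchemaURL, // schema URL shipped with this semconv version
            semconv.CloudProviderAWS,
            semconv.CloudPlatformAWSECS,
            semconv.CloudAccountID("111111111111"),
            semconv.CloudRegion("us-east-1"),
            semconv.CloudAvailabilityZone("us-east-1c"),
        )
    }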
It represents the ARN of an - // [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the task - // definition family this task definition is a member of. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for this task definition. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS -// task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the task -// definition family this task definition is a member of. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// this task definition. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. 
-func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Resources specific to Amazon Web Services. -const ( - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") -) - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. 
It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// Heroku dyno metadata -const ( - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. It represents - // the time and date the release was created - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2022-10-23T18:00:42Z' - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit - // hash for the current release - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' - HerokuAppIDKey = attribute.Key("heroku.app.id") -) - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming -// to the "heroku.release.creation_timestamp" semantic conventions. It -// represents the time and date the release was created -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuAppID returns an attribute KeyValue conforming to the -// "heroku.app.id" semantic conventions. It represents the unique identifier -// for the application -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// A container instance. -const ( - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. 
It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageTagKey is the attribute Key conforming to the - // "container.image.tag" semantic conventions. It represents the container - // image tag. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0.1' - ContainerImageTagKey = attribute.Key("container.image.tag") -) - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageTag returns an attribute KeyValue conforming to the -// "container.image.tag" semantic conventions. It represents the container -// image tag. -func ContainerImageTag(val string) attribute.KeyValue { - return ContainerImageTagKey.String(val) -} - -// The software deployment. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'staging', 'production' - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. It represents the name of the -// [deployment -// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka -// deployment tier). -func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// The device on which the process represented by this resource is running. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. 
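Container and deployment attributes from this block follow the same pattern and are attached once on the TracerProvider's Resource. A sketch with placeholder values taken from the removed examples; the provider wiring assumes the stock go.opentelemetry.io/otel/sdk packages:

    package example

    import (
        "go.opentelemetry.io/otel/sdk/resource"
        sdktrace "go.opentelemetry.io/otel/sdk/trace"
        semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
    )

    // newTracerProvider attaches container and deployment attributes to every
    // span via the provider's Resource. All values are placeholders.
    func newTracerProvider() *sdktrace.TracerProvider {
        res := resource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.ContainerName("opentelemetry-autoconf"),
            semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
            semconv.ContainerImageTag("0.1"),
            semconv.DeploymentEnvironment("staging"),
        )
        return sdktrace.NewTracerProvider(sdktrace.WithResource(res))
    }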
On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human readable version of - // the device model rather than a machine readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// A serverless instance. -const ( - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. 
It represents the name of the single function that this - // runtime instance executes. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the - // FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The - // following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud - // providers/products: - // - // * **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). - FaaSNameKey = attribute.Key("faas.name") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" - // semantic conventions. It represents the immutable version of the - // function being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - // - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run:** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") - - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a - // string, that will be potentially reused for other invocations to the - // same function/function version. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSMaxMemoryKey is the attribute Key conforming to the - // "faas.max_memory" semantic conventions. It represents the amount of - // memory available to the serverless function converted to Bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 134217728 - // Note: It's recommended to set this attribute since e.g. too little - // memory can easily stop a Java AWS Lambda function from working - // correctly. On AWS Lambda, the environment variable - // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must - // be multiplied by 1,048,576). 
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory") -) - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" -// semantic conventions. It represents the name of the single function that -// this runtime instance executes. -func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. It represents the execution -// environment ID as a string, that will be potentially reused for other -// invocations to the same function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// A host is defined as a general computing instance. -const ( - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be - // the instance_id assigned by the cloud provider. For non-containerized - // systems, this should be the `machine-id`. See the table below for the - // sources to use to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'fdbf79e8af94cb7f9e8df36789187052' - HostIDKey = attribute.Key("host.id") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") - - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - HostArchKey = attribute.Key("host.arch") - - // HostImageNameKey is the attribute Key conforming to the - // "host.image.name" semantic conventions. It represents the name of the VM - // image or OS install the host was instantiated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the vM image ID. For Cloud, this - // value is from the provider. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version - // string of the VM image as defined in [Version - // Attributes](README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized systems, -// this should be the `machine-id`. See the table below for the sources to use -// to determine the `machine-id` based on operating system. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" -// semantic conventions. It represents the name of the host. On Unix systems, -// it may contain what the hostname command returns, or the fully qualified -// hostname, or another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" -// semantic conventions. It represents the type of host. For Cloud, this must -// be the machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM -// image or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the -// "host.image.id" semantic conventions. It represents the vM image ID. For -// Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string -// of the VM image as defined in [Version -// Attributes](README.md#version-attributes). -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// A Kubernetes Cluster. -const ( - // K8SClusterNameKey is the attribute Key conforming to the - // "k8s.cluster.name" semantic conventions. It represents the name of the - // cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. 
It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// A Kubernetes Node object. -const ( - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") -) - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// A Kubernetes Namespace. -const ( - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") -) - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// A Kubernetes Pod object. -const ( - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") -) - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. -func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// A container in a -// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). -const ( - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 0, 2 - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") -) - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// A Kubernetes ReplicaSet object. -const ( - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") -) - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// A Kubernetes Deployment object. -const ( - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") -) - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// A Kubernetes StatefulSet object. -const ( - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") -) - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// A Kubernetes DaemonSet object. -const ( - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") -) - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. -func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// A Kubernetes Job object. -const ( - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") -) - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// A Kubernetes CronJob object. -const ( - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") -) - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - OSTypeKey = attribute.Key("os.type") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. It represents the human readable (not -// intended to be parsed) OS version information, like e.g. reported by `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](../../resource/semantic_conventions/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// An operating system process. -const ( - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) 
- // Stability: stable - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessCommandKey is the attribute Key conforming to the - // "process.command" semantic conventions. It represents the command used - // to launch the process (i.e. the command name). On Linux based systems, - // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can - // be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. It represents the full - // command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. - // Do not set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, - // this would be the full argv vector passed to `main`. - // - // Type: string[] - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns - // the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") -) - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PID). -func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of -// the process executable. On Linux based systems, can be set to the `Name` in -// `proc/[pid]/status`. On Windows, can be set to the base name of -// `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. 
It represents the full path -// to the process executable. On Linux based systems, can be set to the target -// of `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. -func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be -// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to -// the first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. Do not set this -// if you have to assemble it just for monitoring; use `process.command_args` -// instead. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) as received by -// the process. On Linux-based systems (and some other Unixoid systems -// supporting procfs), can be set according to the list of null-delimited -// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, -// this would be the full argv vector passed to `main`. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the -// "process.owner" semantic conventions. It represents the username of the user -// that owns the process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// The single (language) runtime instance which is monitored. -const ( - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") -) - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// A service instance. -const ( - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, - // the value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") -) - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// A service instance. -const ( - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'my-k8s-pod-deployment-1', - // '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to distinguish instances of the same - // service that exist at the same time (e.g. instances of a horizontally - // scaled service). It is preferable for the ID to be persistent and stay - // the same for the lifetime of the service instance, however it is - // acceptable that the ID is ephemeral and changes during important - // lifetime events for the service (e.g. service restarts). If the service - // has no inherent unique ID that can be used as the value of this - // attribute it is recommended to generate a random Version 1 or Version 4 - // RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2.0.0' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'opentelemetry' - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. 
- // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. -func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetryAutoVersionKey is the attribute Key conforming to the - // "telemetry.auto.version" semantic conventions. It represents the version - // string of the auto instrumentation agent, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.2.3' - TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") -) - -// TelemetryAutoVersion returns an attribute KeyValue conforming to the -// "telemetry.auto.version" semantic conventions. It represents the version -// string of the auto instrumentation agent, if used. -func TelemetryAutoVersion(val string) attribute.KeyValue { - return TelemetryAutoVersionKey.String(val) -} - -// Resource describing the packaged software running the application code. Web -// engines are typically executed using process.runtime. -const ( - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") - - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") -) - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OTelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry -// Scope's concepts. -const ( - // OTelLibraryNameKey is the attribute Key conforming to the - // "otel.library.name" semantic conventions. It represents the deprecated, - // use the `otel.scope.name` attribute. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelLibraryNameKey = attribute.Key("otel.library.name") - - // OTelLibraryVersionKey is the attribute Key conforming to the - // "otel.library.version" semantic conventions. It represents the - // deprecated, use the `otel.scope.version` attribute. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '1.0.0' - OTelLibraryVersionKey = attribute.Key("otel.library.version") -) - -// OTelLibraryName returns an attribute KeyValue conforming to the -// "otel.library.name" semantic conventions. It represents the deprecated, use -// the `otel.scope.name` attribute. -func OTelLibraryName(val string) attribute.KeyValue { - return OTelLibraryNameKey.String(val) -} - -// OTelLibraryVersion returns an attribute KeyValue conforming to the -// "otel.library.version" semantic conventions. It represents the deprecated, -// use the `otel.scope.version` attribute. -func OTelLibraryVersion(val string) attribute.KeyValue { - return OTelLibraryVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go deleted file mode 100644 index 95d0210e..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go deleted file mode 100644 index 90b1b045..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go +++ /dev/null @@ -1,2599 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" - -import "go.opentelemetry.io/otel/attribute" - -// The shared attributes used to report a single exception associated with a -// span or log. -const ( - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). The dynamic type of the - // exception should be preferred over the static type in languages that - // support it. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") - - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Division by zero', "Can't convert 'int' object to str - // implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace - // as a string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") -) - -// ExceptionType returns an attribute KeyValue conforming to the -// "exception.type" semantic conventions. It represents the type of the -// exception (its fully-qualified class name, if applicable). The dynamic type -// of the exception should be preferred over the static type in languages that -// support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception -// message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// The attributes described in this section are rather generic. They may be -// used in any Log Record they apply to. -const ( - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log - // Record. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. - // The id MAY be an [Universally Unique Lexicographically Sortable - // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers - // (e.g. UUID) may be used as needed. - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogRecordUID returns an attribute KeyValue conforming to the -// "log.record.uid" semantic conventions. It represents a unique identifier for -// the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Span attributes used by AWS Lambda (in addition to general `faas` -// attributes). -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. 
It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for CloudEvents. CloudEvents is a specification on how to define -// event data in a standard way. These attributes can be attached to spans when -// performing operations with CloudEvents, regardless of the protocol being -// used. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// uniquely identifies the event. 
-func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// Semantic conventions for the OpenTracing Shim -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: The causal relationship between a child Span and a parent Span. - OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span does not depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// The attributes used to perform database client calls. -const ( - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents an identifier for the database management - // system (DBMS) product being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - DBSystemKey = attribute.Key("db.system") - - // DBConnectionStringKey is the attribute Key conforming to the - // "db.connection_string" semantic conventions. It represents the - // connection string used to connect to the database. It is recommended to - // remove embedded credentials. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - - // DBUserKey is the attribute Key conforming to the "db.user" semantic - // conventions. 
It represents the username for accessing the database. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") - - // DBJDBCDriverClassnameKey is the attribute Key conforming to the - // "db.jdbc.driver_classname" semantic conventions. It represents the - // fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) - // driver used to connect. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - - // DBNameKey is the attribute Key conforming to the "db.name" semantic - // conventions. It represents the this attribute is used to report the name - // of the database being accessed. For commands that switch the database, - // this should be set to the target database (even if the command fails). - // - // Type: string - // RequirementLevel: ConditionallyRequired (If applicable.) - // Stability: stable - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called - // "schema name". In case there are multiple layers that could be - // considered for database name (e.g. Oracle instance name and schema - // name), the database name to be used is the more specific layer (e.g. - // Oracle schema name). - DBNameKey = attribute.Key("db.name") - - // DBStatementKey is the attribute Key conforming to the "db.statement" - // semantic conventions. It represents the database statement being - // executed. - // - // Type: string - // RequirementLevel: Recommended (Should be collected by default only if - // there is sanitization that excludes sensitive information.) - // Stability: stable - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - DBStatementKey = attribute.Key("db.statement") - - // DBOperationKey is the attribute Key conforming to the "db.operation" - // semantic conventions. It represents the name of the operation being - // executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If `db.statement` is not - // applicable.) - // Stability: stable - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to - // attempt any client-side parsing of `db.statement` just to get this - // property, but it should be set if the operation name is provided by the - // library being instrumented. If the SQL statement has an ambiguous - // operation, or performs more than one operation, this value may be - // omitted. - DBOperationKey = attribute.Key("db.operation") -) - -var ( - // Some other SQL database. Fallback only. 
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") - // Cloud Spanner - DBSystemSpanner = DBSystemKey.String("spanner") - // Trino - DBSystemTrino = DBSystemKey.String("trino") -) - -// DBConnectionString returns an 
attribute KeyValue conforming to the -// "db.connection_string" semantic conventions. It represents the connection -// string used to connect to the database. It is recommended to remove embedded -// credentials. -func DBConnectionString(val string) attribute.KeyValue { - return DBConnectionStringKey.String(val) -} - -// DBUser returns an attribute KeyValue conforming to the "db.user" semantic -// conventions. It represents the username for accessing the database. -func DBUser(val string) attribute.KeyValue { - return DBUserKey.String(val) -} - -// DBJDBCDriverClassname returns an attribute KeyValue conforming to the -// "db.jdbc.driver_classname" semantic conventions. It represents the -// fully-qualified class name of the [Java Database Connectivity -// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver -// used to connect. -func DBJDBCDriverClassname(val string) attribute.KeyValue { - return DBJDBCDriverClassnameKey.String(val) -} - -// DBName returns an attribute KeyValue conforming to the "db.name" semantic -// conventions. It represents the this attribute is used to report the name of -// the database being accessed. For commands that switch the database, this -// should be set to the target database (even if the command fails). -func DBName(val string) attribute.KeyValue { - return DBNameKey.String(val) -} - -// DBStatement returns an attribute KeyValue conforming to the -// "db.statement" semantic conventions. It represents the database statement -// being executed. -func DBStatement(val string) attribute.KeyValue { - return DBStatementKey.String(val) -} - -// DBOperation returns an attribute KeyValue conforming to the -// "db.operation" semantic conventions. It represents the name of the operation -// being executed, e.g. the [MongoDB command -// name](https://docs.mongodb.com/manual/reference/command/#database-operations) -// such as `findAndModify`, or the SQL keyword. -func DBOperation(val string) attribute.KeyValue { - return DBOperationKey.String(val) -} - -// Connection-level attributes for Microsoft SQL Server -const ( - // DBMSSQLInstanceNameKey is the attribute Key conforming to the - // "db.mssql.instance_name" semantic conventions. It represents the - // Microsoft SQL Server [instance - // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named - // instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no - // longer required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") -) - -// DBMSSQLInstanceName returns an attribute KeyValue conforming to the -// "db.mssql.instance_name" semantic conventions. It represents the Microsoft -// SQL Server [instance -// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) -// connecting to. This name is used to determine the port of a named instance. -func DBMSSQLInstanceName(val string) attribute.KeyValue { - return DBMSSQLInstanceNameKey.String(val) -} - -// Call-level attributes for Cassandra -const ( - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. how many rows will be returned at once. 
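As a usage sketch, a database client instrumentation would typically combine a DBSystem enum member with the db.* constructors on a CLIENT span; the tracer name, query values, and the semconv import path below are assumptions:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// queryUsers annotates a SQL query span: the DBMS identifier comes from the
// DBSystem enum, while db.name, db.user, db.statement and db.operation use the
// helper constructors defined above.
func queryUsers(ctx context.Context) {
	attrs := []attribute.KeyValue{
		semconv.DBSystemPostgreSQL,
		semconv.DBName("customers"),
		semconv.DBUser("readonly_user"),
		semconv.DBStatement("SELECT * FROM wuser_table"),
		semconv.DBOperation("SELECT"),
	}
	_, span := otel.Tracer("db-client").Start(ctx, "SELECT customers",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(attrs...),
	)
	defer span.End()
	// ... execute the query ...
}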
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraTableKey is the attribute Key conforming to the - // "db.cassandra.table" semantic conventions. It represents the name of the - // primary table that the operation is acting upon, including the keyspace - // name (if applicable). - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra - // rather than sql. It is not recommended to attempt any client-side - // parsing of `db.statement` just to get this property, but it should be - // set if it is provided by the library being instrumented. If the - // operation is acting upon an anonymous table, or more than one table, - // this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) -} - -// DBCassandraTable returns an attribute KeyValue conforming to the -// "db.cassandra.table" semantic conventions. It represents the name of the -// primary table that the operation is acting upon, including the keyspace name -// (if applicable). -func DBCassandraTable(val string) attribute.KeyValue { - return DBCassandraTableKey.String(val) -} - -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents the whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) -} - -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. -func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) -} - -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} - -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} - -// Call-level attributes for Redis -const ( - // DBRedisDBIndexKey is the attribute Key conforming to the - // "db.redis.database_index" semantic conventions. 
It represents the index - // of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To - // be used instead of the generic `db.name` attribute. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If other than the default - // database (`0`).) - // Stability: stable - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") -) - -// DBRedisDBIndex returns an attribute KeyValue conforming to the -// "db.redis.database_index" semantic conventions. It represents the index of -// the database being accessed as used in the [`SELECT` -// command](https://redis.io/commands/select), provided as an integer. To be -// used instead of the generic `db.name` attribute. -func DBRedisDBIndex(val int) attribute.KeyValue { - return DBRedisDBIndexKey.Int(val) -} - -// Call-level attributes for MongoDB -const ( - // DBMongoDBCollectionKey is the attribute Key conforming to the - // "db.mongodb.collection" semantic conventions. It represents the - // collection being accessed within the database stated in `db.name`. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") -) - -// DBMongoDBCollection returns an attribute KeyValue conforming to the -// "db.mongodb.collection" semantic conventions. It represents the collection -// being accessed within the database stated in `db.name`. -func DBMongoDBCollection(val string) attribute.KeyValue { - return DBMongoDBCollectionKey.String(val) -} - -// Call-level attributes for SQL databases -const ( - // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" - // semantic conventions. It represents the name of the primary table that - // the operation is acting upon, including the database name (if - // applicable). - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting - // upon an anonymous table, or more than one table, this value MUST NOT be - // set. - DBSQLTableKey = attribute.Key("db.sql.table") -) - -// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" -// semantic conventions. It represents the name of the primary table that the -// operation is acting upon, including the database name (if applicable). -func DBSQLTable(val string) attribute.KeyValue { - return DBSQLTableKey.String(val) -} - -// Call-level attributes for Cosmos DB. -const ( - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. 
- // - // Type: Enum - // RequirementLevel: ConditionallyRequired (when performing one of the - // operations in this list) - // Stability: stable - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as - // default)) - // Stability: stable - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBContainerKey is the attribute Key conforming to the - // "db.cosmosdb.container" semantic conventions. It represents the cosmos - // DB container name. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if available) - // Stability: stable - // Examples: 'anystring' - DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: ConditionallyRequired (if response was received) - // Stability: stable - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. - // - // Type: int - // RequirementLevel: ConditionallyRequired (when response was received and - // contained sub-code.) - // Stability: stable - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU - // consumed for that operation - // - // Type: double - // RequirementLevel: ConditionallyRequired (when available) - // Stability: stable - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") -) - -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") -) - -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) -} - -// DBCosmosDBContainer returns an attribute KeyValue conforming to the -// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB -// container name. -func DBCosmosDBContainer(val string) attribute.KeyValue { - return DBCosmosDBContainerKey.String(val) -} - -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions. It -// represents the request payload size in bytes -func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { - return DBCosmosDBRequestContentLengthKey.Int(val) -} - -// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB -// status code. -func DBCosmosDBStatusCode(val int) attribute.KeyValue { - return DBCosmosDBStatusCodeKey.Int(val) -} - -// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos -// DB sub status code. -func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { - return DBCosmosDBSubStatusCodeKey.Int(val) -} - -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU -// consumed for that operation -func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { - return DBCosmosDBRequestChargeKey.Float64(val) -} - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's -// concepts. -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// This semantic convention describes an instance of a function that runs -// without provisioning or managing of servers (also known as serverless -// functions or Function as a Service (FaaS)) with spans. -const ( - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" - // semantic conventions. It represents the type of the trigger which caused - // this function invocation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: For the server/consumer span on the incoming side, - // `faas.trigger` MUST be set. - // - // Clients invoking FaaS instances usually cannot set `faas.trigger`, - // since they would typically need to look in the payload to determine - // the event type. If clients set it, it should be the same as the - // trigger that corresponding incoming would have (i.e., this has - // nothing to do with the underlying transport used to make the API - // call to invoke the lambda, which is often HTTP). - FaaSTriggerKey = attribute.Key("faas.trigger") - - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation - // ID of the current function invocation. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. It represents the invocation ID -// of the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// Semantic Convention for FaaS triggered as a response to some data source -// operation such as a database or filesystem read/write. -const ( - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name - // of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in - // Cosmos DB to the database name. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It represents the - // describes the type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string - // containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or - // S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. 
It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation - // time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") - - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron - // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") -) - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" -// semantic conventions. It represents a string containing the function -// invocation time in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// Contains additional attributes for incoming FaaS spans. -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the - // serverless function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). 
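A hedged sketch of a timer-triggered FaaS invocation span built from the faas.* helpers above; the handler shape, cron expression, and semconv import path are assumptions:

package example

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// handleScheduledRun records a timer-triggered invocation, including the
// cold-start flag and the ISO 8601 UTC invocation time described above.
func handleScheduledRun(ctx context.Context, invocationID string, coldStart bool) {
	_, span := otel.Tracer("faas").Start(ctx, "scheduled-run",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(
			semconv.FaaSTriggerTimer,
			semconv.FaaSInvocationID(invocationID),
			semconv.FaaSTime(time.Now().UTC().Format(time.RFC3339)),
			semconv.FaaSCron("0/5 * * * ? *"),
			semconv.FaaSColdstart(coldStart),
		),
	)
	defer span.End()
	// ... run the scheduled work ...
}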
-func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// Contains additional attributes for outgoing FaaS spans. -const ( - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function. - // - // Type: string - // RequirementLevel: ConditionallyRequired (For some cloud providers, like - // AWS or GCP, the region in which a function is hosted is essential to - // uniquely identify the function and also part of its endpoint. Since it's - // part of the endpoint being called, the region is always known to - // clients. In these cases, `faas.invoked_region` MUST be set accordingly. - // If the region is unknown to the client or not required for identifying - // the invoked function, setting `faas.invoked_region` is optional.) - // Stability: stable - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](../../resource/semantic_conventions/README.md#service) - // of the remote service. SHOULD be equal to the actual `service.name` - // resource attribute of the remote service if any. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](../../resource/semantic_conventions/README.md#service) of -// the remote service. SHOULD be equal to the actual `service.name` resource -// attribute of the remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// These attributes may be used for any operation with an authenticated and/or -// authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. -func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
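A sketch combining peer.service with the enduser.* attributes on an outgoing call, under the same assumed import path; the service and user values are placeholders:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// callAuthTokenCache tags an outgoing request with the remote service.name and
// the authenticated end user extracted from the inbound request.
func callAuthTokenCache(ctx context.Context, username string) {
	_, span := otel.Tracer("client").Start(ctx, "AuthTokenCache/Get",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.PeerService("AuthTokenCache"),
			semconv.EnduserID(username),
			semconv.EnduserRole("admin"),
			semconv.EnduserScope("read:message"),
		),
	)
	defer span.End()
	// ... perform the call ...
}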
-func EnduserScope(val string) attribute.KeyValue { - return EnduserScopeKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// These attributes allow to report this unit of code and therefore to provide -// more context about the span. -const ( - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. 
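The code.* helpers map naturally onto runtime.Caller; a sketch follows, noting that Go exposes no portable managed thread ID, so only thread.name is set here and the semconv import path is assumed:

package example

import (
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version
)

// codeLocationAttrs builds code.* attributes for the caller via runtime.Caller.
func codeLocationAttrs() []attribute.KeyValue {
	pc, file, line, ok := runtime.Caller(1)
	if !ok {
		return nil
	}
	// Name() is package-qualified; strictly, the package part belongs in code.namespace.
	fn := runtime.FuncForPC(pc).Name()
	return []attribute.KeyValue{
		semconv.CodeFunction(fn),
		semconv.CodeFilepath(file),
		semconv.CodeLineNumber(line),
		semconv.ThreadName("main"),
	}
}

A caller would then attach these with span.SetAttributes(codeLocationAttrs()...).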
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") -) - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). -func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// Semantic Convention for HTTP Client -const ( - // HTTPURLKey is the attribute Key conforming to the "http.url" semantic - // conventions. It represents the full HTTP request URL in the form - // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is - // not transmitted over HTTP, but if it is known, it should be included - // nevertheless. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Note: `http.url` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case the - // attribute's value should be `https://www.example.com/`. - HTTPURLKey = attribute.Key("http.url") - - // HTTPResendCountKey is the attribute Key conforming to the - // "http.resend_count" semantic conventions. It represents the ordinal - // number of request resending attempt (for any reason, including - // redirects). - // - // Type: int - // RequirementLevel: Recommended (if and only if request was retried.) - // Stability: stable - // Examples: 3 - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending - // (e.g. redirection, authorization failure, 503 Server Unavailable, - // network issues, or any other). - HTTPResendCountKey = attribute.Key("http.resend_count") -) - -// HTTPURL returns an attribute KeyValue conforming to the "http.url" -// semantic conventions. 
It represents the full HTTP request URL in the form -// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not -// transmitted over HTTP, but if it is known, it should be included -// nevertheless. -func HTTPURL(val string) attribute.KeyValue { - return HTTPURLKey.String(val) -} - -// HTTPResendCount returns an attribute KeyValue conforming to the -// "http.resend_count" semantic conventions. It represents the ordinal number -// of request resending attempt (for any reason, including redirects). -func HTTPResendCount(val int) attribute.KeyValue { - return HTTPResendCountKey.Int(val) -} - -// Semantic Convention for HTTP Server -const ( - // HTTPTargetKey is the attribute Key conforming to the "http.target" - // semantic conventions. It represents the full request target as passed in - // a HTTP request line or equivalent. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '/users/12314/?q=ddds' - HTTPTargetKey = attribute.Key("http.target") - - // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" - // semantic conventions. It represents the IP address of the original - // client behind all proxies, if known (e.g. from - // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '83.164.160.102' - // Note: This is not necessarily the same as `net.sock.peer.addr`, which - // would - // identify the network-level peer, which may be a proxy. - // - // This attribute should be set when a source of information different - // from the one used for `net.sock.peer.addr`, is available even if that - // other - // source just confirms the same value as `net.sock.peer.addr`. - // Rationale: For `net.sock.peer.addr`, one typically does not know if it - // comes from a proxy, reverse proxy, or the actual client. Setting - // `http.client_ip` when it's the same as `net.sock.peer.addr` means that - // one is at least somewhat confident that the address is not that of - // the closest proxy. - HTTPClientIPKey = attribute.Key("http.client_ip") -) - -// HTTPTarget returns an attribute KeyValue conforming to the "http.target" -// semantic conventions. It represents the full request target as passed in a -// HTTP request line or equivalent. -func HTTPTarget(val string) attribute.KeyValue { - return HTTPTargetKey.String(val) -} - -// HTTPClientIP returns an attribute KeyValue conforming to the -// "http.client_ip" semantic conventions. It represents the IP address of the -// original client behind all proxies, if known (e.g. from -// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). -func HTTPClientIP(val string) attribute.KeyValue { - return HTTPClientIPKey.String(val) -} - -// The `aws` conventions apply to operations using the AWS SDK. They map -// request or response parameters in AWS SDK API calls to attributes on a Span. -// The conventions have been collected over time based on feedback from AWS -// users of tracing and will continue to evolve as new interesting conventions -// are found. -// Some descriptions are also provided for populating general OpenTelemetry -// semantic conventions based on these APIs. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. 
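A sketch for the HTTP client conventions above; http.url must be passed without embedded credentials and http.resend_count is only added on retries. Tracer name, span name, and the semconv import path are assumptions:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// clientRequestSpan starts a CLIENT span for an outgoing request; attempt is
// 1-based, so http.resend_count is only set when the request was resent.
func clientRequestSpan(ctx context.Context, url string, attempt int) trace.Span {
	attrs := []attribute.KeyValue{semconv.HTTPURL(url)} // strip credentials before passing the URL
	if attempt > 1 {
		attrs = append(attrs, semconv.HTTPResendCount(attempt))
	}
	_, span := otel.Tracer("http-client").Start(ctx, "HTTP GET",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(attrs...),
	)
	return span
}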
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes that exist for multiple DynamoDB request types. -const ( - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
- // - // Type: double - // RequirementLevel: Optional - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") -) - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. 
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// DynamoDB.CreateTable -const ( - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// DynamoDB.ListTables -const ( - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the the - // number of items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the the -// number of items in the `TableNames` response parameter. 
-func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// DynamoDB.Query -const ( - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// DynamoDB.Scan -const ( - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") -) - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. 
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// DynamoDB.UpdateTable -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // the `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// Attributes that exist for S3 request types. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. 
that require the object key as a mandatory parameter. - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. 
-func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// Semantic conventions to apply when instrumenting the GraphQL implementation. -// They map GraphQL operations to attributes on a Span. -const ( - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") - - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. - GraphqlDocumentKey = attribute.Key("graphql.document") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// General attributes used in messaging systems. -const ( - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents a string - // identifying the messaging system. 
- // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' - MessagingSystemKey = attribute.Key("messaging.system") - - // MessagingOperationKey is the attribute Key conforming to the - // "messaging.operation" semantic conventions. It represents a string - // identifying the kind of messaging operation as defined in the [Operation - // names](#operation-names) section above. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationKey = attribute.Key("messaging.operation") - - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the span describes an - // operation on a batch of messages.) - // Stability: stable - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") -) - -var ( - // publish - MessagingOperationPublish = MessagingOperationKey.String("publish") - // receive - MessagingOperationReceive = MessagingOperationKey.String("receive") - // process - MessagingOperationProcess = MessagingOperationKey.String("process") -) - -// MessagingSystem returns an attribute KeyValue conforming to the -// "messaging.system" semantic conventions. It represents a string identifying -// the messaging system. -func MessagingSystem(val string) attribute.KeyValue { - return MessagingSystemKey.String(val) -} - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// Semantic convention for a consumer of messages received from a messaging -// system -const ( - // MessagingConsumerIDKey is the attribute Key conforming to the - // "messaging.consumer.id" semantic conventions. It represents the - // identifier for the consumer receiving a message. For Kafka, set it to - // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if - // both are present, or only `messaging.kafka.consumer.group`. For brokers, - // such as RabbitMQ and Artemis, set it to the `client_id` of the client - // consuming the message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'mygroup - client-6' - MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") -) - -// MessagingConsumerID returns an attribute KeyValue conforming to the -// "messaging.consumer.id" semantic conventions. It represents the identifier -// for the consumer receiving a message. 
For Kafka, set it to -// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both -// are present, or only `messaging.kafka.consumer.group`. For brokers, such as -// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the -// message. -func MessagingConsumerID(val string) attribute.KeyValue { - return MessagingConsumerIDKey.String(val) -} - -// Semantic conventions for remote procedure calls. -const ( - // RPCSystemKey is the attribute Key conforming to the "rpc.system" - // semantic conventions. It represents a string identifying the remoting - // system. See below for a list of well-known identifiers. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - RPCSystemKey = attribute.Key("rpc.system") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" - // semantic conventions. It represents the full (logical) name of the - // service being called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing - // class. The `code.namespace` attribute may be used to store the latter - // (despite the attribute name, it may include a class name; e.g., class - // with method actually executing the call on the server side, RPC client - // stub class on the client side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" - // semantic conventions. It represents the name of the (logical) method - // being called, must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") -) - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// Tech-specific attributes for gRPC. -const ( - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. 
It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). -const ( - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // does not specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If other than the default - // version (`1.0`)) - // Stability: stable - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If response is not successful.) - // Stability: stable - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") -) - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 -// does not specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of request or response. Since protocol allows id to be int, string, -// `null` or missing (for notifications), value is expected to be cast to -// string for simplicity. Use empty string in case of `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} - -// Tech-specific attributes for Connect RPC. -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes](https://connect.build/docs/protocol/#error-codes) of the - // Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: ConditionallyRequired (If response is not successful - // and if error code available.) 
- // Stability: stable - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") -) - -var ( - // cancelled - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md deleted file mode 100644 index 02b56115..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md +++ /dev/null @@ -1,4 +0,0 @@ - -# Migration from v1.33.0 to v1.34.0 - -The `go.opentelemetry.io/otel/semconv/v1.34.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.33.0`. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md deleted file mode 100644 index fab06c97..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.34.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.34.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.34.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md new file mode 100644 index 00000000..24805478 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md @@ -0,0 +1,41 @@ + +# Migration from v1.36.0 to v1.37.0 + +The `go.opentelemetry.io/otel/semconv/v1.37.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.36.0` with the following exceptions. + +## Removed + +The following declarations have been removed. +Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions. + +If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use. 
+If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case. + +- `ContainerRuntime` +- `ContainerRuntimeKey` +- `GenAIOpenAIRequestServiceTierAuto` +- `GenAIOpenAIRequestServiceTierDefault` +- `GenAIOpenAIRequestServiceTierKey` +- `GenAIOpenAIResponseServiceTier` +- `GenAIOpenAIResponseServiceTierKey` +- `GenAIOpenAIResponseSystemFingerprint` +- `GenAIOpenAIResponseSystemFingerprintKey` +- `GenAISystemAWSBedrock` +- `GenAISystemAnthropic` +- `GenAISystemAzureAIInference` +- `GenAISystemAzureAIOpenAI` +- `GenAISystemCohere` +- `GenAISystemDeepseek` +- `GenAISystemGCPGemini` +- `GenAISystemGCPGenAI` +- `GenAISystemGCPVertexAI` +- `GenAISystemGroq` +- `GenAISystemIBMWatsonxAI` +- `GenAISystemKey` +- `GenAISystemMistralAI` +- `GenAISystemOpenAI` +- `GenAISystemPerplexity` +- `GenAISystemXai` + +[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions +[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md new file mode 100644 index 00000000..d795247f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.37.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.37.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.37.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go similarity index 89% rename from vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go index 5b566625..b6b27498 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go @@ -3,7 +3,7 @@ // Code generated from semantic convention specification. DO NOT EDIT. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" import "go.opentelemetry.io/otel/attribute" @@ -28,7 +28,8 @@ const ( // AndroidOSAPILevelKey is the attribute Key conforming to the // "android.os.api_level" semantic conventions. It represents the uniquely // identifies the framework API revision offered by a version (`os.version`) of - // the android operating system. More information can be found [here]. + // the android operating system. More information can be found in the + // [Android API levels documentation]. // // Type: string // RequirementLevel: Recommended @@ -36,16 +37,17 @@ const ( // // Examples: "33", "32" // - // [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels + // [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels AndroidOSAPILevelKey = attribute.Key("android.os.api_level") ) // AndroidOSAPILevel returns an attribute KeyValue conforming to the // "android.os.api_level" semantic conventions. It represents the uniquely // identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found [here]. +// the android operating system. More information can be found in the +// [Android API levels documentation]. 
// -// [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels +// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels func AndroidOSAPILevel(val string) attribute.KeyValue { return AndroidOSAPILevelKey.String(val) } @@ -73,6 +75,18 @@ var ( // Namespace: app const ( + // AppBuildIDKey is the attribute Key conforming to the "app.build_id" semantic + // conventions. It represents the unique identifier for a particular build or + // compilation of the application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6cff0a7e-cefc-4668-96f5-1273d8b334d0", + // "9f2b833506aa6973a92fde9733e6271f", "my-app-1.0.0-code-123" + AppBuildIDKey = attribute.Key("app.build_id") + // AppInstallationIDKey is the attribute Key conforming to the // "app.installation.id" semantic conventions. It represents a unique identifier // representing the installation of an application on a specific device. @@ -106,16 +120,51 @@ const ( // - [App set ID]. // - [`Settings.getString(Settings.Secure.ANDROID_ID)`]. // - // More information about Android identifier best practices can be found [here] - // . + // More information about Android identifier best practices can be found in the + // [Android user data IDs guide]. // // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations // [App set ID]: https://developer.android.com/identity/app-set-id // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID - // [here]: https://developer.android.com/training/articles/user-data-ids + // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids AppInstallationIDKey = attribute.Key("app.installation.id") + // AppJankFrameCountKey is the attribute Key conforming to the + // "app.jank.frame_count" semantic conventions. It represents a number of frame + // renders that experienced jank. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 9, 42 + // Note: Depending on platform limitations, the value provided MAY be + // approximation. + AppJankFrameCountKey = attribute.Key("app.jank.frame_count") + + // AppJankPeriodKey is the attribute Key conforming to the "app.jank.period" + // semantic conventions. It represents the time period, in seconds, for which + // this jank is being reported. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 5.0, 10.24 + AppJankPeriodKey = attribute.Key("app.jank.period") + + // AppJankThresholdKey is the attribute Key conforming to the + // "app.jank.threshold" semantic conventions. It represents the minimum + // rendering threshold for this jank, in seconds. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.016, 0.7, 1.024 + AppJankThresholdKey = attribute.Key("app.jank.threshold") + // AppScreenCoordinateXKey is the attribute Key conforming to the // "app.screen.coordinate.x" semantic conventions. It represents the x // (horizontal) coordinate of a screen coordinate, in screen pixels. 
@@ -164,6 +213,13 @@ const ( AppWidgetNameKey = attribute.Key("app.widget.name") ) +// AppBuildID returns an attribute KeyValue conforming to the "app.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the application. +func AppBuildID(val string) attribute.KeyValue { + return AppBuildIDKey.String(val) +} + // AppInstallationID returns an attribute KeyValue conforming to the // "app.installation.id" semantic conventions. It represents a unique identifier // representing the installation of an application on a specific device. @@ -171,6 +227,27 @@ func AppInstallationID(val string) attribute.KeyValue { return AppInstallationIDKey.String(val) } +// AppJankFrameCount returns an attribute KeyValue conforming to the +// "app.jank.frame_count" semantic conventions. It represents a number of frame +// renders that experienced jank. +func AppJankFrameCount(val int) attribute.KeyValue { + return AppJankFrameCountKey.Int(val) +} + +// AppJankPeriod returns an attribute KeyValue conforming to the +// "app.jank.period" semantic conventions. It represents the time period, in +// seconds, for which this jank is being reported. +func AppJankPeriod(val float64) attribute.KeyValue { + return AppJankPeriodKey.Float64(val) +} + +// AppJankThreshold returns an attribute KeyValue conforming to the +// "app.jank.threshold" semantic conventions. It represents the minimum rendering +// threshold for this jank, in seconds. +func AppJankThreshold(val float64) attribute.KeyValue { + return AppJankThresholdKey.Float64(val) +} + // AppScreenCoordinateX returns an attribute KeyValue conforming to the // "app.screen.coordinate.x" semantic conventions. It represents the x // (horizontal) coordinate of a screen coordinate, in screen pixels. @@ -1525,59 +1602,14 @@ func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue { // Enum values for aws.ecs.launchtype var ( - // ec2 + // Amazon EC2 // Stability: development AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate + // Amazon Fargate // Stability: development AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) -// Namespace: az -const ( - // AzNamespaceKey is the attribute Key conforming to the "az.namespace" semantic - // conventions. It represents the [Azure Resource Provider Namespace] as - // recognized by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" - // - // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers - AzNamespaceKey = attribute.Key("az.namespace") - - // AzServiceRequestIDKey is the attribute Key conforming to the - // "az.service_request_id" semantic conventions. It represents the unique - // identifier of the service request. It's generated by the Azure service and - // returned with the response. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "00000000-0000-0000-0000-000000000000" - AzServiceRequestIDKey = attribute.Key("az.service_request_id") -) - -// AzNamespace returns an attribute KeyValue conforming to the "az.namespace" -// semantic conventions. It represents the [Azure Resource Provider Namespace] as -// recognized by the client. 
-// -// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers -func AzNamespace(val string) attribute.KeyValue { - return AzNamespaceKey.String(val) -} - -// AzServiceRequestID returns an attribute KeyValue conforming to the -// "az.service_request_id" semantic conventions. It represents the unique -// identifier of the service request. It's generated by the Azure service and -// returned with the response. -func AzServiceRequestID(val string) attribute.KeyValue { - return AzServiceRequestIDKey.String(val) -} - // Namespace: azure const ( // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" @@ -1665,6 +1697,31 @@ const ( // // Examples: 1000, 1002 AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") + + // AzureResourceProviderNamespaceKey is the attribute Key conforming to the + // "azure.resource_provider.namespace" semantic conventions. It represents the + // [Azure Resource Provider Namespace] as recognized by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" + // + // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers + AzureResourceProviderNamespaceKey = attribute.Key("azure.resource_provider.namespace") + + // AzureServiceRequestIDKey is the attribute Key conforming to the + // "azure.service.request.id" semantic conventions. It represents the unique + // identifier of the service request. It's generated by the Azure service and + // returned with the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00000000-0000-0000-0000-000000000000" + AzureServiceRequestIDKey = attribute.Key("azure.service.request.id") ) // AzureClientID returns an attribute KeyValue conforming to the @@ -1705,6 +1762,23 @@ func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { return AzureCosmosDBResponseSubStatusCodeKey.Int(val) } +// AzureResourceProviderNamespace returns an attribute KeyValue conforming to the +// "azure.resource_provider.namespace" semantic conventions. It represents the +// [Azure Resource Provider Namespace] as recognized by the client. +// +// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers +func AzureResourceProviderNamespace(val string) attribute.KeyValue { + return AzureResourceProviderNamespaceKey.String(val) +} + +// AzureServiceRequestID returns an attribute KeyValue conforming to the +// "azure.service.request.id" semantic conventions. It represents the unique +// identifier of the service request. It's generated by the Azure service and +// returned with the response. +func AzureServiceRequestID(val string) attribute.KeyValue { + return AzureServiceRequestIDKey.String(val) +} + // Enum values for azure.cosmosdb.connection.mode var ( // Gateway (HTTP) connection. 
@@ -1717,19 +1791,19 @@ var ( // Enum values for azure.cosmosdb.consistency.level var ( - // strong + // Strong // Stability: development AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") - // bounded_staleness + // Bounded Staleness // Stability: development AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") - // session + // Session // Stability: development AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") - // eventual + // Eventual // Stability: development AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") - // consistent_prefix + // Consistent Prefix // Stability: development AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") ) @@ -1944,37 +2018,37 @@ func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue { // Enum values for cassandra.consistency.level var ( - // all + // All // Stability: development CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all") - // each_quorum + // Each Quorum // Stability: development CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum") - // quorum + // Quorum // Stability: development CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum") - // local_quorum + // Local Quorum // Stability: development CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum") - // one + // One // Stability: development CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one") - // two + // Two // Stability: development CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two") - // three + // Three // Stability: development CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three") - // local_one + // Local One // Stability: development CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one") - // any + // Any // Stability: development CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any") - // serial + // Serial // Stability: development CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial") - // local_serial + // Local Serial // Stability: development CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial") ) @@ -2527,7 +2601,7 @@ const ( // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names - // [Fully Qualified Resource ID]: https://docs.microsoft.com/rest/api/resources/resources/get-by-id + // [Fully Qualified Resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id CloudResourceIDKey = attribute.Key("cloud.resource_id") ) @@ -2604,25 +2678,25 @@ var ( CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift") // Azure Virtual Machines // Stability: development - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + CloudPlatformAzureVM = CloudPlatformKey.String("azure.vm") // Azure Container Apps // Stability: development - CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure.container_apps") // Azure Container Instances // 
Stability: development - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure.container_instances") // Azure Kubernetes Service // Stability: development - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + CloudPlatformAzureAKS = CloudPlatformKey.String("azure.aks") // Azure Functions // Stability: development - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure.functions") // Azure App Service // Stability: development - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + CloudPlatformAzureAppService = CloudPlatformKey.String("azure.app_service") // Azure Red Hat OpenShift // Stability: development - CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure_openshift") + CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure.openshift") // Google Bare Metal Solution (BMS) // Stability: development CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") @@ -3374,16 +3448,40 @@ const ( // Examples: "opentelemetry-autoconf" ContainerNameKey = attribute.Key("container.name") - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container runtime - // managing this container. + // ContainerRuntimeDescriptionKey is the attribute Key conforming to the + // "container.runtime.description" semantic conventions. It represents a + // description about the runtime which could include, for example details about + // the CRI/API version being used or other customisations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "docker://19.3.1 - CRI: 1.22.0" + ContainerRuntimeDescriptionKey = attribute.Key("container.runtime.description") + + // ContainerRuntimeNameKey is the attribute Key conforming to the + // "container.runtime.name" semantic conventions. It represents the container + // runtime managing this container. // // Type: string // RequirementLevel: Recommended // Stability: Development // // Examples: "docker", "containerd", "rkt" - ContainerRuntimeKey = attribute.Key("container.runtime") + ContainerRuntimeNameKey = attribute.Key("container.runtime.name") + + // ContainerRuntimeVersionKey is the attribute Key conforming to the + // "container.runtime.version" semantic conventions. It represents the version + // of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0.0 + ContainerRuntimeVersionKey = attribute.Key("container.runtime.version") ) // ContainerCommand returns an attribute KeyValue conforming to the @@ -3467,6 +3565,13 @@ func ContainerImageTags(val ...string) attribute.KeyValue { return ContainerImageTagsKey.StringSlice(val) } +// ContainerLabel returns an attribute KeyValue conforming to the +// "container.label" semantic conventions. It represents the container labels, +// `` being the label name, the value being the label value. +func ContainerLabel(key string, val string) attribute.KeyValue { + return attribute.String("container.label."+key, val) +} + // ContainerName returns an attribute KeyValue conforming to the "container.name" // semantic conventions. It represents the container name used by container // runtime. 
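The container.runtime split (name/version/description) and the container.label template attribute defined above could be exercised along these lines (a sketch only; example values are illustrative):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed; match the vendored version
)

func main() {
	attrs := []attribute.KeyValue{
		// container.runtime is now split into separate name/version keys.
		semconv.ContainerRuntimeNameKey.String("docker"),
		semconv.ContainerRuntimeVersionKey.String("28.4.0"),
		// Template attribute: the label name is folded into the key,
		// producing "container.label.app".
		semconv.ContainerLabel("app", "abra"),
	}
	fmt.Println(attrs)
}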
@@ -3474,11 +3579,26 @@ func ContainerName(val string) attribute.KeyValue { return ContainerNameKey.String(val) } -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container runtime -// managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) +// ContainerRuntimeDescription returns an attribute KeyValue conforming to the +// "container.runtime.description" semantic conventions. It represents a +// description about the runtime which could include, for example details about +// the CRI/API version being used or other customisations. +func ContainerRuntimeDescription(val string) attribute.KeyValue { + return ContainerRuntimeDescriptionKey.String(val) +} + +// ContainerRuntimeName returns an attribute KeyValue conforming to the +// "container.runtime.name" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntimeName(val string) attribute.KeyValue { + return ContainerRuntimeNameKey.String(val) +} + +// ContainerRuntimeVersion returns an attribute KeyValue conforming to the +// "container.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without modification. +func ContainerRuntimeVersion(val string) attribute.KeyValue { + return ContainerRuntimeVersionKey.String(val) } // Namespace: cpu @@ -3514,28 +3634,28 @@ func CPULogicalNumber(val int) attribute.KeyValue { // Enum values for cpu.mode var ( - // user + // User // Stability: development CPUModeUser = CPUModeKey.String("user") - // system + // System // Stability: development CPUModeSystem = CPUModeKey.String("system") - // nice + // Nice // Stability: development CPUModeNice = CPUModeKey.String("nice") - // idle + // Idle // Stability: development CPUModeIdle = CPUModeKey.String("idle") - // iowait + // IO Wait // Stability: development CPUModeIOWait = CPUModeKey.String("iowait") - // interrupt + // Interrupt // Stability: development CPUModeInterrupt = CPUModeKey.String("interrupt") - // steal + // Steal // Stability: development CPUModeSteal = CPUModeKey.String("steal") - // kernel + // Kernel // Stability: development CPUModeKernel = CPUModeKey.String("kernel") ) @@ -3794,6 +3914,22 @@ func DBOperationName(val string) attribute.KeyValue { return DBOperationNameKey.String(val) } +// DBOperationParameter returns an attribute KeyValue conforming to the +// "db.operation.parameter" semantic conventions. It represents a database +// operation parameter, with `` being the parameter name, and the attribute +// value being a string representation of the parameter value. +func DBOperationParameter(key string, val string) attribute.KeyValue { + return attribute.String("db.operation.parameter."+key, val) +} + +// DBQueryParameter returns an attribute KeyValue conforming to the +// "db.query.parameter" semantic conventions. It represents a database query +// parameter, with `` being the parameter name, and the attribute value +// being a string representation of the parameter value. +func DBQueryParameter(key string, val string) attribute.KeyValue { + return attribute.String("db.query.parameter."+key, val) +} + // DBQuerySummary returns an attribute KeyValue conforming to the // "db.query.summary" semantic conventions. It represents the low cardinality // summary of a database query. @@ -4194,8 +4330,8 @@ const ( // Hardware IDs (e.g. 
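The db.operation.parameter and db.query.parameter helpers added above are template attributes: the parameter name becomes part of the key and the value is its string representation. A small sketch, under the same assumed import:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed; match the vendored version
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.DBQueryParameter("0", "someval"),     // key: db.query.parameter.0
		semconv.DBOperationParameter("limit", "100"), // key: db.operation.parameter.limit
	}
	fmt.Println(attrs)
}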
vendor-specific serial number, IMEI or MAC address) MAY be // used as values. // - // More information about Android identifier best practices can be found [here] - // . + // More information about Android identifier best practices can be found in the + // [Android user data IDs guide]. // // > [!WARNING]> This attribute may contain sensitive (PII) information. Caution // > should be taken when storing personal data or anything which can identify a @@ -4210,7 +4346,7 @@ const ( // > opt-in feature.> See [`app.installation.id`]> for a more // > privacy-preserving alternative. // - // [here]: https://developer.android.com/training/articles/user-data-ids + // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id DeviceIDKey = attribute.Key("device.id") @@ -4308,6 +4444,17 @@ var ( // Namespace: dns const ( + // DNSAnswersKey is the attribute Key conforming to the "dns.answers" semantic + // conventions. It represents the list of IPv4 or IPv6 addresses resolved during + // DNS lookup. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334" + DNSAnswersKey = attribute.Key("dns.answers") + // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name" // semantic conventions. It represents the name being queried. // @@ -4323,6 +4470,13 @@ const ( DNSQuestionNameKey = attribute.Key("dns.question.name") ) +// DNSAnswers returns an attribute KeyValue conforming to the "dns.answers" +// semantic conventions. It represents the list of IPv4 or IPv6 addresses +// resolved during DNS lookup. +func DNSAnswers(val ...string) attribute.KeyValue { + return DNSAnswersKey.StringSlice(val) +} + // DNSQuestionName returns an attribute KeyValue conforming to the // "dns.question.name" semantic conventions. It represents the name being // queried. 
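The new dns.answers attribute above takes a string slice; a possible span-attribute sketch, reusing the documented example values:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed; match the vendored version
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.DNSQuestionName("www.example.com"),
		// Variadic helper producing a string-slice attribute.
		semconv.DNSAnswers("10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"),
	}
	fmt.Println(attrs)
}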
@@ -4941,7 +5095,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db" FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id") @@ -4951,7 +5105,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "logo-color" FeatureFlagKeyKey = attribute.Key("feature_flag.key") @@ -4962,7 +5116,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "Flag Manager" FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name") @@ -4973,7 +5127,7 @@ const ( // // Type: Enum // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "static", "targeting_match", "error", "default" FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason") @@ -4984,7 +5138,7 @@ const ( // // Type: any // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "#ff0000", true, 3 // Note: With some feature flag providers, feature flag results can be quite @@ -5004,7 +5158,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "red", "true", "on" // Note: A semantic identifier, commonly referred to as a variant, provides a @@ -5020,7 +5174,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "proj-1", "ab98sgs", "service1/dev" // @@ -5034,7 +5188,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "1", "01ABCDEF" FeatureFlagVersionKey = attribute.Key("feature_flag.version") @@ -5088,34 +5242,34 @@ func FeatureFlagVersion(val string) attribute.KeyValue { // Enum values for feature_flag.result.reason var ( // The resolved value is static (no dynamic evaluation). - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static") // The resolved value fell back to a pre-configured value (no dynamic evaluation // occurred or dynamic evaluation yielded no result). - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default") // The resolved value was the result of a dynamic evaluation, such as a rule or // specific user-targeting. - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match") // The resolved value was the result of pseudorandom assignment. - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split") // The resolved value was retrieved from cache. - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached") // The resolved value was the result of the flag being disabled in the // management system. - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled") // The reason for the resolved value could not be determined. 
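For illustration, the feature_flag attributes promoted to Release_Candidate above might describe one flag evaluation as follows (a sketch; values taken from the documented examples):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed; match the vendored version
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.FeatureFlagKeyKey.String("logo-color"),
		semconv.FeatureFlagProviderNameKey.String("Flag Manager"),
		semconv.FeatureFlagResultReasonStatic, // enum value: feature_flag.result.reason=static
	}
	fmt.Println(attrs)
}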
- // Stability: development + // Stability: release_candidate FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown") // The resolved value is non-authoritative or possibly out of date - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale") // The resolved value was the result of an error. - // Stability: development + // Stability: release_candidate FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error") ) @@ -5208,7 +5362,7 @@ const ( // RequirementLevel: Recommended // Stability: Development // - // Examples: "Zone.Identifer" + // Examples: "Zone.Identifier" // Note: On Linux, a resource fork is used to store additional data with a // filesystem object. A file always has at least one fork for the data portion, // and additional forks may exist. @@ -5863,39 +6017,41 @@ const ( // `db.*`, to further identify and describe the data source. GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id") - // GenAIOpenAIRequestServiceTierKey is the attribute Key conforming to the - // "gen_ai.openai.request.service_tier" semantic conventions. It represents the - // service tier requested. May be a specific tier, default, or auto. + // GenAIInputMessagesKey is the attribute Key conforming to the + // "gen_ai.input.messages" semantic conventions. It represents the chat history + // provided to the model as an input. // - // Type: Enum + // Type: any // RequirementLevel: Recommended // Stability: Development // - // Examples: "auto", "default" - GenAIOpenAIRequestServiceTierKey = attribute.Key("gen_ai.openai.request.service_tier") - - // GenAIOpenAIResponseServiceTierKey is the attribute Key conforming to the - // "gen_ai.openai.response.service_tier" semantic conventions. It represents the - // service tier used for the response. + // Examples: "[\n {\n "role": "user",\n "parts": [\n {\n "type": "text",\n + // "content": "Weather in Paris?"\n }\n ]\n },\n {\n "role": "assistant",\n + // "parts": [\n {\n "type": "tool_call",\n "id": + // "call_VSPygqKTWdrhaFErNvMV18Yl",\n "name": "get_weather",\n "arguments": {\n + // "location": "Paris"\n }\n }\n ]\n },\n {\n "role": "tool",\n "parts": [\n {\n + // "type": "tool_call_response",\n "id": " call_VSPygqKTWdrhaFErNvMV18Yl",\n + // "result": "rainy, 57°F"\n }\n ]\n }\n]\n" + // Note: Instrumentations MUST follow [Input messages JSON schema]. + // When the attribute is recorded on events, it MUST be recorded in structured + // form. When recorded on spans, it MAY be recorded as a JSON string if + // structured + // format is not supported and SHOULD be recorded in structured form otherwise. // - // Type: string - // RequirementLevel: Recommended - // Stability: Development + // Messages MUST be provided in the order they were sent to the model. + // Instrumentations MAY provide a way for users to filter or truncate + // input messages. // - // Examples: "scale", "default" - GenAIOpenAIResponseServiceTierKey = attribute.Key("gen_ai.openai.response.service_tier") - - // GenAIOpenAIResponseSystemFingerprintKey is the attribute Key conforming to - // the "gen_ai.openai.response.system_fingerprint" semantic conventions. It - // represents a fingerprint to track any eventual change in the Generative AI - // environment. + // > [!Warning] + // > This attribute is likely to contain sensitive information including + // > user/PII data. 
// - // Type: string - // RequirementLevel: Recommended - // Stability: Development + // See [Recording content on attributes] + // section for more details. // - // Examples: "fp_44709d6fcb" - GenAIOpenAIResponseSystemFingerprintKey = attribute.Key("gen_ai.openai.response.system_fingerprint") + // [Input messages JSON schema]: /docs/gen-ai/gen-ai-input-messages.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAIInputMessagesKey = attribute.Key("gen_ai.input.messages") // GenAIOperationNameKey is the attribute Key conforming to the // "gen_ai.operation.name" semantic conventions. It represents the name of the @@ -5913,6 +6069,44 @@ const ( // libraries SHOULD use applicable predefined value. GenAIOperationNameKey = attribute.Key("gen_ai.operation.name") + // GenAIOutputMessagesKey is the attribute Key conforming to the + // "gen_ai.output.messages" semantic conventions. It represents the messages + // returned by the model where each message represents a specific model response + // (choice, candidate). + // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "role": "assistant",\n "parts": [\n {\n "type": "text",\n + // "content": "The weather in Paris is currently rainy with a temperature of + // 57°F."\n }\n ],\n "finish_reason": "stop"\n }\n]\n" + // Note: Instrumentations MUST follow [Output messages JSON schema] + // + // Each message represents a single output choice/candidate generated by + // the model. Each message corresponds to exactly one generation + // (choice/candidate) and vice versa - one choice cannot be split across + // multiple messages or one message cannot contain parts from multiple choices. + // + // When the attribute is recorded on events, it MUST be recorded in structured + // form. When recorded on spans, it MAY be recorded as a JSON string if + // structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Instrumentations MAY provide a way for users to filter or truncate + // output messages. + // + // > [!Warning] + // > This attribute is likely to contain sensitive information including + // > user/PII data. + // + // See [Recording content on attributes] + // section for more details. + // + // [Output messages JSON schema]: /docs/gen-ai/gen-ai-output-messages.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAIOutputMessagesKey = attribute.Key("gen_ai.output.messages") + // GenAIOutputTypeKey is the attribute Key conforming to the // "gen_ai.output.type" semantic conventions. It represents the represents the // content type requested by the client. @@ -5931,6 +6125,35 @@ const ( // `gen_ai.output.{type}.*` attributes. GenAIOutputTypeKey = attribute.Key("gen_ai.output.type") + // GenAIProviderNameKey is the attribute Key conforming to the + // "gen_ai.provider.name" semantic conventions. It represents the Generative AI + // provider as identified by the client or server instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The attribute SHOULD be set based on the instrumentation's best + // knowledge and may differ from the actual model provider. 
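A sketch of recording gen_ai.input.messages on a span as a JSON string, which the note above permits when a structured form is not supported; the message shape follows the documented example:

package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed; match the vendored version
)

func main() {
	// Chat history in the shape described by the input-messages JSON schema.
	history := []map[string]any{
		{"role": "user", "parts": []map[string]any{
			{"type": "text", "content": "Weather in Paris?"},
		}},
	}
	raw, err := json.Marshal(history)
	if err != nil {
		panic(err)
	}
	attrs := []attribute.KeyValue{
		semconv.GenAIInputMessagesKey.String(string(raw)),
	}
	fmt.Println(attrs)
}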
+ // + // Multiple providers, including Azure OpenAI, Gemini, and AI hosting platforms + // are accessible using the OpenAI REST API and corresponding client libraries, + // but may proxy or host models from different providers. + // + // The `gen_ai.request.model`, `gen_ai.response.model`, and `server.address` + // attributes may help identify the actual system in use. + // + // The `gen_ai.provider.name` attribute acts as a discriminator that + // identifies the GenAI telemetry format flavor specific to that provider + // within GenAI semantic conventions. + // It SHOULD be set consistently with provider-specific attributes and signals. + // For example, GenAI spans, metrics, and events related to AWS Bedrock + // should have the `gen_ai.provider.name` set to `aws.bedrock` and include + // applicable `aws.bedrock.*` attributes and are not expected to include + // `openai.*` attributes. + GenAIProviderNameKey = attribute.Key("gen_ai.provider.name") + // GenAIRequestChoiceCountKey is the attribute Key conforming to the // "gen_ai.request.choice.count" semantic conventions. It represents the target // number of candidate completions to return. @@ -6088,31 +6311,44 @@ const ( // Examples: "gpt-4-0613" GenAIResponseModelKey = attribute.Key("gen_ai.response.model") - // GenAISystemKey is the attribute Key conforming to the "gen_ai.system" - // semantic conventions. It represents the Generative AI product as identified - // by the client or server instrumentation. + // GenAISystemInstructionsKey is the attribute Key conforming to the + // "gen_ai.system_instructions" semantic conventions. It represents the system + // message or instructions provided to the GenAI model separately from the chat + // history. // - // Type: Enum + // Type: any // RequirementLevel: Recommended // Stability: Development // - // Examples: openai - // Note: The `gen_ai.system` describes a family of GenAI models with specific - // model identified - // by `gen_ai.request.model` and `gen_ai.response.model` attributes. + // Examples: "[\n {\n "type": "text",\n "content": "You are an Agent that greet + // users, always use greetings tool to respond"\n }\n]\n", "[\n {\n "type": + // "text",\n "content": "You are a language translator."\n },\n {\n "type": + // "text",\n "content": "Your mission is to translate text in English to + // French."\n }\n]\n" + // Note: This attribute SHOULD be used when the corresponding provider or API + // allows to provide system instructions or messages separately from the + // chat history. // - // The actual GenAI product may differ from the one identified by the client. - // Multiple systems, including Azure OpenAI and Gemini, are accessible by OpenAI - // client - // libraries. In such cases, the `gen_ai.system` is set to `openai` based on the - // instrumentation's best knowledge, instead of the actual system. The - // `server.address` - // attribute may help identify the actual system in use for `openai`. + // Instructions that are part of the chat history SHOULD be recorded in + // `gen_ai.input.messages` attribute instead. // - // For custom model, a custom friendly name SHOULD be used. - // If none of these options apply, the `gen_ai.system` SHOULD be set to `_OTHER` - // . - GenAISystemKey = attribute.Key("gen_ai.system") + // Instrumentations MUST follow [System instructions JSON schema]. + // + // When recorded on spans, it MAY be recorded as a JSON string if structured + // format is not supported and SHOULD be recorded in structured form otherwise. 
+ // + // Instrumentations MAY provide a way for users to filter or truncate + // system instructions. + // + // > [!Warning] + // > This attribute may contain sensitive information. + // + // See [Recording content on attributes] + // section for more details. + // + // [System instructions JSON schema]: /docs/gen-ai/gen-ai-system-instructions.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAISystemInstructionsKey = attribute.Key("gen_ai.system_instructions") // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type" // semantic conventions. It represents the type of token being counted. @@ -6237,21 +6473,6 @@ func GenAIDataSourceID(val string) attribute.KeyValue { return GenAIDataSourceIDKey.String(val) } -// GenAIOpenAIResponseServiceTier returns an attribute KeyValue conforming to the -// "gen_ai.openai.response.service_tier" semantic conventions. It represents the -// service tier used for the response. -func GenAIOpenAIResponseServiceTier(val string) attribute.KeyValue { - return GenAIOpenAIResponseServiceTierKey.String(val) -} - -// GenAIOpenAIResponseSystemFingerprint returns an attribute KeyValue conforming -// to the "gen_ai.openai.response.system_fingerprint" semantic conventions. It -// represents a fingerprint to track any eventual change in the Generative AI -// environment. -func GenAIOpenAIResponseSystemFingerprint(val string) attribute.KeyValue { - return GenAIOpenAIResponseSystemFingerprintKey.String(val) -} - // GenAIRequestChoiceCount returns an attribute KeyValue conforming to the // "gen_ai.request.choice.count" semantic conventions. It represents the target // number of candidate completions to return. @@ -6393,16 +6614,6 @@ func GenAIUsageOutputTokens(val int) attribute.KeyValue { return GenAIUsageOutputTokensKey.Int(val) } -// Enum values for gen_ai.openai.request.service_tier -var ( - // The system will utilize scale tier credits until they are exhausted. - // Stability: development - GenAIOpenAIRequestServiceTierAuto = GenAIOpenAIRequestServiceTierKey.String("auto") - // The system will utilize the default scale tier. - // Stability: development - GenAIOpenAIRequestServiceTierDefault = GenAIOpenAIRequestServiceTierKey.String("default") -) - // Enum values for gen_ai.operation.name var ( // Chat completion operation such as [OpenAI Chat API] @@ -6452,57 +6663,79 @@ var ( GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") ) -// Enum values for gen_ai.system +// Enum values for gen_ai.provider.name var ( - // OpenAI + // [OpenAI] // Stability: development - GenAISystemOpenAI = GenAISystemKey.String("openai") + // + // [OpenAI]: https://openai.com/ + GenAIProviderNameOpenAI = GenAIProviderNameKey.String("openai") // Any Google generative AI endpoint // Stability: development - GenAISystemGCPGenAI = GenAISystemKey.String("gcp.gen_ai") - // Vertex AI + GenAIProviderNameGCPGenAI = GenAIProviderNameKey.String("gcp.gen_ai") + // [Vertex AI] // Stability: development - GenAISystemGCPVertexAI = GenAISystemKey.String("gcp.vertex_ai") - // Gemini + // + // [Vertex AI]: https://cloud.google.com/vertex-ai + GenAIProviderNameGCPVertexAI = GenAIProviderNameKey.String("gcp.vertex_ai") + // [Gemini] // Stability: development - GenAISystemGCPGemini = GenAISystemKey.String("gcp.gemini") - // Deprecated: Use 'gcp.vertex_ai' instead. - GenAISystemVertexAI = GenAISystemKey.String("vertex_ai") - // Deprecated: Use 'gcp.gemini' instead. 
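A possible combination of the new gen_ai.provider.name discriminator and gen_ai.system_instructions attributes above, with the instructions recorded as a JSON string per the note (sketch only):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed; match the vendored version
)

func main() {
	attrs := []attribute.KeyValue{
		// Enum value replacing the removed gen_ai.system attribute.
		semconv.GenAIProviderNameOpenAI,
		// System instructions kept separate from the chat history.
		semconv.GenAISystemInstructionsKey.String(
			`[{"type":"text","content":"You are a language translator."}]`),
	}
	fmt.Println(attrs)
}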
- GenAISystemGemini = GenAISystemKey.String("gemini") - // Anthropic + // + // [Gemini]: https://cloud.google.com/products/gemini + GenAIProviderNameGCPGemini = GenAIProviderNameKey.String("gcp.gemini") + // [Anthropic] // Stability: development - GenAISystemAnthropic = GenAISystemKey.String("anthropic") - // Cohere + // + // [Anthropic]: https://www.anthropic.com/ + GenAIProviderNameAnthropic = GenAIProviderNameKey.String("anthropic") + // [Cohere] // Stability: development - GenAISystemCohere = GenAISystemKey.String("cohere") + // + // [Cohere]: https://cohere.com/ + GenAIProviderNameCohere = GenAIProviderNameKey.String("cohere") // Azure AI Inference // Stability: development - GenAISystemAzAIInference = GenAISystemKey.String("az.ai.inference") - // Azure OpenAI + GenAIProviderNameAzureAIInference = GenAIProviderNameKey.String("azure.ai.inference") + // [Azure OpenAI] // Stability: development - GenAISystemAzAIOpenAI = GenAISystemKey.String("az.ai.openai") - // IBM Watsonx AI + // + // [Azure OpenAI]: https://azure.microsoft.com/products/ai-services/openai-service/ + GenAIProviderNameAzureAIOpenAI = GenAIProviderNameKey.String("azure.ai.openai") + // [IBM Watsonx AI] // Stability: development - GenAISystemIBMWatsonxAI = GenAISystemKey.String("ibm.watsonx.ai") - // AWS Bedrock + // + // [IBM Watsonx AI]: https://www.ibm.com/products/watsonx-ai + GenAIProviderNameIBMWatsonxAI = GenAIProviderNameKey.String("ibm.watsonx.ai") + // [AWS Bedrock] // Stability: development - GenAISystemAWSBedrock = GenAISystemKey.String("aws.bedrock") - // Perplexity + // + // [AWS Bedrock]: https://aws.amazon.com/bedrock + GenAIProviderNameAWSBedrock = GenAIProviderNameKey.String("aws.bedrock") + // [Perplexity] // Stability: development - GenAISystemPerplexity = GenAISystemKey.String("perplexity") - // xAI + // + // [Perplexity]: https://www.perplexity.ai/ + GenAIProviderNamePerplexity = GenAIProviderNameKey.String("perplexity") + // [xAI] // Stability: development - GenAISystemXai = GenAISystemKey.String("xai") - // DeepSeek + // + // [xAI]: https://x.ai/ + GenAIProviderNameXAI = GenAIProviderNameKey.String("x_ai") + // [DeepSeek] // Stability: development - GenAISystemDeepseek = GenAISystemKey.String("deepseek") - // Groq + // + // [DeepSeek]: https://www.deepseek.com/ + GenAIProviderNameDeepseek = GenAIProviderNameKey.String("deepseek") + // [Groq] // Stability: development - GenAISystemGroq = GenAISystemKey.String("groq") - // Mistral AI + // + // [Groq]: https://groq.com/ + GenAIProviderNameGroq = GenAIProviderNameKey.String("groq") + // [Mistral AI] // Stability: development - GenAISystemMistralAI = GenAISystemKey.String("mistral_ai") + // + // [Mistral AI]: https://mistral.ai/ + GenAIProviderNameMistralAI = GenAIProviderNameKey.String("mistral_ai") ) // Enum values for gen_ai.token.type @@ -6510,8 +6743,6 @@ var ( // Input tokens (prompt, input, etc.) // Stability: development GenAITokenTypeInput = GenAITokenTypeKey.String("input") - // Deprecated: Replaced by `output`. - GenAITokenTypeCompletion = GenAITokenTypeKey.String("output") // Output tokens (completion, response, etc.) // Stability: development GenAITokenTypeOutput = GenAITokenTypeKey.String("output") @@ -7312,6 +7543,14 @@ func HTTPRequestBodySize(val int) attribute.KeyValue { return HTTPRequestBodySizeKey.Int(val) } +// HTTPRequestHeader returns an attribute KeyValue conforming to the +// "http.request.header" semantic conventions. 
It represents the HTTP request +// headers, `` being the normalized HTTP Header name (lowercase), the value +// being the header values. +func HTTPRequestHeader(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("http.request.header."+key, val) +} + // HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the // "http.request.method_original" semantic conventions. It represents the // original HTTP method sent by the client in the request line. @@ -7347,6 +7586,14 @@ func HTTPResponseBodySize(val int) attribute.KeyValue { return HTTPResponseBodySizeKey.Int(val) } +// HTTPResponseHeader returns an attribute KeyValue conforming to the +// "http.response.header" semantic conventions. It represents the HTTP response +// headers, `` being the normalized HTTP Header name (lowercase), the value +// being the header values. +func HTTPResponseHeader(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("http.response.header."+key, val) +} + // HTTPResponseSize returns an attribute KeyValue conforming to the // "http.response.size" semantic conventions. It represents the total size of the // response in bytes. This should be the total number of bytes sent over the @@ -7418,6 +7665,94 @@ var ( // Namespace: hw const ( + // HwBatteryCapacityKey is the attribute Key conforming to the + // "hw.battery.capacity" semantic conventions. It represents the design capacity + // in Watts-hours or Amper-hours. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9.3Ah", "50Wh" + HwBatteryCapacityKey = attribute.Key("hw.battery.capacity") + + // HwBatteryChemistryKey is the attribute Key conforming to the + // "hw.battery.chemistry" semantic conventions. It represents the battery + // [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Li-ion", "NiMH" + // + // [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html + HwBatteryChemistryKey = attribute.Key("hw.battery.chemistry") + + // HwBatteryStateKey is the attribute Key conforming to the "hw.battery.state" + // semantic conventions. It represents the current state of the battery. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwBatteryStateKey = attribute.Key("hw.battery.state") + + // HwBiosVersionKey is the attribute Key conforming to the "hw.bios_version" + // semantic conventions. It represents the BIOS version of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + HwBiosVersionKey = attribute.Key("hw.bios_version") + + // HwDriverVersionKey is the attribute Key conforming to the "hw.driver_version" + // semantic conventions. It represents the driver version for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10.2.1-3" + HwDriverVersionKey = attribute.Key("hw.driver_version") + + // HwEnclosureTypeKey is the attribute Key conforming to the "hw.enclosure.type" + // semantic conventions. It represents the type of the enclosure (useful for + // modular systems). 
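The http.request.header and http.response.header helpers above are template attributes whose values are string slices; a brief sketch under the same assumed import:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed; match the vendored version
)

func main() {
	attrs := []attribute.KeyValue{
		// The normalized (lowercase) header name is folded into the key.
		semconv.HTTPRequestHeader("content-type", "application/json"), // key: http.request.header.content-type
		semconv.HTTPResponseHeader("my-custom-header", "abc", "def"),  // key: http.response.header.my-custom-header
	}
	fmt.Println(attrs)
}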
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Computer", "Storage", "Switch" + HwEnclosureTypeKey = attribute.Key("hw.enclosure.type") + + // HwFirmwareVersionKey is the attribute Key conforming to the + // "hw.firmware_version" semantic conventions. It represents the firmware + // version of the hardware component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2.0.1" + HwFirmwareVersionKey = attribute.Key("hw.firmware_version") + + // HwGpuTaskKey is the attribute Key conforming to the "hw.gpu.task" semantic + // conventions. It represents the type of task the GPU is performing. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwGpuTaskKey = attribute.Key("hw.gpu.task") + // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. // It represents an identifier for the hardware component, unique within the // monitored host. @@ -7429,6 +7764,60 @@ const ( // Examples: "win32battery_battery_testsysa33_1" HwIDKey = attribute.Key("hw.id") + // HwLimitTypeKey is the attribute Key conforming to the "hw.limit_type" + // semantic conventions. It represents the type of limit for hardware + // components. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwLimitTypeKey = attribute.Key("hw.limit_type") + + // HwLogicalDiskRaidLevelKey is the attribute Key conforming to the + // "hw.logical_disk.raid_level" semantic conventions. It represents the RAID + // Level of the logical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "RAID0+1", "RAID5", "RAID10" + HwLogicalDiskRaidLevelKey = attribute.Key("hw.logical_disk.raid_level") + + // HwLogicalDiskStateKey is the attribute Key conforming to the + // "hw.logical_disk.state" semantic conventions. It represents the state of the + // logical disk space usage. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwLogicalDiskStateKey = attribute.Key("hw.logical_disk.state") + + // HwMemoryTypeKey is the attribute Key conforming to the "hw.memory.type" + // semantic conventions. It represents the type of the memory module. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "DDR4", "DDR5", "LPDDR5" + HwMemoryTypeKey = attribute.Key("hw.memory.type") + + // HwModelKey is the attribute Key conforming to the "hw.model" semantic + // conventions. It represents the descriptive model name of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "PERC H740P", "Intel(R) Core(TM) i7-10700K", "Dell XPS 15 Battery" + HwModelKey = attribute.Key("hw.model") + // HwNameKey is the attribute Key conforming to the "hw.name" semantic // conventions. It represents an easily-recognizable name for the hardware // component. @@ -7440,6 +7829,28 @@ const ( // Examples: "eth0" HwNameKey = attribute.Key("hw.name") + // HwNetworkLogicalAddressesKey is the attribute Key conforming to the + // "hw.network.logical_addresses" semantic conventions. It represents the + // logical addresses of the adapter (e.g. IP address, or WWPN). 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "172.16.8.21", "57.11.193.42" + HwNetworkLogicalAddressesKey = attribute.Key("hw.network.logical_addresses") + + // HwNetworkPhysicalAddressKey is the attribute Key conforming to the + // "hw.network.physical_address" semantic conventions. It represents the + // physical address of the adapter (e.g. MAC address, or WWNN). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00-90-F5-E9-7B-36" + HwNetworkPhysicalAddressKey = attribute.Key("hw.network.physical_address") + // HwParentKey is the attribute Key conforming to the "hw.parent" semantic // conventions. It represents the unique identifier of the parent component // (typically the `hw.id` attribute of the enclosure, or disk controller). @@ -7451,6 +7862,65 @@ const ( // Examples: "dellStorage_perc_0" HwParentKey = attribute.Key("hw.parent") + // HwPhysicalDiskSmartAttributeKey is the attribute Key conforming to the + // "hw.physical_disk.smart_attribute" semantic conventions. It represents the + // [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute + // of the physical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Spin Retry Count", "Seek Error Rate", "Raw Read Error Rate" + // + // [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. + HwPhysicalDiskSmartAttributeKey = attribute.Key("hw.physical_disk.smart_attribute") + + // HwPhysicalDiskStateKey is the attribute Key conforming to the + // "hw.physical_disk.state" semantic conventions. It represents the state of the + // physical disk endurance utilization. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwPhysicalDiskStateKey = attribute.Key("hw.physical_disk.state") + + // HwPhysicalDiskTypeKey is the attribute Key conforming to the + // "hw.physical_disk.type" semantic conventions. It represents the type of the + // physical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "HDD", "SSD", "10K" + HwPhysicalDiskTypeKey = attribute.Key("hw.physical_disk.type") + + // HwSensorLocationKey is the attribute Key conforming to the + // "hw.sensor_location" semantic conventions. It represents the location of the + // sensor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cpu0", "ps1", "INLET", "CPU0_DIE", "AMBIENT", "MOTHERBOARD", "PS0 + // V3_3", "MAIN_12V", "CPU_VCORE" + HwSensorLocationKey = attribute.Key("hw.sensor_location") + + // HwSerialNumberKey is the attribute Key conforming to the "hw.serial_number" + // semantic conventions. It represents the serial number of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CNFCP0123456789" + HwSerialNumberKey = attribute.Key("hw.serial_number") + // HwStateKey is the attribute Key conforming to the "hw.state" semantic // conventions. It represents the current state of the component. // @@ -7461,6 +7931,17 @@ const ( // Examples: HwStateKey = attribute.Key("hw.state") + // HwTapeDriveOperationTypeKey is the attribute Key conforming to the + // "hw.tape_drive.operation_type" semantic conventions. It represents the type + // of tape drive operation. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwTapeDriveOperationTypeKey = attribute.Key("hw.tape_drive.operation_type") + // HwTypeKey is the attribute Key conforming to the "hw.type" semantic // conventions. It represents the type of the component. // @@ -7474,8 +7955,62 @@ const ( // `hw.state=degraded` would indicate that the temperature of the hardware // component has been reported as `degraded`. HwTypeKey = attribute.Key("hw.type") + + // HwVendorKey is the attribute Key conforming to the "hw.vendor" semantic + // conventions. It represents the vendor name of the hardware component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Dell", "HP", "Intel", "AMD", "LSI", "Lenovo" + HwVendorKey = attribute.Key("hw.vendor") ) +// HwBatteryCapacity returns an attribute KeyValue conforming to the +// "hw.battery.capacity" semantic conventions. It represents the design capacity +// in Watts-hours or Amper-hours. +func HwBatteryCapacity(val string) attribute.KeyValue { + return HwBatteryCapacityKey.String(val) +} + +// HwBatteryChemistry returns an attribute KeyValue conforming to the +// "hw.battery.chemistry" semantic conventions. It represents the battery +// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. +// +// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html +func HwBatteryChemistry(val string) attribute.KeyValue { + return HwBatteryChemistryKey.String(val) +} + +// HwBiosVersion returns an attribute KeyValue conforming to the +// "hw.bios_version" semantic conventions. It represents the BIOS version of the +// hardware component. +func HwBiosVersion(val string) attribute.KeyValue { + return HwBiosVersionKey.String(val) +} + +// HwDriverVersion returns an attribute KeyValue conforming to the +// "hw.driver_version" semantic conventions. It represents the driver version for +// the hardware component. +func HwDriverVersion(val string) attribute.KeyValue { + return HwDriverVersionKey.String(val) +} + +// HwEnclosureType returns an attribute KeyValue conforming to the +// "hw.enclosure.type" semantic conventions. It represents the type of the +// enclosure (useful for modular systems). +func HwEnclosureType(val string) attribute.KeyValue { + return HwEnclosureTypeKey.String(val) +} + +// HwFirmwareVersion returns an attribute KeyValue conforming to the +// "hw.firmware_version" semantic conventions. It represents the firmware version +// of the hardware component. +func HwFirmwareVersion(val string) attribute.KeyValue { + return HwFirmwareVersionKey.String(val) +} + // HwID returns an attribute KeyValue conforming to the "hw.id" semantic // conventions. It represents an identifier for the hardware component, unique // within the monitored host. @@ -7483,6 +8018,26 @@ func HwID(val string) attribute.KeyValue { return HwIDKey.String(val) } +// HwLogicalDiskRaidLevel returns an attribute KeyValue conforming to the +// "hw.logical_disk.raid_level" semantic conventions. It represents the RAID +// Level of the logical disk. +func HwLogicalDiskRaidLevel(val string) attribute.KeyValue { + return HwLogicalDiskRaidLevelKey.String(val) +} + +// HwMemoryType returns an attribute KeyValue conforming to the "hw.memory.type" +// semantic conventions. It represents the type of the memory module. 
+func HwMemoryType(val string) attribute.KeyValue { + return HwMemoryTypeKey.String(val) +} + +// HwModel returns an attribute KeyValue conforming to the "hw.model" semantic +// conventions. It represents the descriptive model name of the hardware +// component. +func HwModel(val string) attribute.KeyValue { + return HwModelKey.String(val) +} + // HwName returns an attribute KeyValue conforming to the "hw.name" semantic // conventions. It represents an easily-recognizable name for the hardware // component. @@ -7490,6 +8045,20 @@ func HwName(val string) attribute.KeyValue { return HwNameKey.String(val) } +// HwNetworkLogicalAddresses returns an attribute KeyValue conforming to the +// "hw.network.logical_addresses" semantic conventions. It represents the logical +// addresses of the adapter (e.g. IP address, or WWPN). +func HwNetworkLogicalAddresses(val ...string) attribute.KeyValue { + return HwNetworkLogicalAddressesKey.StringSlice(val) +} + +// HwNetworkPhysicalAddress returns an attribute KeyValue conforming to the +// "hw.network.physical_address" semantic conventions. It represents the physical +// address of the adapter (e.g. MAC address, or WWNN). +func HwNetworkPhysicalAddress(val string) attribute.KeyValue { + return HwNetworkPhysicalAddressKey.String(val) +} + // HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic // conventions. It represents the unique identifier of the parent component // (typically the `hw.id` attribute of the enclosure, or disk controller). @@ -7497,17 +8066,144 @@ func HwParent(val string) attribute.KeyValue { return HwParentKey.String(val) } +// HwPhysicalDiskSmartAttribute returns an attribute KeyValue conforming to the +// "hw.physical_disk.smart_attribute" semantic conventions. It represents the +// [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute +// of the physical disk. +// +// [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. +func HwPhysicalDiskSmartAttribute(val string) attribute.KeyValue { + return HwPhysicalDiskSmartAttributeKey.String(val) +} + +// HwPhysicalDiskType returns an attribute KeyValue conforming to the +// "hw.physical_disk.type" semantic conventions. It represents the type of the +// physical disk. +func HwPhysicalDiskType(val string) attribute.KeyValue { + return HwPhysicalDiskTypeKey.String(val) +} + +// HwSensorLocation returns an attribute KeyValue conforming to the +// "hw.sensor_location" semantic conventions. It represents the location of the +// sensor. +func HwSensorLocation(val string) attribute.KeyValue { + return HwSensorLocationKey.String(val) +} + +// HwSerialNumber returns an attribute KeyValue conforming to the +// "hw.serial_number" semantic conventions. It represents the serial number of +// the hardware component. +func HwSerialNumber(val string) attribute.KeyValue { + return HwSerialNumberKey.String(val) +} + +// HwVendor returns an attribute KeyValue conforming to the "hw.vendor" semantic +// conventions. It represents the vendor name of the hardware component. 
+func HwVendor(val string) attribute.KeyValue { + return HwVendorKey.String(val) +} + +// Enum values for hw.battery.state +var ( + // Charging + // Stability: development + HwBatteryStateCharging = HwBatteryStateKey.String("charging") + // Discharging + // Stability: development + HwBatteryStateDischarging = HwBatteryStateKey.String("discharging") +) + +// Enum values for hw.gpu.task +var ( + // Decoder + // Stability: development + HwGpuTaskDecoder = HwGpuTaskKey.String("decoder") + // Encoder + // Stability: development + HwGpuTaskEncoder = HwGpuTaskKey.String("encoder") + // General + // Stability: development + HwGpuTaskGeneral = HwGpuTaskKey.String("general") +) + +// Enum values for hw.limit_type +var ( + // Critical + // Stability: development + HwLimitTypeCritical = HwLimitTypeKey.String("critical") + // Degraded + // Stability: development + HwLimitTypeDegraded = HwLimitTypeKey.String("degraded") + // High Critical + // Stability: development + HwLimitTypeHighCritical = HwLimitTypeKey.String("high.critical") + // High Degraded + // Stability: development + HwLimitTypeHighDegraded = HwLimitTypeKey.String("high.degraded") + // Low Critical + // Stability: development + HwLimitTypeLowCritical = HwLimitTypeKey.String("low.critical") + // Low Degraded + // Stability: development + HwLimitTypeLowDegraded = HwLimitTypeKey.String("low.degraded") + // Maximum + // Stability: development + HwLimitTypeMax = HwLimitTypeKey.String("max") + // Throttled + // Stability: development + HwLimitTypeThrottled = HwLimitTypeKey.String("throttled") + // Turbo + // Stability: development + HwLimitTypeTurbo = HwLimitTypeKey.String("turbo") +) + +// Enum values for hw.logical_disk.state +var ( + // Used + // Stability: development + HwLogicalDiskStateUsed = HwLogicalDiskStateKey.String("used") + // Free + // Stability: development + HwLogicalDiskStateFree = HwLogicalDiskStateKey.String("free") +) + +// Enum values for hw.physical_disk.state +var ( + // Remaining + // Stability: development + HwPhysicalDiskStateRemaining = HwPhysicalDiskStateKey.String("remaining") +) + // Enum values for hw.state var ( - // Ok - // Stability: development - HwStateOk = HwStateKey.String("ok") // Degraded // Stability: development HwStateDegraded = HwStateKey.String("degraded") // Failed // Stability: development HwStateFailed = HwStateKey.String("failed") + // Needs Cleaning + // Stability: development + HwStateNeedsCleaning = HwStateKey.String("needs_cleaning") + // OK + // Stability: development + HwStateOk = HwStateKey.String("ok") + // Predicted Failure + // Stability: development + HwStatePredictedFailure = HwStateKey.String("predicted_failure") +) + +// Enum values for hw.tape_drive.operation_type +var ( + // Mount + // Stability: development + HwTapeDriveOperationTypeMount = HwTapeDriveOperationTypeKey.String("mount") + // Unmount + // Stability: development + HwTapeDriveOperationTypeUnmount = HwTapeDriveOperationTypeKey.String("unmount") + // Clean + // Stability: development + HwTapeDriveOperationTypeClean = HwTapeDriveOperationTypeKey.String("clean") ) // Enum values for hw.type @@ -7686,6 +8382,36 @@ const ( // Examples: "Evicted", "Error" K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + // K8SContainerStatusReasonKey is the attribute Key conforming to the + // "k8s.container.status.reason" semantic conventions. It represents the reason + // for the container state. 
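A sketch combining several of the new hw.* constructors and enum values defined above, using the documented example values:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed; match the vendored version
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.HwVendor("Dell"),
		semconv.HwModel("PERC H740P"),
		semconv.HwSerialNumber("CNFCP0123456789"),
		semconv.HwStateOk,              // enum: hw.state=ok
		semconv.HwBatteryStateCharging, // enum: hw.battery.state=charging
	}
	fmt.Println(attrs)
}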
Corresponds to the `reason` field of the: + // [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ContainerCreating", "CrashLoopBackOff", + // "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff", + // "OOMKilled", "Completed", "Error", "ContainerCannotRun" + // + // [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core + // [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core + K8SContainerStatusReasonKey = attribute.Key("k8s.container.status.reason") + + // K8SContainerStatusStateKey is the attribute Key conforming to the + // "k8s.container.status.state" semantic conventions. It represents the state of + // the container. [K8s ContainerState]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "terminated", "running", "waiting" + // + // [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core + K8SContainerStatusStateKey = attribute.Key("k8s.container.status.state") + // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name" // semantic conventions. It represents the name of the CronJob. // @@ -7749,6 +8475,18 @@ const ( // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + // K8SHPAMetricTypeKey is the attribute Key conforming to the + // "k8s.hpa.metric.type" semantic conventions. It represents the type of metric + // source for the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Resource", "ContainerResource" + // Note: This attribute reflects the `type` field of spec.metrics[] in the HPA. + K8SHPAMetricTypeKey = attribute.Key("k8s.hpa.metric.type") + // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic // conventions. It represents the name of the horizontal pod autoscaler. // @@ -7759,6 +8497,43 @@ const ( // Examples: "opentelemetry" K8SHPANameKey = attribute.Key("k8s.hpa.name") + // K8SHPAScaletargetrefAPIVersionKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the + // API version of the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "apps/v1", "autoscaling/v2" + // Note: This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA + // spec. + K8SHPAScaletargetrefAPIVersionKey = attribute.Key("k8s.hpa.scaletargetref.api_version") + + // K8SHPAScaletargetrefKindKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of + // the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Deployment", "StatefulSet" + // Note: This maps to the `kind` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefKindKey = attribute.Key("k8s.hpa.scaletargetref.kind") + + // K8SHPAScaletargetrefNameKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.name" semantic conventions. 
It represents the name of + // the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-deployment", "my-statefulset" + // Note: This maps to the `name` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefNameKey = attribute.Key("k8s.hpa.scaletargetref.name") + // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic // conventions. It represents the UID of the horizontal pod autoscaler. // @@ -7769,6 +8544,17 @@ const ( // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") + // K8SHugepageSizeKey is the attribute Key conforming to the "k8s.hugepage.size" + // semantic conventions. It represents the size (identifier) of the K8s huge + // page. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2Mi" + K8SHugepageSizeKey = attribute.Key("k8s.hugepage.size") + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic // conventions. It represents the name of the Job. // @@ -7815,6 +8601,46 @@ const ( // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") + // K8SNodeConditionStatusKey is the attribute Key conforming to the + // "k8s.node.condition.status" semantic conventions. It represents the status of + // the condition, one of True, False, Unknown. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "true", "false", "unknown" + // Note: This attribute aligns with the `status` field of the + // [NodeCondition] + // + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionStatusKey = attribute.Key("k8s.node.condition.status") + + // K8SNodeConditionTypeKey is the attribute Key conforming to the + // "k8s.node.condition.type" semantic conventions. It represents the condition + // type of a K8s Node. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Ready", "DiskPressure" + // Note: K8s Node conditions as described + // by [K8s documentation]. + // + // This attribute aligns with the `type` field of the + // [NodeCondition] + // + // The set of possible values is not limited to those listed here. Managed + // Kubernetes environments, + // or custom controllers MAY introduce additional node condition types. + // When this occurs, the exact value as reported by the Kubernetes API SHOULD be + // used. + // + // [K8s documentation]: https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionTypeKey = attribute.Key("k8s.node.condition.type") + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" // semantic conventions. It represents the name of the Node. // @@ -7910,6 +8736,25 @@ const ( // Examples: "opentelemetry" K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name") + // K8SResourceQuotaResourceNameKey is the attribute Key conforming to the + // "k8s.resourcequota.resource_name" semantic conventions. It represents the + // name of the K8s resource a resource quota defines. 
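For illustration, the node-condition and hugepage attributes above might be recorded as follows; condition type and status are open-ended enums, so the values reported by the Kubernetes API are used verbatim (sketch only):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed; match the vendored version
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.K8SNodeNameKey.String("node-1"),
		semconv.K8SNodeConditionTypeKey.String("Ready"),
		semconv.K8SNodeConditionStatusKey.String("true"),
		semconv.K8SHugepageSizeKey.String("2Mi"),
	}
	fmt.Println(attrs)
}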
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "count/replicationcontrollers" + // Note: The value for this attribute can be either the full + // `count/[.]` string (e.g., count/deployments.apps, + // count/pods), or, for certain core Kubernetes resources, just the resource + // name (e.g., pods, services, configmaps). Both forms are supported by + // Kubernetes for object count quotas. See + // [Kubernetes Resource Quotas documentation] for more details. + // + // [Kubernetes Resource Quotas documentation]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota + K8SResourceQuotaResourceNameKey = attribute.Key("k8s.resourcequota.resource_name") + // K8SResourceQuotaUIDKey is the attribute Key conforming to the // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the // resource quota. @@ -7943,6 +8788,19 @@ const ( // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + // K8SStorageclassNameKey is the attribute Key conforming to the + // "k8s.storageclass.name" semantic conventions. It represents the name of K8s + // [StorageClass] object. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gold.storageclass.storage.k8s.io" + // + // [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io + K8SStorageclassNameKey = attribute.Key("k8s.storageclass.name") + // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name" // semantic conventions. It represents the name of the K8s volume. // @@ -8001,6 +8859,22 @@ func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { return K8SContainerStatusLastTerminatedReasonKey.String(val) } +// K8SCronJobAnnotation returns an attribute KeyValue conforming to the +// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob +// annotation placed on the CronJob, the `` being the annotation name, the +// value being the annotation value. +func K8SCronJobAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.cronjob.annotation."+key, val) +} + +// K8SCronJobLabel returns an attribute KeyValue conforming to the +// "k8s.cronjob.label" semantic conventions. It represents the label placed on +// the CronJob, the `` being the label name, the value being the label +// value. +func K8SCronJobLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.cronjob.label."+key, val) +} + // K8SCronJobName returns an attribute KeyValue conforming to the // "k8s.cronjob.name" semantic conventions. It represents the name of the // CronJob. @@ -8014,6 +8888,22 @@ func K8SCronJobUID(val string) attribute.KeyValue { return K8SCronJobUIDKey.String(val) } +// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.daemonset.annotation" semantic conventions. It represents the annotation +// placed on the DaemonSet, the `` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.annotation."+key, val) +} + +// K8SDaemonSetLabel returns an attribute KeyValue conforming to the +// "k8s.daemonset.label" semantic conventions. 
It represents the label placed on +// the DaemonSet, the `` being the label name, the value being the label +// value, even if the value is empty. +func K8SDaemonSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.label."+key, val) +} + // K8SDaemonSetName returns an attribute KeyValue conforming to the // "k8s.daemonset.name" semantic conventions. It represents the name of the // DaemonSet. @@ -8028,6 +8918,22 @@ func K8SDaemonSetUID(val string) attribute.KeyValue { return K8SDaemonSetUIDKey.String(val) } +// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the +// "k8s.deployment.annotation" semantic conventions. It represents the annotation +// placed on the Deployment, the `` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.annotation."+key, val) +} + +// K8SDeploymentLabel returns an attribute KeyValue conforming to the +// "k8s.deployment.label" semantic conventions. It represents the label placed on +// the Deployment, the `` being the label name, the value being the label +// value, even if the value is empty. +func K8SDeploymentLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.label."+key, val) +} + // K8SDeploymentName returns an attribute KeyValue conforming to the // "k8s.deployment.name" semantic conventions. It represents the name of the // Deployment. @@ -8042,18 +8948,69 @@ func K8SDeploymentUID(val string) attribute.KeyValue { return K8SDeploymentUIDKey.String(val) } +// K8SHPAMetricType returns an attribute KeyValue conforming to the +// "k8s.hpa.metric.type" semantic conventions. It represents the type of metric +// source for the horizontal pod autoscaler. +func K8SHPAMetricType(val string) attribute.KeyValue { + return K8SHPAMetricTypeKey.String(val) +} + // K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name" // semantic conventions. It represents the name of the horizontal pod autoscaler. func K8SHPAName(val string) attribute.KeyValue { return K8SHPANameKey.String(val) } +// K8SHPAScaletargetrefAPIVersion returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the +// API version of the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefAPIVersion(val string) attribute.KeyValue { + return K8SHPAScaletargetrefAPIVersionKey.String(val) +} + +// K8SHPAScaletargetrefKind returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of +// the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefKind(val string) attribute.KeyValue { + return K8SHPAScaletargetrefKindKey.String(val) +} + +// K8SHPAScaletargetrefName returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of +// the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefName(val string) attribute.KeyValue { + return K8SHPAScaletargetrefNameKey.String(val) +} + // K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid" // semantic conventions. It represents the UID of the horizontal pod autoscaler. 
func K8SHPAUID(val string) attribute.KeyValue { return K8SHPAUIDKey.String(val) } +// K8SHugepageSize returns an attribute KeyValue conforming to the +// "k8s.hugepage.size" semantic conventions. It represents the size (identifier) +// of the K8s huge page. +func K8SHugepageSize(val string) attribute.KeyValue { + return K8SHugepageSizeKey.String(val) +} + +// K8SJobAnnotation returns an attribute KeyValue conforming to the +// "k8s.job.annotation" semantic conventions. It represents the annotation placed +// on the Job, the `` being the annotation name, the value being the +// annotation value, even if the value is empty. +func K8SJobAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.annotation."+key, val) +} + +// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label" +// semantic conventions. It represents the label placed on the Job, the `` +// being the label name, the value being the label value, even if the value is +// empty. +func K8SJobLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.label."+key, val) +} + // K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" // semantic conventions. It represents the name of the Job. func K8SJobName(val string) attribute.KeyValue { @@ -8066,6 +9023,22 @@ func K8SJobUID(val string) attribute.KeyValue { return K8SJobUIDKey.String(val) } +// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the +// "k8s.namespace.annotation" semantic conventions. It represents the annotation +// placed on the Namespace, the `` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.annotation."+key, val) +} + +// K8SNamespaceLabel returns an attribute KeyValue conforming to the +// "k8s.namespace.label" semantic conventions. It represents the label placed on +// the Namespace, the `` being the label name, the value being the label +// value, even if the value is empty. +func K8SNamespaceLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.label."+key, val) +} + // K8SNamespaceName returns an attribute KeyValue conforming to the // "k8s.namespace.name" semantic conventions. It represents the name of the // namespace that the pod is running in. @@ -8073,6 +9046,22 @@ func K8SNamespaceName(val string) attribute.KeyValue { return K8SNamespaceNameKey.String(val) } +// K8SNodeAnnotation returns an attribute KeyValue conforming to the +// "k8s.node.annotation" semantic conventions. It represents the annotation +// placed on the Node, the `` being the annotation name, the value being the +// annotation value, even if the value is empty. +func K8SNodeAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.node.annotation."+key, val) +} + +// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label" +// semantic conventions. It represents the label placed on the Node, the `` +// being the label name, the value being the label value, even if the value is +// empty. +func K8SNodeLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.node.label."+key, val) +} + // K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name" // semantic conventions. It represents the name of the Node. 
func K8SNodeName(val string) attribute.KeyValue { @@ -8085,6 +9074,21 @@ func K8SNodeUID(val string) attribute.KeyValue { return K8SNodeUIDKey.String(val) } +// K8SPodAnnotation returns an attribute KeyValue conforming to the +// "k8s.pod.annotation" semantic conventions. It represents the annotation placed +// on the Pod, the `` being the annotation name, the value being the +// annotation value. +func K8SPodAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.pod.annotation."+key, val) +} + +// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label" +// semantic conventions. It represents the label placed on the Pod, the `` +// being the label name, the value being the label value. +func K8SPodLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.pod.label."+key, val) +} + // K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" // semantic conventions. It represents the name of the Pod. func K8SPodName(val string) attribute.KeyValue { @@ -8097,6 +9101,22 @@ func K8SPodUID(val string) attribute.KeyValue { return K8SPodUIDKey.String(val) } +// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.replicaset.annotation" semantic conventions. It represents the annotation +// placed on the ReplicaSet, the `` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.replicaset.annotation."+key, val) +} + +// K8SReplicaSetLabel returns an attribute KeyValue conforming to the +// "k8s.replicaset.label" semantic conventions. It represents the label placed on +// the ReplicaSet, the `` being the label name, the value being the label +// value, even if the value is empty. +func K8SReplicaSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.replicaset.label."+key, val) +} + // K8SReplicaSetName returns an attribute KeyValue conforming to the // "k8s.replicaset.name" semantic conventions. It represents the name of the // ReplicaSet. @@ -8132,6 +9152,13 @@ func K8SResourceQuotaName(val string) attribute.KeyValue { return K8SResourceQuotaNameKey.String(val) } +// K8SResourceQuotaResourceName returns an attribute KeyValue conforming to the +// "k8s.resourcequota.resource_name" semantic conventions. It represents the name +// of the K8s resource a resource quota defines. +func K8SResourceQuotaResourceName(val string) attribute.KeyValue { + return K8SResourceQuotaResourceNameKey.String(val) +} + // K8SResourceQuotaUID returns an attribute KeyValue conforming to the // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the // resource quota. @@ -8139,6 +9166,22 @@ func K8SResourceQuotaUID(val string) attribute.KeyValue { return K8SResourceQuotaUIDKey.String(val) } +// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.statefulset.annotation" semantic conventions. It represents the +// annotation placed on the StatefulSet, the `` being the annotation name, +// the value being the annotation value, even if the value is empty. +func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.statefulset.annotation."+key, val) +} + +// K8SStatefulSetLabel returns an attribute KeyValue conforming to the +// "k8s.statefulset.label" semantic conventions. 
It represents the label placed +// on the StatefulSet, the `` being the label name, the value being the +// label value, even if the value is empty. +func K8SStatefulSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.statefulset.label."+key, val) +} + // K8SStatefulSetName returns an attribute KeyValue conforming to the // "k8s.statefulset.name" semantic conventions. It represents the name of the // StatefulSet. @@ -8153,6 +9196,15 @@ func K8SStatefulSetUID(val string) attribute.KeyValue { return K8SStatefulSetUIDKey.String(val) } +// K8SStorageclassName returns an attribute KeyValue conforming to the +// "k8s.storageclass.name" semantic conventions. It represents the name of K8s +// [StorageClass] object. +// +// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io +func K8SStorageclassName(val string) attribute.KeyValue { + return K8SStorageclassNameKey.String(val) +} + // K8SVolumeName returns an attribute KeyValue conforming to the // "k8s.volume.name" semantic conventions. It represents the name of the K8s // volume. @@ -8160,6 +9212,50 @@ func K8SVolumeName(val string) attribute.KeyValue { return K8SVolumeNameKey.String(val) } +// Enum values for k8s.container.status.reason +var ( + // The container is being created. + // Stability: development + K8SContainerStatusReasonContainerCreating = K8SContainerStatusReasonKey.String("ContainerCreating") + // The container is in a crash loop back off state. + // Stability: development + K8SContainerStatusReasonCrashLoopBackOff = K8SContainerStatusReasonKey.String("CrashLoopBackOff") + // There was an error creating the container configuration. + // Stability: development + K8SContainerStatusReasonCreateContainerConfigError = K8SContainerStatusReasonKey.String("CreateContainerConfigError") + // There was an error pulling the container image. + // Stability: development + K8SContainerStatusReasonErrImagePull = K8SContainerStatusReasonKey.String("ErrImagePull") + // The container image pull is in back off state. + // Stability: development + K8SContainerStatusReasonImagePullBackOff = K8SContainerStatusReasonKey.String("ImagePullBackOff") + // The container was killed due to out of memory. + // Stability: development + K8SContainerStatusReasonOomKilled = K8SContainerStatusReasonKey.String("OOMKilled") + // The container has completed execution. + // Stability: development + K8SContainerStatusReasonCompleted = K8SContainerStatusReasonKey.String("Completed") + // There was an error with the container. + // Stability: development + K8SContainerStatusReasonError = K8SContainerStatusReasonKey.String("Error") + // The container cannot run. + // Stability: development + K8SContainerStatusReasonContainerCannotRun = K8SContainerStatusReasonKey.String("ContainerCannotRun") +) + +// Enum values for k8s.container.status.state +var ( + // The container has terminated. + // Stability: development + K8SContainerStatusStateTerminated = K8SContainerStatusStateKey.String("terminated") + // The container is running. + // Stability: development + K8SContainerStatusStateRunning = K8SContainerStatusStateKey.String("running") + // The container is waiting. 
+ // Stability: development + K8SContainerStatusStateWaiting = K8SContainerStatusStateKey.String("waiting") +) + // Enum values for k8s.namespace.phase var ( // Active namespace phase as described by [K8s API] @@ -8174,6 +9270,39 @@ var ( K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") ) +// Enum values for k8s.node.condition.status +var ( + // condition_true + // Stability: development + K8SNodeConditionStatusConditionTrue = K8SNodeConditionStatusKey.String("true") + // condition_false + // Stability: development + K8SNodeConditionStatusConditionFalse = K8SNodeConditionStatusKey.String("false") + // condition_unknown + // Stability: development + K8SNodeConditionStatusConditionUnknown = K8SNodeConditionStatusKey.String("unknown") +) + +// Enum values for k8s.node.condition.type +var ( + // The node is healthy and ready to accept pods + // Stability: development + K8SNodeConditionTypeReady = K8SNodeConditionTypeKey.String("Ready") + // Pressure exists on the disk size—that is, if the disk capacity is low + // Stability: development + K8SNodeConditionTypeDiskPressure = K8SNodeConditionTypeKey.String("DiskPressure") + // Pressure exists on the node memory—that is, if the node memory is low + // Stability: development + K8SNodeConditionTypeMemoryPressure = K8SNodeConditionTypeKey.String("MemoryPressure") + // Pressure exists on the processes—that is, if there are too many processes + // on the node + // Stability: development + K8SNodeConditionTypePIDPressure = K8SNodeConditionTypeKey.String("PIDPressure") + // The network for the node is not correctly configured + // Stability: development + K8SNodeConditionTypeNetworkUnavailable = K8SNodeConditionTypeKey.String("NetworkUnavailable") +) + // Enum values for k8s.volume.type var ( // A [persistentVolumeClaim] volume @@ -8371,6 +9500,27 @@ var ( LogIostreamStderr = LogIostreamKey.String("stderr") ) +// Namespace: mainframe +const ( + // MainframeLparNameKey is the attribute Key conforming to the + // "mainframe.lpar.name" semantic conventions. It represents the name of the + // logical partition that hosts a systems with a mainframe operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "LPAR01" + MainframeLparNameKey = attribute.Key("mainframe.lpar.name") +) + +// MainframeLparName returns an attribute KeyValue conforming to the +// "mainframe.lpar.name" semantic conventions. It represents the name of the +// logical partition that hosts a systems with a mainframe operating system. +func MainframeLparName(val string) attribute.KeyValue { + return MainframeLparNameKey.String(val) +} + // Namespace: messaging const ( // MessagingBatchMessageCountKey is the attribute Key conforming to the @@ -9084,10 +10234,6 @@ var ( // // Stability: development MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") - // Deprecated: Replaced by `process`. - MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("deliver") - // Deprecated: Replaced by `send`. 
- MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") ) // Enum values for messaging.rocketmq.consumption_model @@ -9137,6 +10283,9 @@ var ( // Apache ActiveMQ // Stability: development MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") + // Amazon Simple Notification Service (SNS) + // Stability: development + MessagingSystemAWSSNS = MessagingSystemKey.String("aws.sns") // Amazon Simple Queue Service (SQS) // Stability: development MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") @@ -9654,6 +10803,66 @@ func OCIManifestDigest(val string) attribute.KeyValue { return OCIManifestDigestKey.String(val) } +// Namespace: openai +const ( + // OpenAIRequestServiceTierKey is the attribute Key conforming to the + // "openai.request.service_tier" semantic conventions. It represents the service + // tier requested. May be a specific tier, default, or auto. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "auto", "default" + OpenAIRequestServiceTierKey = attribute.Key("openai.request.service_tier") + + // OpenAIResponseServiceTierKey is the attribute Key conforming to the + // "openai.response.service_tier" semantic conventions. It represents the + // service tier used for the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "scale", "default" + OpenAIResponseServiceTierKey = attribute.Key("openai.response.service_tier") + + // OpenAIResponseSystemFingerprintKey is the attribute Key conforming to the + // "openai.response.system_fingerprint" semantic conventions. It represents a + // fingerprint to track any eventual change in the Generative AI environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fp_44709d6fcb" + OpenAIResponseSystemFingerprintKey = attribute.Key("openai.response.system_fingerprint") +) + +// OpenAIResponseServiceTier returns an attribute KeyValue conforming to the +// "openai.response.service_tier" semantic conventions. It represents the service +// tier used for the response. +func OpenAIResponseServiceTier(val string) attribute.KeyValue { + return OpenAIResponseServiceTierKey.String(val) +} + +// OpenAIResponseSystemFingerprint returns an attribute KeyValue conforming to +// the "openai.response.system_fingerprint" semantic conventions. It represents a +// fingerprint to track any eventual change in the Generative AI environment. +func OpenAIResponseSystemFingerprint(val string) attribute.KeyValue { + return OpenAIResponseSystemFingerprintKey.String(val) +} + +// Enum values for openai.request.service_tier +var ( + // The system will utilize scale tier credits until they are exhausted. + // Stability: development + OpenAIRequestServiceTierAuto = OpenAIRequestServiceTierKey.String("auto") + // The system will utilize the default scale tier. 
+ // Stability: development + OpenAIRequestServiceTierDefault = OpenAIRequestServiceTierKey.String("default") +) + // Namespace: opentracing const ( // OpenTracingRefTypeKey is the attribute Key conforming to the @@ -9802,7 +11011,7 @@ var ( OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS // Stability: development - OSTypeZOS = OSTypeKey.String("z_os") + OSTypeZOS = OSTypeKey.String("zos") ) // Namespace: otel @@ -9866,6 +11075,17 @@ const ( // Examples: "io.opentelemetry.contrib.mongodb" OTelScopeNameKey = attribute.Key("otel.scope.name") + // OTelScopeSchemaURLKey is the attribute Key conforming to the + // "otel.scope.schema_url" semantic conventions. It represents the schema URL of + // the instrumentation scope. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://opentelemetry.io/schemas/1.31.0" + OTelScopeSchemaURLKey = attribute.Key("otel.scope.schema_url") + // OTelScopeVersionKey is the attribute Key conforming to the // "otel.scope.version" semantic conventions. It represents the version of the // instrumentation scope - (`InstrumentationScope.Version` in OTLP). @@ -9877,6 +11097,20 @@ const ( // Examples: "1.0.0" OTelScopeVersionKey = attribute.Key("otel.scope.version") + // OTelSpanParentOriginKey is the attribute Key conforming to the + // "otel.span.parent.origin" semantic conventions. It represents the determines + // whether the span has a parent span, and if so, + // [whether it is a remote parent]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginKey = attribute.Key("otel.span.parent.origin") + // OTelSpanSamplingResultKey is the attribute Key conforming to the // "otel.span.sampling_result" semantic conventions. It represents the result // value of the sampler for this span. @@ -9926,6 +11160,13 @@ func OTelScopeName(val string) attribute.KeyValue { return OTelScopeNameKey.String(val) } +// OTelScopeSchemaURL returns an attribute KeyValue conforming to the +// "otel.scope.schema_url" semantic conventions. It represents the schema URL of +// the instrumentation scope. +func OTelScopeSchemaURL(val string) attribute.KeyValue { + return OTelScopeSchemaURLKey.String(val) +} + // OTelScopeVersion returns an attribute KeyValue conforming to the // "otel.scope.version" semantic conventions. It represents the version of the // instrumentation scope - (`InstrumentationScope.Version` in OTLP). 
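// Illustrative sketch (not part of the vendored file): the templated K8s helpers
// added earlier in this file (K8SPodLabel, K8SPodAnnotation, and the CronJob/
// DaemonSet/Deployment/Job/Namespace/Node/ReplicaSet/StatefulSet variants) build
// the attribute name from the supplied key, e.g. K8SPodLabel("app", "checkout")
// yields "k8s.pod.label.app". The pod name, label, and annotation values below
// are assumed example data.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

func podAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.K8SPodName("checkout-5d8f7c9b4d-x2x7z"), // k8s.pod.name
		semconv.K8SPodLabel("app", "checkout"),          // k8s.pod.label.app
		semconv.K8SPodAnnotation("team", "payments"),    // k8s.pod.annotation.team
	}
}

func main() {
	for _, kv := range podAttrs() {
		fmt.Println(kv.Key, "=", kv.Value.AsString())
	}
}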
@@ -9970,6 +11211,10 @@ var ( // // Stability: development OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") + // Zipkin span exporter over HTTP + // + // Stability: development + OTelComponentTypeZipkinHTTPSpanExporter = OTelComponentTypeKey.String("zipkin_http_span_exporter") // OTLP log record exporter over gRPC with protobuf serialization // // Stability: development @@ -9998,6 +11243,27 @@ var ( // // Stability: development OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter") + // Prometheus metric exporter over HTTP with the default text-based format + // + // Stability: development + OTelComponentTypePrometheusHTTPTextMetricExporter = OTelComponentTypeKey.String("prometheus_http_text_metric_exporter") +) + +// Enum values for otel.span.parent.origin +var ( + // The span does not have a parent, it is a root span + // Stability: development + OTelSpanParentOriginNone = OTelSpanParentOriginKey.String("none") + // The span has a parent and the parent's span context [isRemote()] is false + // Stability: development + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginLocal = OTelSpanParentOriginKey.String("local") + // The span has a parent and the parent's span context [isRemote()] is true + // Stability: development + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginRemote = OTelSpanParentOriginKey.String("remote") ) // Enum values for otel.span.sampling_result @@ -10497,6 +11763,14 @@ func ProcessCreationTime(val string) attribute.KeyValue { return ProcessCreationTimeKey.String(val) } +// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the +// "process.environment_variable" semantic conventions. It represents the process +// environment variables, `` being the environment variable name, the value +// being the environment variable value. +func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue { + return attribute.String("process.environment_variable."+key, val) +} + // ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the // "process.executable.build_id.gnu" semantic conventions. It represents the GNU // build ID as found in the `.note.gnu.build-id` ELF section (hex string). @@ -10965,6 +12239,38 @@ const ( RPCSystemKey = attribute.Key("rpc.system") ) +// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the +// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the +// connect request metadata, `` being the normalized Connect Metadata key +// (lowercase), the value being the metadata values. +func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val) +} + +// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the +// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the +// connect response metadata, `` being the normalized Connect Metadata key +// (lowercase), the value being the metadata values. +func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val) +} + +// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the +// "rpc.grpc.request.metadata" semantic conventions. 
It represents the gRPC +// request metadata, `` being the normalized gRPC Metadata key (lowercase), +// the value being the metadata values. +func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.grpc.request.metadata."+key, val) +} + +// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the +// "rpc.grpc.response.metadata" semantic conventions. It represents the gRPC +// response metadata, `` being the normalized gRPC Metadata key (lowercase), +// the value being the metadata values. +func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.grpc.response.metadata."+key, val) +} + // RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` // property of response if it is an error response. @@ -11820,15 +13126,12 @@ var ( // Enum values for system.memory.state var ( - // used + // Actual used virtual memory in bytes. // Stability: development SystemMemoryStateUsed = SystemMemoryStateKey.String("used") // free // Stability: development SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // Deprecated: Removed, report shared memory usage with - // `metric.system.memory.shared` metric. - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") // buffers // Stability: development SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") @@ -13727,8 +15030,6 @@ var ( // // [GitLab]: https://gitlab.com VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab") - // Deprecated: Replaced by `gitea`. - VCSProviderNameGittea = VCSProviderNameKey.String("gittea") // [Gitea] // Stability: development // @@ -13848,4 +15149,45 @@ func WebEngineName(val string) attribute.KeyValue { // engine. func WebEngineVersion(val string) attribute.KeyValue { return WebEngineVersionKey.String(val) +} + +// Namespace: zos +const ( + // ZOSSmfIDKey is the attribute Key conforming to the "zos.smf.id" semantic + // conventions. It represents the System Management Facility (SMF) Identifier + // uniquely identified a z/OS system within a SYSPLEX or mainframe environment + // and is used for system and performance analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "SYS1" + ZOSSmfIDKey = attribute.Key("zos.smf.id") + + // ZOSSysplexNameKey is the attribute Key conforming to the "zos.sysplex.name" + // semantic conventions. It represents the name of the SYSPLEX to which the z/OS + // system belongs too. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "SYSPLEX1" + ZOSSysplexNameKey = attribute.Key("zos.sysplex.name") +) + +// ZOSSmfID returns an attribute KeyValue conforming to the "zos.smf.id" semantic +// conventions. It represents the System Management Facility (SMF) Identifier +// uniquely identified a z/OS system within a SYSPLEX or mainframe environment +// and is used for system and performance analysis. +func ZOSSmfID(val string) attribute.KeyValue { + return ZOSSmfIDKey.String(val) +} + +// ZOSSysplexName returns an attribute KeyValue conforming to the +// "zos.sysplex.name" semantic conventions. It represents the name of the SYSPLEX +// to which the z/OS system belongs too. 
+func ZOSSysplexName(val string) attribute.KeyValue { + return ZOSSysplexNameKey.String(val) } \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go similarity index 96% rename from vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go index 2c5c7ebd..11101032 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go @@ -4,6 +4,6 @@ // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.34.0 +// patterns for OpenTelemetry things. This package represents the v1.37.0 // version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go new file mode 100644 index 00000000..666bded4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/otel/attribute" +) + +// ErrorType returns an [attribute.KeyValue] identifying the error type of err. +func ErrorType(err error) attribute.KeyValue { + if err == nil { + return ErrorTypeOther + } + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return ErrorTypeOther + } + return ErrorTypeKey.String(value) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go similarity index 98% rename from vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go index 88a998f1..e67469a4 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" const ( // ExceptionEventName is the name of the Span event representing an exception. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/httpconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go similarity index 85% rename from vendor/go.opentelemetry.io/otel/semconv/v1.34.0/httpconv/metric.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go index 79843adb..55bde895 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/httpconv/metric.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go @@ -1,5 +1,8 @@ // Code generated from semantic convention specification. DO NOT EDIT. 
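// Minimal sketch of the new ErrorType helper added in error_type.go above. It
// derives the error.type attribute from the error's dynamic Go type and falls
// back to ErrorTypeOther ("_OTHER") when err is nil; the error value here is
// assumed example data.
package main

import (
	"errors"
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

func main() {
	err := errors.New("connection reset")
	kv := semconv.ErrorType(err)
	fmt.Println(kv.Key, "=", kv.Value.AsString()) // error.type = *errors.errorString

	fmt.Println(semconv.ErrorType(nil).Value.AsString()) // _OTHER
}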
+// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + // Package httpconv provides types and functionality for OpenTelemetry semantic // conventions in the "http" namespace. package httpconv @@ -131,17 +134,14 @@ func (ClientActiveRequests) Description() string { return "Number of active HTTP requests." } -// Add adds incr to the existing count. +// Add adds incr to the existing count for attrs. // // The serverAddress is the server domain name if available without reverse DNS // lookup; otherwise, IP address or Unix domain socket name. // -// The serverPort is the port identifier of the ["URI origin"] HTTP request is -// sent to. +// The serverPort is the server port number. // // All additional attrs passed are included in the recorded value. -// -// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin func (m ClientActiveRequests) Add( ctx context.Context, incr int64, @@ -149,6 +149,11 @@ func (m ClientActiveRequests) Add( serverPort int, attrs ...attribute.KeyValue, ) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + o := addOptPool.Get().(*[]metric.AddOption) defer func() { *o = (*o)[:0] @@ -169,6 +174,23 @@ func (m ClientActiveRequests) Add( m.Int64UpDownCounter.Add(ctx, incr, *o...) } +// AddSet adds incr to the existing count for set. +func (m ClientActiveRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + // AttrURLTemplate returns an optional attribute for the "url.template" semantic // convention. It represents the low-cardinality template of an // [absolute path reference]. @@ -244,17 +266,14 @@ func (ClientConnectionDuration) Description() string { return "The duration of the successfully established outbound HTTP connections." } -// Record records val to the current distribution. +// Record records val to the current distribution for attrs. // // The serverAddress is the server domain name if available without reverse DNS // lookup; otherwise, IP address or Unix domain socket name. // -// The serverPort is the port identifier of the ["URI origin"] HTTP request is -// sent to. +// The serverPort is the server port number. // // All additional attrs passed are included in the recorded value. -// -// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin func (m ClientConnectionDuration) Record( ctx context.Context, val float64, @@ -262,6 +281,11 @@ func (m ClientConnectionDuration) Record( serverPort int, attrs ...attribute.KeyValue, ) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + o := recOptPool.Get().(*[]metric.RecordOption) defer func() { *o = (*o)[:0] @@ -282,6 +306,22 @@ func (m ClientConnectionDuration) Record( m.Float64Histogram.Record(ctx, val, *o...) } +// RecordSet records val to the current distribution for set. +func (m ClientConnectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) 
+} + // AttrNetworkPeerAddress returns an optional attribute for the // "network.peer.address" semantic convention. It represents the peer address of // the network connection - IP address or Unix domain socket name. @@ -356,7 +396,7 @@ func (ClientOpenConnections) Description() string { return "Number of outbound HTTP connections that are currently active or idle on the client." } -// Add adds incr to the existing count. +// Add adds incr to the existing count for attrs. // // The connectionState is the state of the HTTP connection in the HTTP connection // pool. @@ -364,12 +404,9 @@ func (ClientOpenConnections) Description() string { // The serverAddress is the server domain name if available without reverse DNS // lookup; otherwise, IP address or Unix domain socket name. // -// The serverPort is the port identifier of the ["URI origin"] HTTP request is -// sent to. +// The serverPort is the server port number. // // All additional attrs passed are included in the recorded value. -// -// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin func (m ClientOpenConnections) Add( ctx context.Context, incr int64, @@ -378,6 +415,11 @@ func (m ClientOpenConnections) Add( serverPort int, attrs ...attribute.KeyValue, ) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + o := addOptPool.Get().(*[]metric.AddOption) defer func() { *o = (*o)[:0] @@ -399,6 +441,23 @@ func (m ClientOpenConnections) Add( m.Int64UpDownCounter.Add(ctx, incr, *o...) } +// AddSet adds incr to the existing count for set. +func (m ClientOpenConnections) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + // AttrNetworkPeerAddress returns an optional attribute for the // "network.peer.address" semantic convention. It represents the peer address of // the network connection - IP address or Unix domain socket name. @@ -472,21 +531,17 @@ func (ClientRequestBodySize) Description() string { return "Size of HTTP client request bodies." } -// Record records val to the current distribution. +// Record records val to the current distribution for attrs. // // The requestMethod is the HTTP request method. // -// The serverAddress is the host identifier of the ["URI origin"] HTTP request is -// sent to. +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. // -// The serverPort is the port identifier of the ["URI origin"] HTTP request is -// sent to. +// The serverPort is the server port number. // // All additional attrs passed are included in the recorded value. // -// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin -// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin -// // The size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length] header. 
For requests using transport encoding, this should be @@ -501,6 +556,11 @@ func (m ClientRequestBodySize) Record( serverPort int, attrs ...attribute.KeyValue, ) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + o := recOptPool.Get().(*[]metric.RecordOption) defer func() { *o = (*o)[:0] @@ -522,6 +582,29 @@ func (m ClientRequestBodySize) Record( m.Int64Histogram.Record(ctx, val, *o...) } +// RecordSet records val to the current distribution for set. +// +// The size of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ClientRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + // AttrErrorType returns an optional attribute for the "error.type" semantic // convention. It represents the describes a class of error the operation ended // with. @@ -622,20 +705,16 @@ func (ClientRequestDuration) Description() string { return "Duration of HTTP client requests." } -// Record records val to the current distribution. +// Record records val to the current distribution for attrs. // // The requestMethod is the HTTP request method. // -// The serverAddress is the host identifier of the ["URI origin"] HTTP request is -// sent to. +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. // -// The serverPort is the port identifier of the ["URI origin"] HTTP request is -// sent to. +// The serverPort is the server port number. // // All additional attrs passed are included in the recorded value. -// -// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin -// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin func (m ClientRequestDuration) Record( ctx context.Context, val float64, @@ -644,6 +723,11 @@ func (m ClientRequestDuration) Record( serverPort int, attrs ...attribute.KeyValue, ) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + o := recOptPool.Get().(*[]metric.RecordOption) defer func() { *o = (*o)[:0] @@ -665,6 +749,22 @@ func (m ClientRequestDuration) Record( m.Float64Histogram.Record(ctx, val, *o...) } +// RecordSet records val to the current distribution for set. +func (m ClientRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + // AttrErrorType returns an optional attribute for the "error.type" semantic // convention. It represents the describes a class of error the operation ended // with. @@ -765,21 +865,17 @@ func (ClientResponseBodySize) Description() string { return "Size of HTTP client response bodies." } -// Record records val to the current distribution. +// Record records val to the current distribution for attrs. 
// // The requestMethod is the HTTP request method. // -// The serverAddress is the host identifier of the ["URI origin"] HTTP request is -// sent to. +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. // -// The serverPort is the port identifier of the ["URI origin"] HTTP request is -// sent to. +// The serverPort is the server port number. // // All additional attrs passed are included in the recorded value. // -// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin -// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin -// // The size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length] header. For requests using transport encoding, this should be @@ -794,6 +890,11 @@ func (m ClientResponseBodySize) Record( serverPort int, attrs ...attribute.KeyValue, ) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + o := recOptPool.Get().(*[]metric.RecordOption) defer func() { *o = (*o)[:0] @@ -815,6 +916,29 @@ func (m ClientResponseBodySize) Record( m.Int64Histogram.Record(ctx, val, *o...) } +// RecordSet records val to the current distribution for set. +// +// The size of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ClientResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + // AttrErrorType returns an optional attribute for the "error.type" semantic // convention. It represents the describes a class of error the operation ended // with. @@ -915,7 +1039,7 @@ func (ServerActiveRequests) Description() string { return "Number of active HTTP server requests." } -// Add adds incr to the existing count. +// Add adds incr to the existing count for attrs. // // The requestMethod is the HTTP request method. // @@ -931,6 +1055,11 @@ func (m ServerActiveRequests) Add( urlScheme string, attrs ...attribute.KeyValue, ) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + o := addOptPool.Get().(*[]metric.AddOption) defer func() { *o = (*o)[:0] @@ -951,6 +1080,23 @@ func (m ServerActiveRequests) Add( m.Int64UpDownCounter.Add(ctx, incr, *o...) } +// AddSet adds incr to the existing count for set. +func (m ServerActiveRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + // AttrServerAddress returns an optional attribute for the "server.address" // semantic convention. It represents the name of the local HTTP server that // received the request. 
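// Illustrative sketch of the AddSet variants added throughout this file: they
// accept a pre-built attribute.Set, so hot paths can construct the set once and
// reuse it instead of passing an attribute slice on every call. The
// NewServerActiveRequests constructor name is assumed from the package's
// New<Instrument> pattern (it is not shown in this hunk), and the attribute
// values are example data.
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/semconv/v1.37.0/httpconv"
)

func main() {
	ctx := context.Background()

	activeReqs, err := httpconv.NewServerActiveRequests(otel.Meter("example/server"))
	if err != nil {
		panic(err)
	}

	// Built once, reused for every request handled on this route.
	set := attribute.NewSet(
		attribute.String("http.request.method", "GET"),
		attribute.String("url.scheme", "https"),
	)

	activeReqs.AddSet(ctx, 1, set)        // request started
	defer activeReqs.AddSet(ctx, -1, set) // request finished
}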
@@ -1015,7 +1161,7 @@ func (ServerRequestBodySize) Description() string { return "Size of HTTP server request bodies." } -// Record records val to the current distribution. +// Record records val to the current distribution for attrs. // // The requestMethod is the HTTP request method. // @@ -1038,6 +1184,11 @@ func (m ServerRequestBodySize) Record( urlScheme string, attrs ...attribute.KeyValue, ) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + o := recOptPool.Get().(*[]metric.RecordOption) defer func() { *o = (*o)[:0] @@ -1058,6 +1209,29 @@ func (m ServerRequestBodySize) Record( m.Int64Histogram.Record(ctx, val, *o...) } +// RecordSet records val to the current distribution for set. +// +// The size of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ServerRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + // AttrErrorType returns an optional attribute for the "error.type" semantic // convention. It represents the describes a class of error the operation ended // with. @@ -1168,7 +1342,7 @@ func (ServerRequestDuration) Description() string { return "Duration of HTTP server requests." } -// Record records val to the current distribution. +// Record records val to the current distribution for attrs. // // The requestMethod is the HTTP request method. // @@ -1184,6 +1358,11 @@ func (m ServerRequestDuration) Record( urlScheme string, attrs ...attribute.KeyValue, ) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + o := recOptPool.Get().(*[]metric.RecordOption) defer func() { *o = (*o)[:0] @@ -1204,6 +1383,22 @@ func (m ServerRequestDuration) Record( m.Float64Histogram.Record(ctx, val, *o...) } +// RecordSet records val to the current distribution for set. +func (m ServerRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + // AttrErrorType returns an optional attribute for the "error.type" semantic // convention. It represents the describes a class of error the operation ended // with. @@ -1314,7 +1509,7 @@ func (ServerResponseBodySize) Description() string { return "Size of HTTP server response bodies." } -// Record records val to the current distribution. +// Record records val to the current distribution for attrs. // // The requestMethod is the HTTP request method. // @@ -1337,6 +1532,11 @@ func (m ServerResponseBodySize) Record( urlScheme string, attrs ...attribute.KeyValue, ) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + o := recOptPool.Get().(*[]metric.RecordOption) defer func() { *o = (*o)[:0] @@ -1357,6 +1557,29 @@ func (m ServerResponseBodySize) Record( m.Int64Histogram.Record(ctx, val, *o...) 
} +// RecordSet records val to the current distribution for set. +// +// The size of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ServerResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + // AttrErrorType returns an optional attribute for the "error.type" semantic // convention. It represents the describes a class of error the operation ended // with. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go new file mode 100644 index 00000000..a78eafd1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go @@ -0,0 +1,2126 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package httpconv provides types and functionality for OpenTelemetry semantic +// conventions in the "otel" namespace. +package otelconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// ComponentTypeAttr is an attribute conforming to the otel.component.type +// semantic conventions. It represents a name identifying the type of the +// OpenTelemetry component. +type ComponentTypeAttr string + +var ( + // ComponentTypeBatchingSpanProcessor is the builtin SDK batching span + // processor. + ComponentTypeBatchingSpanProcessor ComponentTypeAttr = "batching_span_processor" + // ComponentTypeSimpleSpanProcessor is the builtin SDK simple span processor. + ComponentTypeSimpleSpanProcessor ComponentTypeAttr = "simple_span_processor" + // ComponentTypeBatchingLogProcessor is the builtin SDK batching log record + // processor. + ComponentTypeBatchingLogProcessor ComponentTypeAttr = "batching_log_processor" + // ComponentTypeSimpleLogProcessor is the builtin SDK simple log record + // processor. + ComponentTypeSimpleLogProcessor ComponentTypeAttr = "simple_log_processor" + // ComponentTypeOtlpGRPCSpanExporter is the OTLP span exporter over gRPC with + // protobuf serialization. + ComponentTypeOtlpGRPCSpanExporter ComponentTypeAttr = "otlp_grpc_span_exporter" + // ComponentTypeOtlpHTTPSpanExporter is the OTLP span exporter over HTTP with + // protobuf serialization. 
+ ComponentTypeOtlpHTTPSpanExporter ComponentTypeAttr = "otlp_http_span_exporter" + // ComponentTypeOtlpHTTPJSONSpanExporter is the OTLP span exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONSpanExporter ComponentTypeAttr = "otlp_http_json_span_exporter" + // ComponentTypeZipkinHTTPSpanExporter is the zipkin span exporter over HTTP. + ComponentTypeZipkinHTTPSpanExporter ComponentTypeAttr = "zipkin_http_span_exporter" + // ComponentTypeOtlpGRPCLogExporter is the OTLP log record exporter over gRPC + // with protobuf serialization. + ComponentTypeOtlpGRPCLogExporter ComponentTypeAttr = "otlp_grpc_log_exporter" + // ComponentTypeOtlpHTTPLogExporter is the OTLP log record exporter over HTTP + // with protobuf serialization. + ComponentTypeOtlpHTTPLogExporter ComponentTypeAttr = "otlp_http_log_exporter" + // ComponentTypeOtlpHTTPJSONLogExporter is the OTLP log record exporter over + // HTTP with JSON serialization. + ComponentTypeOtlpHTTPJSONLogExporter ComponentTypeAttr = "otlp_http_json_log_exporter" + // ComponentTypePeriodicMetricReader is the builtin SDK periodically exporting + // metric reader. + ComponentTypePeriodicMetricReader ComponentTypeAttr = "periodic_metric_reader" + // ComponentTypeOtlpGRPCMetricExporter is the OTLP metric exporter over gRPC + // with protobuf serialization. + ComponentTypeOtlpGRPCMetricExporter ComponentTypeAttr = "otlp_grpc_metric_exporter" + // ComponentTypeOtlpHTTPMetricExporter is the OTLP metric exporter over HTTP + // with protobuf serialization. + ComponentTypeOtlpHTTPMetricExporter ComponentTypeAttr = "otlp_http_metric_exporter" + // ComponentTypeOtlpHTTPJSONMetricExporter is the OTLP metric exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONMetricExporter ComponentTypeAttr = "otlp_http_json_metric_exporter" + // ComponentTypePrometheusHTTPTextMetricExporter is the prometheus metric + // exporter over HTTP with the default text-based format. + ComponentTypePrometheusHTTPTextMetricExporter ComponentTypeAttr = "prometheus_http_text_metric_exporter" +) + +// SpanParentOriginAttr is an attribute conforming to the otel.span.parent.origin +// semantic conventions. It represents the determines whether the span has a +// parent span, and if so, [whether it is a remote parent]. +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +type SpanParentOriginAttr string + +var ( + // SpanParentOriginNone is the span does not have a parent, it is a root span. + SpanParentOriginNone SpanParentOriginAttr = "none" + // SpanParentOriginLocal is the span has a parent and the parent's span context + // [isRemote()] is false. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginLocal SpanParentOriginAttr = "local" + // SpanParentOriginRemote is the span has a parent and the parent's span context + // [isRemote()] is true. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginRemote SpanParentOriginAttr = "remote" +) + +// SpanSamplingResultAttr is an attribute conforming to the +// otel.span.sampling_result semantic conventions. It represents the result value +// of the sampler for this span. +type SpanSamplingResultAttr string + +var ( + // SpanSamplingResultDrop is the span is not sampled and not recording. + SpanSamplingResultDrop SpanSamplingResultAttr = "DROP" + // SpanSamplingResultRecordOnly is the span is not sampled, but recording. 
+ SpanSamplingResultRecordOnly SpanSamplingResultAttr = "RECORD_ONLY" + // SpanSamplingResultRecordAndSample is the span is sampled and recording. + SpanSamplingResultRecordAndSample SpanSamplingResultAttr = "RECORD_AND_SAMPLE" +) + +// RPCGRPCStatusCodeAttr is an attribute conforming to the rpc.grpc.status_code +// semantic conventions. It represents the gRPC status code of the last gRPC +// requests performed in scope of this export call. +type RPCGRPCStatusCodeAttr int64 + +var ( + // RPCGRPCStatusCodeOk is the OK. + RPCGRPCStatusCodeOk RPCGRPCStatusCodeAttr = 0 + // RPCGRPCStatusCodeCancelled is the CANCELLED. + RPCGRPCStatusCodeCancelled RPCGRPCStatusCodeAttr = 1 + // RPCGRPCStatusCodeUnknown is the UNKNOWN. + RPCGRPCStatusCodeUnknown RPCGRPCStatusCodeAttr = 2 + // RPCGRPCStatusCodeInvalidArgument is the INVALID_ARGUMENT. + RPCGRPCStatusCodeInvalidArgument RPCGRPCStatusCodeAttr = 3 + // RPCGRPCStatusCodeDeadlineExceeded is the DEADLINE_EXCEEDED. + RPCGRPCStatusCodeDeadlineExceeded RPCGRPCStatusCodeAttr = 4 + // RPCGRPCStatusCodeNotFound is the NOT_FOUND. + RPCGRPCStatusCodeNotFound RPCGRPCStatusCodeAttr = 5 + // RPCGRPCStatusCodeAlreadyExists is the ALREADY_EXISTS. + RPCGRPCStatusCodeAlreadyExists RPCGRPCStatusCodeAttr = 6 + // RPCGRPCStatusCodePermissionDenied is the PERMISSION_DENIED. + RPCGRPCStatusCodePermissionDenied RPCGRPCStatusCodeAttr = 7 + // RPCGRPCStatusCodeResourceExhausted is the RESOURCE_EXHAUSTED. + RPCGRPCStatusCodeResourceExhausted RPCGRPCStatusCodeAttr = 8 + // RPCGRPCStatusCodeFailedPrecondition is the FAILED_PRECONDITION. + RPCGRPCStatusCodeFailedPrecondition RPCGRPCStatusCodeAttr = 9 + // RPCGRPCStatusCodeAborted is the ABORTED. + RPCGRPCStatusCodeAborted RPCGRPCStatusCodeAttr = 10 + // RPCGRPCStatusCodeOutOfRange is the OUT_OF_RANGE. + RPCGRPCStatusCodeOutOfRange RPCGRPCStatusCodeAttr = 11 + // RPCGRPCStatusCodeUnimplemented is the UNIMPLEMENTED. + RPCGRPCStatusCodeUnimplemented RPCGRPCStatusCodeAttr = 12 + // RPCGRPCStatusCodeInternal is the INTERNAL. + RPCGRPCStatusCodeInternal RPCGRPCStatusCodeAttr = 13 + // RPCGRPCStatusCodeUnavailable is the UNAVAILABLE. + RPCGRPCStatusCodeUnavailable RPCGRPCStatusCodeAttr = 14 + // RPCGRPCStatusCodeDataLoss is the DATA_LOSS. + RPCGRPCStatusCodeDataLoss RPCGRPCStatusCodeAttr = 15 + // RPCGRPCStatusCodeUnauthenticated is the UNAUTHENTICATED. + RPCGRPCStatusCodeUnauthenticated RPCGRPCStatusCodeAttr = 16 +) + +// SDKExporterLogExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.exported" semantic conventions. It +// represents the number of log records for which the export has finished, either +// successful or failed. +type SDKExporterLogExported struct { + metric.Int64Counter +} + +// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument. +func NewSDKExporterLogExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterLogExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogExported{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.log.exported", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the export has finished, either successful or failed."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterLogExported{noop.Int64Counter{}}, err + } + return SDKExporterLogExported{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m SDKExporterLogExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterLogExported) Name() string { + return "otel.sdk.exporter.log.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogExported) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogExported) Description() string { + return "The number of log records for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterLogExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterLogExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterLogExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterLogExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. 
It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterLogExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterLogInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.inflight" semantic conventions. It +// represents the number of log records which were passed to the exporter, but +// that have not been exported yet (neither successful, nor failed). +type SDKExporterLogInflight struct { + metric.Int64UpDownCounter +} + +// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument. +func NewSDKExporterLogInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterLogInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.log.inflight", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterLogInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterLogInflight) Name() string { + return "otel.sdk.exporter.log.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogInflight) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogInflight) Description() string { + return "The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. 
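A minimal usage sketch, not part of the vendored file, of driving the exported-log-records counter above; the meter name, component instance name, and counts are illustrative assumptions. Add assembles attributes per call, while AddSet reuses a precomputed attribute.Set, which is the allocation-friendly path the option pools above exist for.

package main

import (
    "context"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
)

func main() {
    ctx := context.Background()
    meter := otel.Meter("example/exporter-health") // assumed application meter

    exported, err := otelconv.NewSDKExporterLogExported(meter)
    if err != nil {
        panic(err)
    }

    // Ad-hoc attributes: Add builds the attribute set on every call.
    exported.Add(ctx, 512,
        exported.AttrComponentType(otelconv.ComponentTypeOtlpGRPCLogExporter),
        exported.AttrComponentName("otlp_grpc_log_exporter/0"), // illustrative instance name
    )

    // Hot path: precompute the set once and reuse it with AddSet (same count as above).
    okAttrs := attribute.NewSet(
        exported.AttrComponentType(otelconv.ComponentTypeOtlpGRPCLogExporter),
        exported.AttrComponentName("otlp_grpc_log_exporter/0"),
    )
    exported.AddSet(ctx, 512, okAttrs)

    // Rejected records count as failures; "rejected" is the documented fallback error.type.
    exported.Add(ctx, 3,
        exported.AttrErrorType(otelconv.ErrorTypeAttr("rejected")),
        exported.AttrComponentType(otelconv.ComponentTypeOtlpGRPCLogExporter),
    )
}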
+func (m SDKExporterLogInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterLogInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterLogInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointExported is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.exported" +// semantic conventions. It represents the number of metric data points for which +// the export has finished, either successful or failed. +type SDKExporterMetricDataPointExported struct { + metric.Int64Counter +} + +// NewSDKExporterMetricDataPointExported returns a new +// SDKExporterMetricDataPointExported instrument. +func NewSDKExporterMetricDataPointExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterMetricDataPointExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.metric_data_point.exported", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."), + metric.WithUnit("{data_point}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err + } + return SDKExporterMetricDataPointExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterMetricDataPointExported) Name() string { + return "otel.sdk.exporter.metric_data_point.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointExported) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointExported) Description() string { + return "The number of metric data points for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterMetricDataPointExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterMetricDataPointExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterMetricDataPointExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterMetricDataPointExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. 
It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterMetricDataPointExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointInflight is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.inflight" +// semantic conventions. It represents the number of metric data points which +// were passed to the exporter, but that have not been exported yet (neither +// successful, nor failed). +type SDKExporterMetricDataPointInflight struct { + metric.Int64UpDownCounter +} + +// NewSDKExporterMetricDataPointInflight returns a new +// SDKExporterMetricDataPointInflight instrument. +func NewSDKExporterMetricDataPointInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterMetricDataPointInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.metric_data_point.inflight", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{data_point}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterMetricDataPointInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterMetricDataPointInflight) Name() string { + return "otel.sdk.exporter.metric_data_point.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointInflight) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointInflight) Description() string { + return "The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterMetricDataPointInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. 
+func (m SDKExporterMetricDataPointInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterMetricDataPointInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterMetricDataPointInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterOperationDuration is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.operation.duration" semantic conventions. +// It represents the duration of exporting a batch of telemetry records. +type SDKExporterOperationDuration struct { + metric.Float64Histogram +} + +// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration +// instrument. +func NewSDKExporterOperationDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKExporterOperationDuration, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "otel.sdk.exporter.operation.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The duration of exporting a batch of telemetry records."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, err + } + return SDKExporterOperationDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterOperationDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterOperationDuration) Name() string { + return "otel.sdk.exporter.operation.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterOperationDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterOperationDuration) Description() string { + return "The duration of exporting a batch of telemetry records." 
+}
+
+// Record records val to the current distribution for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// This metric defines successful operations using the full success definitions
+// for [http]
+// and [grpc]. Anything else is defined as an unsuccessful operation. For
+// successful
+// operations, `error.type` MUST NOT be set. For unsuccessful export operations,
+// `error.type` MUST contain a relevant failure cause.
+//
+// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1
+// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success
+func (m SDKExporterOperationDuration) Record(
+    ctx context.Context,
+    val float64,
+    attrs ...attribute.KeyValue,
+) {
+    if len(attrs) == 0 {
+        m.Float64Histogram.Record(ctx, val)
+        return
+    }
+
+    o := recOptPool.Get().(*[]metric.RecordOption)
+    defer func() {
+        *o = (*o)[:0]
+        recOptPool.Put(o)
+    }()
+
+    *o = append(
+        *o,
+        metric.WithAttributes(
+            attrs...,
+        ),
+    )
+
+    m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// This metric defines successful operations using the full success definitions
+// for [http]
+// and [grpc]. Anything else is defined as an unsuccessful operation. For
+// successful
+// operations, `error.type` MUST NOT be set. For unsuccessful export operations,
+// `error.type` MUST contain a relevant failure cause.
+//
+// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1
+// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success
+func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+    if set.Len() == 0 {
+        m.Float64Histogram.Record(ctx, val)
+        return
+    }
+
+    o := recOptPool.Get().(*[]metric.RecordOption)
+    defer func() {
+        *o = (*o)[:0]
+        recOptPool.Put(o)
+    }()
+
+    *o = append(*o, metric.WithAttributeSet(set))
+    m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (SDKExporterOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+    return attribute.String("error.type", string(val))
+}
+
+// AttrHTTPResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the HTTP status
+// code of the last HTTP request performed in scope of this export call.
+func (SDKExporterOperationDuration) AttrHTTPResponseStatusCode(val int) attribute.KeyValue {
+    return attribute.Int("http.response.status_code", val)
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterOperationDuration) AttrComponentName(val string) attribute.KeyValue {
+    return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
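A sketch, not part of the vendored file, of timing one export call with the operation-duration histogram above; the wrapper function, component instance name, and status code are illustrative assumptions. The value is recorded in seconds, matching the instrument's "s" unit.

package example

import (
    "context"
    "time"

    "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
)

// timedExport wraps one export call and records its duration; dur is a previously
// constructed SDKExporterOperationDuration.
func timedExport(ctx context.Context, dur otelconv.SDKExporterOperationDuration, doExport func(context.Context) error) error {
    start := time.Now()
    err := doExport(ctx)
    elapsed := time.Since(start).Seconds() // the instrument's unit is seconds

    if err != nil {
        // Unsuccessful operations carry a failure cause in error.type.
        dur.Record(ctx, elapsed,
            dur.AttrComponentName("otlp_http_span_exporter/0"), // illustrative instance name
            dur.AttrErrorType(otelconv.ErrorTypeOther),
        )
        return err
    }

    // Successful operations must not set error.type.
    dur.Record(ctx, elapsed,
        dur.AttrComponentName("otlp_http_span_exporter/0"),
        dur.AttrHTTPResponseStatusCode(200),
    )
    return nil
}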
+func (SDKExporterOperationDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrRPCGRPCStatusCode returns an optional attribute for the +// "rpc.grpc.status_code" semantic convention. It represents the gRPC status code +// of the last gRPC requests performed in scope of this export call. +func (SDKExporterOperationDuration) AttrRPCGRPCStatusCode(val RPCGRPCStatusCodeAttr) attribute.KeyValue { + return attribute.Int64("rpc.grpc.status_code", int64(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterOperationDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterOperationDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.exported" semantic conventions. It +// represents the number of spans for which the export has finished, either +// successful or failed. +type SDKExporterSpanExported struct { + metric.Int64Counter +} + +// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument. +func NewSDKExporterSpanExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterSpanExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.span.exported", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the export has finished, either successful or failed."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, err + } + return SDKExporterSpanExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterSpanExported) Name() string { + return "otel.sdk.exporter.span.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanExported) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanExported) Description() string { + return "The number of spans for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
+func (m SDKExporterSpanExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterSpanExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterSpanExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterSpanExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterSpanExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterSpanExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterSpanExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.inflight" semantic conventions. It +// represents the number of spans which were passed to the exporter, but that +// have not been exported yet (neither successful, nor failed). +type SDKExporterSpanInflight struct { + metric.Int64UpDownCounter +} + +// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument. +func NewSDKExporterSpanInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterSpanInflight, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.span.inflight", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterSpanInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterSpanInflight) Name() string { + return "otel.sdk.exporter.span.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanInflight) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanInflight) Description() string { + return "The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterSpanInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterSpanInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. 
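A sketch of the bracketing pattern the inflight up/down counter is intended for, assuming a hypothetical wrapper type that holds the two instruments and a precomputed attribute set: increment by the batch size when spans are handed to the exporter, decrement when the call returns, and count the outcome on the exported counter.

package example

import (
    "context"

    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
)

// spanExportMetrics is a hypothetical holder for the instruments and a precomputed
// attribute set (e.g. otel.component.name / otel.component.type).
type spanExportMetrics struct {
    inflight otelconv.SDKExporterSpanInflight
    exported otelconv.SDKExporterSpanExported
    attrs    attribute.Set
}

func (m spanExportMetrics) export(ctx context.Context, batch int64, do func(context.Context) error) error {
    m.inflight.AddSet(ctx, batch, m.attrs)  // spans handed to the exporter
    err := do(ctx)
    m.inflight.AddSet(ctx, -batch, m.attrs) // no longer in flight, whatever the outcome

    if err != nil {
        failed := attribute.NewSet(append(m.attrs.ToSlice(),
            m.exported.AttrErrorType(otelconv.ErrorTypeOther))...)
        m.exported.AddSet(ctx, batch, failed)
        return err
    }
    m.exported.AddSet(ctx, batch, m.attrs)
    return nil
}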
+func (SDKExporterSpanInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterSpanInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKLogCreated is an instrument used to record metric values conforming to the +// "otel.sdk.log.created" semantic conventions. It represents the number of logs +// submitted to enabled SDK Loggers. +type SDKLogCreated struct { + metric.Int64Counter +} + +// NewSDKLogCreated returns a new SDKLogCreated instrument. +func NewSDKLogCreated( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKLogCreated, error) { + // Check if the meter is nil. + if m == nil { + return SDKLogCreated{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.log.created", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of logs submitted to enabled SDK Loggers."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKLogCreated{noop.Int64Counter{}}, err + } + return SDKLogCreated{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKLogCreated) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKLogCreated) Name() string { + return "otel.sdk.log.created" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKLogCreated) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKLogCreated) Description() string { + return "The number of logs submitted to enabled SDK Loggers." +} + +// Add adds incr to the existing count for attrs. +func (m SDKLogCreated) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m SDKLogCreated) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// SDKMetricReaderCollectionDuration is an instrument used to record metric +// values conforming to the "otel.sdk.metric_reader.collection.duration" semantic +// conventions. It represents the duration of the collect operation of the metric +// reader. +type SDKMetricReaderCollectionDuration struct { + metric.Float64Histogram +} + +// NewSDKMetricReaderCollectionDuration returns a new +// SDKMetricReaderCollectionDuration instrument. +func NewSDKMetricReaderCollectionDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKMetricReaderCollectionDuration, error) { + // Check if the meter is nil. 
+    if m == nil {
+        return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil
+    }
+
+    i, err := m.Float64Histogram(
+        "otel.sdk.metric_reader.collection.duration",
+        append([]metric.Float64HistogramOption{
+            metric.WithDescription("The duration of the collect operation of the metric reader."),
+            metric.WithUnit("s"),
+        }, opt...)...,
+    )
+    if err != nil {
+        return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err
+    }
+    return SDKMetricReaderCollectionDuration{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKMetricReaderCollectionDuration) Inst() metric.Float64Histogram {
+    return m.Float64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKMetricReaderCollectionDuration) Name() string {
+    return "otel.sdk.metric_reader.collection.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKMetricReaderCollectionDuration) Unit() string {
+    return "s"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKMetricReaderCollectionDuration) Description() string {
+    return "The duration of the collect operation of the metric reader."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful collections, `error.type` MUST NOT be set. For failed
+// collections, `error.type` SHOULD contain the failure cause.
+// It can happen that metrics collection is successful for some MetricProducers,
+// while others fail. In that case `error.type` SHOULD be set to any of the
+// failure causes.
+func (m SDKMetricReaderCollectionDuration) Record(
+    ctx context.Context,
+    val float64,
+    attrs ...attribute.KeyValue,
+) {
+    if len(attrs) == 0 {
+        m.Float64Histogram.Record(ctx, val)
+        return
+    }
+
+    o := recOptPool.Get().(*[]metric.RecordOption)
+    defer func() {
+        *o = (*o)[:0]
+        recOptPool.Put(o)
+    }()
+
+    *o = append(
+        *o,
+        metric.WithAttributes(
+            attrs...,
+        ),
+    )
+
+    m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// For successful collections, `error.type` MUST NOT be set. For failed
+// collections, `error.type` SHOULD contain the failure cause.
+// It can happen that metrics collection is successful for some MetricProducers,
+// while others fail. In that case `error.type` SHOULD be set to any of the
+// failure causes.
+func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+    if set.Len() == 0 {
+        m.Float64Histogram.Record(ctx, val)
+        return
+    }
+
+    o := recOptPool.Get().(*[]metric.RecordOption)
+    defer func() {
+        *o = (*o)[:0]
+        recOptPool.Put(o)
+    }()
+
+    *o = append(*o, metric.WithAttributeSet(set))
+    m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (SDKMetricReaderCollectionDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+    return attribute.String("error.type", string(val))
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
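A sketch of recording one collection pass under the rule above, assuming a hypothetical collect function that returns one error per failing MetricProducer; a partially failed collection still records a single duration, with error.type set to one of the failure causes.

package example

import (
    "context"
    "time"

    "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
)

// recordCollection times one collect pass; dur is a previously constructed
// SDKMetricReaderCollectionDuration.
func recordCollection(ctx context.Context, dur otelconv.SDKMetricReaderCollectionDuration, collect func(context.Context) []error) {
    start := time.Now()
    errs := collect(ctx)
    elapsed := time.Since(start).Seconds() // the instrument's unit is seconds

    if len(errs) > 0 {
        // Partial failures still record one duration, with error.type set to any of
        // the causes; "_OTHER" is the generic fallback defined above.
        dur.Record(ctx, elapsed, dur.AttrErrorType(otelconv.ErrorTypeOther))
        return
    }
    dur.Record(ctx, elapsed)
}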
+func (SDKMetricReaderCollectionDuration) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKMetricReaderCollectionDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.processed" semantic conventions. It +// represents the number of log records for which the processing has finished, +// either successful or failed. +type SDKProcessorLogProcessed struct { + metric.Int64Counter +} + +// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument. +func NewSDKProcessorLogProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorLogProcessed, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.processor.log.processed", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorLogProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogProcessed) Name() string { + return "otel.sdk.processor.log.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogProcessed) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogProcessed) Description() string { + return "The number of log records for which the processing has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. +func (m SDKProcessorLogProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. 
+func (m SDKProcessorLogProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Log Record Processors MUST use `queue_full` for log records +// dropped due to a full queue. +func (SDKProcessorLogProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.capacity" semantic +// conventions. It represents the maximum number of log records the queue of a +// given instance of an SDK Log Record processor can hold. +type SDKProcessorLogQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity +// instrument. +func NewSDKProcessorLogQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueCapacity, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.capacity", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogQueueCapacity) Name() string { + return "otel.sdk.processor.log.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueCapacity) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueCapacity) Description() string { + return "The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold." 
+} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.size" semantic conventions. It +// represents the number of log records in the queue of a given instance of an +// SDK log processor. +type SDKProcessorLogQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument. +func NewSDKProcessorLogQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueSize, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.size", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogQueueSize) Name() string { + return "otel.sdk.processor.log.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueSize) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueSize) Description() string { + return "The number of log records in the queue of a given instance of an SDK log processor." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.processed" semantic conventions. 
It +// represents the number of spans for which the processing has finished, either +// successful or failed. +type SDKProcessorSpanProcessed struct { + metric.Int64Counter +} + +// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed +// instrument. +func NewSDKProcessorSpanProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorSpanProcessed, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.processor.span.processed", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorSpanProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanProcessed) Name() string { + return "otel.sdk.processor.span.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanProcessed) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanProcessed) Description() string { + return "The number of spans for which the processing has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. +func (m SDKProcessorSpanProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. +func (m SDKProcessorSpanProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Span Processors MUST use `queue_full` for spans dropped due to a +// full queue. 
+func (SDKProcessorSpanProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.capacity" semantic +// conventions. It represents the maximum number of spans the queue of a given +// instance of an SDK span processor can hold. +type SDKProcessorSpanQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity +// instrument. +func NewSDKProcessorSpanQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueCapacity, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.capacity", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanQueueCapacity) Name() string { + return "otel.sdk.processor.span.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueCapacity) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueCapacity) Description() string { + return "The maximum number of spans the queue of a given instance of an SDK span processor can hold." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. 
+func (SDKProcessorSpanQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.size" semantic conventions. +// It represents the number of spans in the queue of a given instance of an SDK +// span processor. +type SDKProcessorSpanQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize +// instrument. +func NewSDKProcessorSpanQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueSize, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.size", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanQueueSize) Name() string { + return "otel.sdk.processor.span.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueSize) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueSize) Description() string { + return "The number of spans in the queue of a given instance of an SDK span processor." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKSpanLive is an instrument used to record metric values conforming to the +// "otel.sdk.span.live" semantic conventions. It represents the number of created +// spans with `recording=true` for which the end operation has not been called +// yet. +type SDKSpanLive struct { + metric.Int64UpDownCounter +} + +// NewSDKSpanLive returns a new SDKSpanLive instrument. +func NewSDKSpanLive( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKSpanLive, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.span.live", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, err + } + return SDKSpanLive{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanLive) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKSpanLive) Name() string { + return "otel.sdk.span.live" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanLive) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanLive) Description() string { + return "The number of created spans with `recording=true` for which the end operation has not been called yet." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m SDKSpanLive) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m SDKSpanLive) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrSpanSamplingResult returns an optional attribute for the +// "otel.span.sampling_result" semantic convention. It represents the result +// value of the sampler for this span. +func (SDKSpanLive) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { + return attribute.String("otel.span.sampling_result", string(val)) +} + +// SDKSpanStarted is an instrument used to record metric values conforming to the +// "otel.sdk.span.started" semantic conventions. It represents the number of +// created spans. +type SDKSpanStarted struct { + metric.Int64Counter +} + +// NewSDKSpanStarted returns a new SDKSpanStarted instrument. +func NewSDKSpanStarted( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKSpanStarted, error) { + // Check if the meter is nil. + if m == nil { + return SDKSpanStarted{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.span.started", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of created spans."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKSpanStarted{noop.Int64Counter{}}, err + } + return SDKSpanStarted{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanStarted) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKSpanStarted) Name() string { + return "otel.sdk.span.started" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanStarted) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanStarted) Description() string { + return "The number of created spans." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrSpanParentOrigin returns an optional attribute for the +// "otel.span.parent.origin" semantic convention. It represents the determines +// whether the span has a parent span, and if so, [whether it is a remote parent] +// . +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.KeyValue { + return attribute.String("otel.span.parent.origin", string(val)) +} + +// AttrSpanSamplingResult returns an optional attribute for the +// "otel.span.sampling_result" semantic convention. It represents the result +// value of the sampler for this span. +func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { + return attribute.String("otel.span.sampling_result", string(val)) +} \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go similarity index 85% rename from vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go index 3c23d459..f8a0b704 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go @@ -1,9 +1,9 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. 
Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.34.0" +const SchemaURL = "https://opentelemetry.io/schemas/1.37.0" diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/trace/LICENSE +++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go index f3aa3981..8763936a 100644 --- a/vendor/go.opentelemetry.io/otel/trace/auto.go +++ b/vendor/go.opentelemetry.io/otel/trace/auto.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/internal/telemetry" ) @@ -39,7 +39,7 @@ type autoTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = autoTracerProvider{} -func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { +func (autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { cfg := NewTracerConfig(opts...) return autoTracer{ name: name, @@ -81,7 +81,7 @@ func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOpt // Expected to be implemented in eBPF. 
// //go:noinline -func (t *autoTracer) start( +func (*autoTracer) start( ctx context.Context, spanPtr *autoSpan, psc *SpanContext, diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index 9c0b720a..aea11a2b 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -73,7 +73,7 @@ func (cfg *SpanConfig) Timestamp() time.Time { return cfg.timestamp } -// StackTrace checks whether stack trace capturing is enabled. +// StackTrace reports whether stack trace capturing is enabled. func (cfg *SpanConfig) StackTrace() bool { return cfg.stackTrace } @@ -154,7 +154,7 @@ func (cfg *EventConfig) Timestamp() time.Time { return cfg.timestamp } -// StackTrace checks whether stack trace capturing is enabled. +// StackTrace reports whether stack trace capturing is enabled. func (cfg *EventConfig) StackTrace() bool { return cfg.stackTrace } diff --git a/vendor/go.opentelemetry.io/otel/trace/hex.go b/vendor/go.opentelemetry.io/otel/trace/hex.go new file mode 100644 index 00000000..1cbef1d4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/hex.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +const ( + // hexLU is a hex lookup table of the 16 lowercase hex digits. + // The character values of the string are indexed at the equivalent + // hexadecimal value they represent. This table efficiently encodes byte data + // into a string representation of hexadecimal. + hexLU = "0123456789abcdef" + + // hexRev is a reverse hex lookup table for lowercase hex digits. + // The table is efficiently decodes a hexadecimal string into bytes. + // Valid hexadecimal characters are indexed at their respective values. All + // other invalid ASCII characters are represented with '\xff'. + // + // The '\xff' character is used as invalid because no valid character has + // the upper 4 bits set. Meaning, an efficient validation can be performed + // over multiple character parsing by checking these bits remain zero. 
+ hexRev = "" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +) diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go index f663547b..ff0f6eac 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go @@ -52,7 +52,7 @@ func Map(key string, value ...Attr) Attr { return Attr{key, MapValue(value...)} } -// Equal returns if a is equal to b. +// Equal reports whether a is equal to b. func (a Attr) Equal(b Attr) bool { return a.Key == b.Key && a.Value.Equal(b.Value) } diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go index 7b1ae3c4..bea56f2e 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go @@ -22,7 +22,7 @@ func (tid TraceID) String() string { return hex.EncodeToString(tid[:]) } -// IsEmpty returns false if id contains at least one non-zero byte. +// IsEmpty reports whether the TraceID contains only zero bytes. func (tid TraceID) IsEmpty() bool { return tid == [traceIDSize]byte{} } @@ -50,7 +50,7 @@ func (sid SpanID) String() string { return hex.EncodeToString(sid[:]) } -// IsEmpty returns true if the span ID contains at least one non-zero byte. +// IsEmpty reports whether the SpanID contains only zero bytes. func (sid SpanID) IsEmpty() bool { return sid == [spanIDSize]byte{} } @@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) { } // unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. -func unmarshalJSON(dst []byte, src []byte) error { +func unmarshalJSON(dst, src []byte) error { if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { src = src[1 : l-1] } diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go index ae9ce102..cb7927b8 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go @@ -257,10 +257,10 @@ func (v Value) Kind() ValueKind { } } -// Empty returns if v does not hold any value. +// Empty reports whether v does not hold any value. 
func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } -// Equal returns if v is equal to w. +// Equal reports whether v is equal to w. func (v Value) Equal(w Value) bool { k1 := v.Kind() k2 := w.Kind() diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index 0f56e4db..400fab12 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -26,7 +26,7 @@ type noopTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = noopTracerProvider{} // Tracer returns noop implementation of Tracer. -func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { +func (noopTracerProvider) Tracer(string, ...TracerOption) Tracer { return noopTracer{} } @@ -37,7 +37,7 @@ var _ Tracer = noopTracer{} // Start carries forward a non-recording Span, if one is present in the context, otherwise it // creates a no-op Span. -func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { +func (noopTracer) Start(ctx context.Context, _ string, _ ...SpanStartOption) (context.Context, Span) { span := SpanFromContext(ctx) if _, ok := span.(nonRecordingSpan); !ok { // span is likely already a noopSpan, but let's be sure diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go index 64a4f1b3..689d220d 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go @@ -51,7 +51,7 @@ type Tracer struct{ embedded.Tracer } // If ctx contains a span context, the returned span will also contain that // span context. If the span context in ctx is for a non-recording span, that // span instance will be returned directly. -func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { +func (Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { span := trace.SpanFromContext(ctx) // If the parent context contains a non-zero span context, that span diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index d49adf67..ee6f4bcb 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -4,8 +4,6 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( - "bytes" - "encoding/hex" "encoding/json" ) @@ -38,21 +36,47 @@ var ( _ json.Marshaler = nilTraceID ) -// IsValid checks whether the trace TraceID is valid. A valid trace ID does +// IsValid reports whether the trace TraceID is valid. A valid trace ID does // not consist of zeros only. func (t TraceID) IsValid() bool { - return !bytes.Equal(t[:], nilTraceID[:]) + return t != nilTraceID } // MarshalJSON implements a custom marshal function to encode TraceID // as a hex string. func (t TraceID) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) + b := [32 + 2]byte{0: '"', 33: '"'} + h := t.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of a TraceID. func (t TraceID) String() string { - return hex.EncodeToString(t[:]) + h := t.hexBytes() + return string(h[:]) +} + +// hexBytes returns the hex string representation form of a TraceID. 
+func (t TraceID) hexBytes() [32]byte { + return [32]byte{ + hexLU[t[0x0]>>4], hexLU[t[0x0]&0xf], + hexLU[t[0x1]>>4], hexLU[t[0x1]&0xf], + hexLU[t[0x2]>>4], hexLU[t[0x2]&0xf], + hexLU[t[0x3]>>4], hexLU[t[0x3]&0xf], + hexLU[t[0x4]>>4], hexLU[t[0x4]&0xf], + hexLU[t[0x5]>>4], hexLU[t[0x5]&0xf], + hexLU[t[0x6]>>4], hexLU[t[0x6]&0xf], + hexLU[t[0x7]>>4], hexLU[t[0x7]&0xf], + hexLU[t[0x8]>>4], hexLU[t[0x8]&0xf], + hexLU[t[0x9]>>4], hexLU[t[0x9]&0xf], + hexLU[t[0xa]>>4], hexLU[t[0xa]&0xf], + hexLU[t[0xb]>>4], hexLU[t[0xb]&0xf], + hexLU[t[0xc]>>4], hexLU[t[0xc]&0xf], + hexLU[t[0xd]>>4], hexLU[t[0xd]&0xf], + hexLU[t[0xe]>>4], hexLU[t[0xe]&0xf], + hexLU[t[0xf]>>4], hexLU[t[0xf]&0xf], + } } // SpanID is a unique identity of a span in a trace. @@ -63,21 +87,38 @@ var ( _ json.Marshaler = nilSpanID ) -// IsValid checks whether the SpanID is valid. A valid SpanID does not consist +// IsValid reports whether the SpanID is valid. A valid SpanID does not consist // of zeros only. func (s SpanID) IsValid() bool { - return !bytes.Equal(s[:], nilSpanID[:]) + return s != nilSpanID } // MarshalJSON implements a custom marshal function to encode SpanID // as a hex string. func (s SpanID) MarshalJSON() ([]byte, error) { - return json.Marshal(s.String()) + b := [16 + 2]byte{0: '"', 17: '"'} + h := s.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of a SpanID. func (s SpanID) String() string { - return hex.EncodeToString(s[:]) + b := s.hexBytes() + return string(b[:]) +} + +func (s SpanID) hexBytes() [16]byte { + return [16]byte{ + hexLU[s[0]>>4], hexLU[s[0]&0xf], + hexLU[s[1]>>4], hexLU[s[1]&0xf], + hexLU[s[2]>>4], hexLU[s[2]&0xf], + hexLU[s[3]>>4], hexLU[s[3]&0xf], + hexLU[s[4]>>4], hexLU[s[4]&0xf], + hexLU[s[5]>>4], hexLU[s[5]&0xf], + hexLU[s[6]>>4], hexLU[s[6]&0xf], + hexLU[s[7]>>4], hexLU[s[7]&0xf], + } } // TraceIDFromHex returns a TraceID from a hex string if it is compliant with @@ -85,65 +126,58 @@ func (s SpanID) String() string { // https://www.w3.org/TR/trace-context/#trace-id // nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. func TraceIDFromHex(h string) (TraceID, error) { - t := TraceID{} if len(h) != 32 { - return t, errInvalidTraceIDLength + return [16]byte{}, errInvalidTraceIDLength } - - if err := decodeHex(h, t[:]); err != nil { - return t, err + var b [16]byte + invalidMark := byte(0) + for i := 0; i < len(h); i += 4 { + b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]] + b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]] + invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]] } - - if !t.IsValid() { - return t, errNilTraceID + // If the upper 4 bits of any byte are not zero, there was an invalid hex + // character since invalid hex characters are 0xff in hexRev. + if invalidMark&0xf0 != 0 { + return [16]byte{}, errInvalidHexID } - return t, nil + // If we didn't set any bits, then h was all zeros. + if invalidMark == 0 { + return [16]byte{}, errNilTraceID + } + return b, nil } // SpanIDFromHex returns a SpanID from a hex string if it is compliant // with the w3c trace-context specification. 
// See more at https://www.w3.org/TR/trace-context/#parent-id func SpanIDFromHex(h string) (SpanID, error) { - s := SpanID{} if len(h) != 16 { - return s, errInvalidSpanIDLength + return [8]byte{}, errInvalidSpanIDLength } - - if err := decodeHex(h, s[:]); err != nil { - return s, err + var b [8]byte + invalidMark := byte(0) + for i := 0; i < len(h); i += 4 { + b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]] + b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]] + invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]] } - - if !s.IsValid() { - return s, errNilSpanID + // If the upper 4 bits of any byte are not zero, there was an invalid hex + // character since invalid hex characters are 0xff in hexRev. + if invalidMark&0xf0 != 0 { + return [8]byte{}, errInvalidHexID } - return s, nil -} - -func decodeHex(h string, b []byte) error { - for _, r := range h { - switch { - case 'a' <= r && r <= 'f': - continue - case '0' <= r && r <= '9': - continue - default: - return errInvalidHexID - } + // If we didn't set any bits, then h was all zeros. + if invalidMark == 0 { + return [8]byte{}, errNilSpanID } - - decoded, err := hex.DecodeString(h) - if err != nil { - return err - } - - copy(b, decoded) - return nil + return b, nil } // TraceFlags contains flags that can be set on a SpanContext. type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. -// IsSampled returns if the sampling bit is set in the TraceFlags. +// IsSampled reports whether the sampling bit is set in the TraceFlags. func (tf TraceFlags) IsSampled() bool { return tf&FlagsSampled == FlagsSampled } @@ -160,12 +194,20 @@ func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // // MarshalJSON implements a custom marshal function to encode TraceFlags // as a hex string. func (tf TraceFlags) MarshalJSON() ([]byte, error) { - return json.Marshal(tf.String()) + b := [2 + 2]byte{0: '"', 3: '"'} + h := tf.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of TraceFlags. func (tf TraceFlags) String() string { - return hex.EncodeToString([]byte{byte(tf)}[:]) + h := tf.hexBytes() + return string(h[:]) +} + +func (tf TraceFlags) hexBytes() [2]byte { + return [2]byte{hexLU[tf>>4], hexLU[tf&0xf]} } // SpanContextConfig contains mutable fields usable for constructing @@ -201,13 +243,13 @@ type SpanContext struct { var _ json.Marshaler = SpanContext{} -// IsValid returns if the SpanContext is valid. A valid span context has a +// IsValid reports whether the SpanContext is valid. A valid span context has a // valid TraceID and SpanID. func (sc SpanContext) IsValid() bool { return sc.HasTraceID() && sc.HasSpanID() } -// IsRemote indicates whether the SpanContext represents a remotely-created Span. +// IsRemote reports whether the SpanContext represents a remotely-created Span. func (sc SpanContext) IsRemote() bool { return sc.remote } @@ -228,7 +270,7 @@ func (sc SpanContext) TraceID() TraceID { return sc.traceID } -// HasTraceID checks if the SpanContext has a valid TraceID. +// HasTraceID reports whether the SpanContext has a valid TraceID. func (sc SpanContext) HasTraceID() bool { return sc.traceID.IsValid() } @@ -249,7 +291,7 @@ func (sc SpanContext) SpanID() SpanID { return sc.spanID } -// HasSpanID checks if the SpanContext has a valid SpanID. +// HasSpanID reports whether the SpanContext has a valid SpanID. 
func (sc SpanContext) HasSpanID() bool { return sc.spanID.IsValid() } @@ -270,7 +312,7 @@ func (sc SpanContext) TraceFlags() TraceFlags { return sc.traceFlags } -// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. +// IsSampled reports whether the sampling bit is set in the SpanContext's TraceFlags. func (sc SpanContext) IsSampled() bool { return sc.traceFlags.IsSampled() } @@ -302,7 +344,7 @@ func (sc SpanContext) WithTraceState(state TraceState) SpanContext { } } -// Equal is a predicate that determines whether two SpanContext values are equal. +// Equal reports whether two SpanContext values are equal. func (sc SpanContext) Equal(other SpanContext) bool { return sc.traceID == other.traceID && sc.spanID == other.spanID && diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index dc5e34ca..073adae2 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -80,7 +80,7 @@ func checkKeyRemain(key string) bool { // // param n is remain part length, should be 255 in simple-key or 13 in system-id. func checkKeyPart(key string, n int) bool { - if len(key) == 0 { + if key == "" { return false } first := key[0] // key's first char @@ -102,7 +102,7 @@ func isAlphaNum(c byte) bool { // // param n is remain part length, should be 240 exactly. func checkKeyTenant(key string, n int) bool { - if len(key) == 0 { + if key == "" { return false } return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) @@ -191,7 +191,7 @@ func ParseTraceState(ts string) (TraceState, error) { for ts != "" { var memberStr string memberStr, ts, _ = strings.Cut(ts, listDelimiters) - if len(memberStr) == 0 { + if memberStr == "" { continue } diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 7afe92b5..bcaa5aa5 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.37.0" + return "1.38.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 9d4742a1..07145e25 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.37.0 + version: v1.38.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -22,11 +22,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.59.0 + version: v0.60.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.13.0 + version: v0.14.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/log/logtest @@ -36,7 +36,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.12 + version: v0.0.13 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go index ec187b13..ff828279 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go @@ -994,6 +994,16 @@ type NumberDataPoint struct { // where this point belongs. The list may be empty (may contain 0 elements). // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // + // The attribute values SHOULD NOT contain empty values. + // The attribute values SHOULD NOT contain bytes values. + // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, + // double values. + // The attribute values SHOULD NOT contain kvlist values. + // The behavior of software that receives attributes containing such values can be unpredictable. + // These restrictions can change in a minor release. + // The restrictions take origin from the OpenTelemetry specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. Attributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"` // StartTimeUnixNano is optional but strongly encouraged, see the // the detailed comments above Metric. @@ -1144,6 +1154,16 @@ type HistogramDataPoint struct { // where this point belongs. The list may be empty (may contain 0 elements). // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // + // The attribute values SHOULD NOT contain empty values. + // The attribute values SHOULD NOT contain bytes values. + // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, + // double values. + // The attribute values SHOULD NOT contain kvlist values. + // The behavior of software that receives attributes containing such values can be unpredictable. + // These restrictions can change in a minor release. + // The restrictions take origin from the OpenTelemetry specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. 
Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` // StartTimeUnixNano is optional but strongly encouraged, see the // the detailed comments above Metric. @@ -1331,6 +1351,16 @@ type ExponentialHistogramDataPoint struct { // where this point belongs. The list may be empty (may contain 0 elements). // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // + // The attribute values SHOULD NOT contain empty values. + // The attribute values SHOULD NOT contain bytes values. + // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, + // double values. + // The attribute values SHOULD NOT contain kvlist values. + // The behavior of software that receives attributes containing such values can be unpredictable. + // These restrictions can change in a minor release. + // The restrictions take origin from the OpenTelemetry specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. Attributes []*v11.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` // StartTimeUnixNano is optional but strongly encouraged, see the // the detailed comments above Metric. @@ -1546,6 +1576,16 @@ type SummaryDataPoint struct { // where this point belongs. The list may be empty (may contain 0 elements). // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // + // The attribute values SHOULD NOT contain empty values. + // The attribute values SHOULD NOT contain bytes values. + // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, + // double values. + // The attribute values SHOULD NOT contain kvlist values. + // The behavior of software that receives attributes containing such values can be unpredictable. + // These restrictions can change in a minor release. + // The restrictions take origin from the OpenTelemetry specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. Attributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"` // StartTimeUnixNano is optional but strongly encouraged, see the // the detailed comments above Metric. diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go index eb7745d6..3bcfcd82 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go @@ -44,6 +44,16 @@ type Resource struct { // Set of attributes that describe the resource. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // + // The attribute values SHOULD NOT contain empty values. + // The attribute values SHOULD NOT contain bytes values. + // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, + // double values. + // The attribute values SHOULD NOT contain kvlist values. + // The behavior of software that receives attributes containing such values can be unpredictable. + // These restrictions can change in a minor release. 
+ // The restrictions take origin from the OpenTelemetry specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` // dropped_attributes_count is the number of dropped attributes. If the value is 0, then // no attributes were dropped. diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go index b342a0a9..f9bdfc0a 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -534,10 +534,18 @@ type Span struct { // "example.com/myattribute": true // "example.com/score": 10.239 // - // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // + // The attribute values SHOULD NOT contain empty values. + // The attribute values SHOULD NOT contain bytes values. + // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, + // double values. + // The attribute values SHOULD NOT contain kvlist values. + // The behavior of software that receives attributes containing such values can be unpredictable. + // These restrictions can change in a minor release. + // The restrictions take origin from the OpenTelemetry specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` // dropped_attributes_count is the number of attributes that were discarded. Attributes // can be discarded because their keys are too long or because there are too many @@ -777,6 +785,16 @@ type Span_Event struct { // attributes is a collection of attribute key/value pairs on the event. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // + // The attribute values SHOULD NOT contain empty values. + // The attribute values SHOULD NOT contain bytes values. + // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, + // double values. + // The attribute values SHOULD NOT contain kvlist values. + // The behavior of software that receives attributes containing such values can be unpredictable. + // These restrictions can change in a minor release. + // The restrictions take origin from the OpenTelemetry specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` // dropped_attributes_count is the number of dropped attributes. If the value is 0, // then no attributes were dropped. @@ -862,6 +880,16 @@ type Span_Link struct { // attributes is a collection of attribute key/value pairs on the link. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // + // The attribute values SHOULD NOT contain empty values. + // The attribute values SHOULD NOT contain bytes values. 
+ // The attribute values SHOULD NOT contain array values different than array of string values, bool values, int values, + // double values. + // The attribute values SHOULD NOT contain kvlist values. + // The behavior of software that receives attributes containing such values can be unpredictable. + // These restrictions can change in a minor release. + // The restrictions take origin from the OpenTelemetry specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute. Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"` // dropped_attributes_count is the number of dropped attributes. If the value is 0, // then no attributes were dropped. diff --git a/vendor/go.yaml.in/yaml/v2/.travis.yml b/vendor/go.yaml.in/yaml/v2/.travis.yml new file mode 100644 index 00000000..7348c50c --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "1.14.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/go.yaml.in/yaml/v2/LICENSE b/vendor/go.yaml.in/yaml/v2/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml b/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml new file mode 100644 index 00000000..8da58fbf --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/go.yaml.in/yaml/v2/NOTICE b/vendor/go.yaml.in/yaml/v2/NOTICE new file mode 100644 index 00000000..866d74a7 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/go.yaml.in/yaml/v2/README.md b/vendor/go.yaml.in/yaml/v2/README.md new file mode 100644 index 00000000..c9388da4 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/README.md @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *go.yaml.in/yaml/v2*. 
+ +To install it, run: + + go get go.yaml.in/yaml/v2 + +API documentation +----------------- + +See: + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/go.yaml.in/yaml/v2/apic.go b/vendor/go.yaml.in/yaml/v2/apic.go new file mode 100644 index 00000000..acf71402 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/apic.go @@ -0,0 +1,744 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. 
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +var disableLineWrapping = false + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + if disableLineWrapping { + emitter.best_width = -1 + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. 
+// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/go.yaml.in/yaml/v2/decode.go b/vendor/go.yaml.in/yaml/v2/decode.go new file mode 100644 index 00000000..129bc2a9 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/decode.go @@ -0,0 +1,815 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool + + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. 
+// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
+ out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + if n.alias != nil && n.alias.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + if ni.alias != nil && ni.alias.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/go.yaml.in/yaml/v2/emitterc.go b/vendor/go.yaml.in/yaml/v2/emitterc.go new file mode 100644 index 00000000..a1c2cc52 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. 
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? 
+ tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
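yaml_emitter_emit_block_mapping_key only takes the compact "key: value" shortcut when yaml_emitter_check_simple_key (just below) accepts the key; otherwise it falls back to the explicit '?' indicator. One way to trigger that fallback from user code is a map key longer than the 128-byte simple-key limit. A sketch, assuming the output shape noted in the comment:

package main

import (
	"fmt"
	"strings"

	yaml "go.yaml.in/yaml/v2"
)

func main() {
	longKey := strings.Repeat("k", 200) // exceeds the 128-byte simple-key limit
	out, err := yaml.Marshal(map[string]int{longKey: 1})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected to use the explicit key form:
	// ? kkkk...k
	// : 1
}
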
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
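yaml_emitter_check_empty_sequence and yaml_emitter_check_empty_mapping are why empty collections always come out in flow style, even in an otherwise block-style document: yaml_emitter_emit_sequence_start and yaml_emitter_emit_mapping_start switch to the flow states when the next two queued events open and immediately close the collection. A quick sketch:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v2"
)

type config struct {
	List []string          `yaml:"list"`
	Opts map[string]string `yaml:"opts"`
}

func main() {
	out, err := yaml.Marshal(config{List: []string{}, Opts: map[string]string{}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected:
	// list: []
	// opts: {}
}
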
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
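yaml_emitter_select_scalar_style is where a requested plain scalar gets downgraded to a quoted or block style when plain output would change its meaning or lose content. Together with the encoder's own checks in encode.go further down, the practical effect is that strings which would re-parse as another type come out quoted and multi-line strings come out as literal blocks. A sketch, with the expected rendering hedged in the comments:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v2"
)

type doc struct {
	Version string `yaml:"version"`
	Answer  string `yaml:"answer"`
	Text    string `yaml:"text"`
}

func main() {
	out, err := yaml.Marshal(doc{Version: "1.20", Answer: "true", Text: "line1\nline2"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected, roughly:
	// version: "1.20"   (would otherwise re-parse as a float)
	// answer: "true"    (would otherwise re-parse as a bool)
	// text: |-          (multi-line strings use the literal block style)
	//   line1
	//   line2
}
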
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
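yaml_emitter_analyze_tag splits an explicit tag into a directive handle plus suffix (so "tag:yaml.org,2002:binary" prints as "!!binary"), and yaml_emitter_process_tag above writes it out. From the public API, the easiest way to see an explicit tag is a string that is not valid UTF-8, which the encoder (encode.go below) base64-encodes and tags as binary. A sketch, assuming the rendering in the comment:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v2"
)

func main() {
	raw := string([]byte{0xff, 0xfe}) // not valid UTF-8
	out, err := yaml.Marshal(map[string]string{"data": raw})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected, roughly: data: !!binary //4=
}
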
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
+ preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
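The flags collected by yaml_emitter_analyze_scalar feed directly into the style downgrades above: leading or trailing whitespace and a '#' after a space both rule out plain scalars, so such values are emitted single-quoted. A small sketch, outputs hedged in the comments:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v2"
)

type doc struct {
	Padded string `yaml:"padded"`
	Note   string `yaml:"note"`
}

func main() {
	out, err := yaml.Marshal(doc{Padded: " keep my leading space", Note: "value # not a comment"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected, roughly:
	// padded: ' keep my leading space'
	// note: 'value # not a comment'
}
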
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = 
false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if 
!put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/go.yaml.in/yaml/v2/encode.go b/vendor/go.yaml.in/yaml/v2/encode.go new file mode 100644 index 00000000..0ee738e1 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" 
+ "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/go.yaml.in/yaml/v2/parserc.go b/vendor/go.yaml.in/yaml/v2/parserc.go new file mode 100644 index 00000000..81d05dfe --- /dev/null +++ 
b/vendor/go.yaml.in/yaml/v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
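parserc.go is the event-producing half of the pipeline: peek_token/skip_token pull scanner tokens, and the state machine below turns them into the events that Unmarshal consumes. From user code this machinery is only visible through yaml.Unmarshal and through parse errors, which surface yaml_parser_set_parser_error's problem text and mark as a "yaml: line N: ..." error. A minimal decoding sketch:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v2"
)

type service struct {
	Name  string `yaml:"name"`
	Ports []int  `yaml:"ports"`
}

func main() {
	src := []byte("name: app\nports:\n- 80\n- 443\n")
	var s service
	if err := yaml.Unmarshal(src, &s); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", s) // expected: {Name:app Ports:[80 443]}

	// Malformed input is rejected by the parser states below; the error is
	// expected to carry the problem mark, e.g. a "yaml: line N: ..." message.
	if err := yaml.Unmarshal([]byte("ports: [80, 443"), &s); err != nil {
		fmt.Println("parse error:", err)
	}
}
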
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
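A note on the flow-collection states above: a KEY token met inside a flow sequence is wrapped in its own implicit single-pair mapping, and omitted values fall back to the empty-scalar path. A minimal decode sketch of that behaviour, assuming the published Unmarshal API of the package vendored here (the input document is invented for illustration):

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v2"
)

func main() {
	// The middle entry exercises the single-pair mapping form of
	// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
	var out []interface{}
	if err := yaml.Unmarshal([]byte(`[plain, key: value, other]`), &out); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", out)
	// Expected shape: "plain", then a one-pair map {"key": "value"}, then "other".
}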
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/go.yaml.in/yaml/v2/readerc.go b/vendor/go.yaml.in/yaml/v2/readerc.go new file mode 100644 index 00000000..7c1f5fac --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
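yaml_parser_process_directives above always appends the two default handles ("!" and "!!") after any user-supplied %TAG directives. A simplified standalone sketch of the same handle-expansion idea; the example.com prefix, the helper names, and the ordering heuristic are invented for illustration and are not how the parser itself is structured:

package main

import (
	"fmt"
	"strings"
)

// tagDirective pairs a handle with its prefix, like yaml_tag_directive_t.
type tagDirective struct{ handle, prefix string }

// directives lists the two defaults plus one made-up user directive,
// as if the document contained: %TAG !e! tag:example.com,2014:
var directives = []tagDirective{
	{"!!", "tag:yaml.org,2002:"},
	{"!e!", "tag:example.com,2014:"},
	{"!", "!"}, // primary handle, checked last so it doesn't shadow the others
}

// expandTag rewrites "!handle!suffix" style tags using the table above,
// a simplified stand-in for how tag directives are applied.
func expandTag(tag string) string {
	for _, d := range directives {
		if strings.HasPrefix(tag, d.handle) {
			return d.prefix + tag[len(d.handle):]
		}
	}
	return tag
}

func main() {
	fmt.Println(expandTag("!!str"))   // tag:yaml.org,2002:str
	fmt.Println(expandTag("!e!user")) // tag:example.com,2014:user
	fmt.Println(expandTag("!local"))  // !local
}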
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
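The BOM handling above only ever consumes a UTF-8 or UTF-16 byte order mark; anything else falls through to UTF-8. The same sniffing rule can be sketched in isolation with the standard library (the function name is invented for illustration):

package main

import (
	"bytes"
	"fmt"
)

// sniffEncoding reports the encoding implied by a leading byte order mark,
// defaulting to UTF-8 when no BOM is present, mirroring yaml_parser_determine_encoding.
func sniffEncoding(data []byte) (encoding string, bomLen int) {
	switch {
	case bytes.HasPrefix(data, []byte("\xef\xbb\xbf")):
		return "UTF-8", 3
	case bytes.HasPrefix(data, []byte("\xff\xfe")):
		return "UTF-16LE", 2
	case bytes.HasPrefix(data, []byte("\xfe\xff")):
		return "UTF-16BE", 2
	default:
		return "UTF-8", 0
	}
}

func main() {
	enc, n := sniffEncoding([]byte("\xef\xbb\xbfkey: value\n"))
	fmt.Println(enc, n) // UTF-8 3
	enc, n = sniffEncoding([]byte("key: value\n"))
	fmt.Println(enc, n) // UTF-8 0
}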
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
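The leading-octet switch above classifies sequence width exactly as RFC 3629 prescribes, which the standard library's unicode/utf8 package can confirm. A small cross-check sketch (helper name and sample strings invented):

package main

import (
	"fmt"
	"unicode/utf8"
)

// leadingWidth classifies a UTF-8 leading octet the same way the decoder above does:
// 0xxxxxxx -> 1, 110xxxxx -> 2, 1110xxxx -> 3, 11110xxx -> 4, anything else is invalid.
func leadingWidth(octet byte) int {
	switch {
	case octet&0x80 == 0x00:
		return 1
	case octet&0xE0 == 0xC0:
		return 2
	case octet&0xF0 == 0xE0:
		return 3
	case octet&0xF8 == 0xF0:
		return 4
	default:
		return 0 // invalid leading octet (for example a stray continuation byte)
	}
}

func main() {
	for _, s := range []string{"a", "é", "€", "🙂"} {
		b := []byte(s)
		fmt.Printf("%q first octet 0x%02X width=%d utf8.RuneLen=%d\n",
			s, b[0], leadingWidth(b[0]), utf8.RuneLen([]rune(s)[0]))
	}
}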
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/go.yaml.in/yaml/v2/resolve.go b/vendor/go.yaml.in/yaml/v2/resolve.go new file mode 100644 index 00000000..4120e0c9 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
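The resolution table above is what gives unquoted plain scalars their Go types on decode, while quoting pins a value to a string. A short sketch against the package's published Unmarshal (import path as vendored here; the sample document is invented):

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v2"
)

func main() {
	doc := []byte(`
flag: true
pi: 3.14
hex: 0x1A
nothing: ~
kept: "0x1A"
`)
	var out map[string]interface{}
	if err := yaml.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	for _, k := range []string{"flag", "pi", "hex", "nothing", "kept"} {
		fmt.Printf("%-8s %T %v\n", k, out[k], out[k])
	}
	// Expected types: bool, float64, int (0x1A resolves to 26), nil, and string.
}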
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
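parseTimestamp above leans on time.Parse with a handful of relaxed RFC 3339 style layouts. The same check can be reproduced in isolation; the layouts are copied from allowedTimestampFormats, while the helper name and sample strings are invented:

package main

import (
	"fmt"
	"time"
)

// layouts mirrors allowedTimestampFormats: RFC3339Nano with short date fields,
// a lower-case 't' variant, a space-separated form with no zone, and date-only.
var layouts = []string{
	"2006-1-2T15:4:5.999999999Z07:00",
	"2006-1-2t15:4:5.999999999Z07:00",
	"2006-1-2 15:4:5.999999999",
	"2006-1-2",
}

func parseYAMLTimestamp(s string) (time.Time, bool) {
	for _, layout := range layouts {
		if t, err := time.Parse(layout, s); err == nil {
			return t, true
		}
	}
	return time.Time{}, false
}

func main() {
	for _, s := range []string{"2001-12-15T02:59:43.1Z", "2002-12-14", "not a date"} {
		t, ok := parseYAMLTimestamp(s)
		fmt.Println(s, ok, t.UTC())
	}
}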
+ i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/go.yaml.in/yaml/v2/scannerc.go b/vendor/go.yaml.in/yaml/v2/scannerc.go new file mode 100644 index 00000000..0b9bb603 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/scannerc.go @@ -0,0 +1,2711 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' 
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? 
a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? 
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. 
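read_line above folds CR, CRLF and NEL line breaks down to a single LF while keeping LS (U+2028) and PS (U+2029) as-is. The same normalisation can be sketched with the standard library (the helper name is invented):

package main

import (
	"fmt"
	"strings"
)

// normalizeBreaks mimics the scanner's read_line: CRLF, CR and NEL all become
// LF, while LS and PS are preserved unchanged.
var normalizeBreaks = strings.NewReplacer(
	"\r\n", "\n",
	"\r", "\n",
	"\u0085", "\n",
)

func main() {
	in := "a\r\nb\rc\u0085d\u2028e"
	fmt.Printf("%q\n", normalizeBreaks.Replace(in))
	// "a\nb\nc\nd\u2028e"
}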
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
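The large condition above encodes, among other things, that '-' may begin a plain scalar when it is followed by a non-space character, whereas "- " (dash plus space) introduces a block entry. A decode sketch making that distinction visible, assuming the package's published Unmarshal (input invented for illustration):

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v2"
)

func main() {
	doc := []byte("- -dash-word\n- - nested item\n")
	var out []interface{}
	if err := yaml.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", out)
	// The first entry is the plain scalar "-dash-word"; the second is a nested
	// one-element sequence, because its dash is followed by a space.
}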
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. 
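max_flow_level above caps nesting of flow collections at 10000 so the simple-key stack stays bounded; pathologically deep input is rejected rather than scanned to completion. A quick way to see a deeply nested document fail, hedged to only assert that an error is returned rather than its exact wording:

package main

import (
	"fmt"
	"strings"

	yaml "go.yaml.in/yaml/v2"
)

func main() {
	// 20000 unbalanced '[' characters run past the flow-level limit.
	doc := strings.Repeat("[", 20000)
	var out interface{}
	err := yaml.Unmarshal([]byte(doc), &out)
	fmt.Println(err != nil) // true: the input is rejected
}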
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. 
+ start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. 
+ if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. 
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
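The anchor and alias scanning above is easiest to appreciate from the public API. A short usage sketch, assuming the package is imported under its vendored path go.yaml.in/yaml/v2 (ordinary YAML anchor/alias resolution, shown here only as an illustration):

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v2"
)

func main() {
	// '&x' records an anchor on the first value, '*x' is an alias referring back to it.
	var doc struct {
		A int `yaml:"a"`
		B int `yaml:"b"`
	}
	if err := yaml.Unmarshal([]byte("a: &x 41\nb: *x\n"), &doc); err != nil {
		panic(err)
	}
	fmt.Println(doc.A, doc.B) // 41 41
}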
+ if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. 
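yaml_parser_scan_tag_handle above accepts "!", "!!", or "!word!", where word is drawn from the same alphanumeric set as is_alpha. A simplified, ASCII-only restatement of that shape check (a hypothetical helper for illustration, not the scanner's actual code path):

package main

import "fmt"

// isTagHandle reports whether s has the shape of a YAML tag handle: a leading
// '!', optionally followed by word characters and a closing '!'.
func isTagHandle(s string) bool {
	if len(s) == 0 || s[0] != '!' {
		return false
	}
	if s == "!" {
		return true
	}
	if s[len(s)-1] != '!' {
		return false
	}
	for i := 1; i < len(s)-1; i++ {
		c := s[i]
		ok := c == '_' || c == '-' ||
			(c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
		if !ok {
			return false
		}
	}
	return true
}

func main() {
	for _, h := range []string{"!", "!!", "!e!", "!not a handle", "e!"} {
		fmt.Printf("%q -> %v\n", h, isTagHandle(h))
	}
}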
+ // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. 
+ s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&& + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
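The escape table above handles both the named escapes and the \xNN, \uNNNN and \UNNNNNNNN code point forms; the decoded value is re-encoded as UTF-8 just below. A quick usage sketch through the public API (import path assumed from the vendored location):

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v2"
)

func main() {
	var m map[string]string
	// \xE9 is U+00E9, \u263A is the white smiling face, \n is a literal line feed.
	if err := yaml.Unmarshal([]byte(`msg: "caf\xE9 \u263A\nbye"`), &m); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", m["msg"]) // "café ☺\nbye"
}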
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/go.yaml.in/yaml/v2/sorter.go b/vendor/go.yaml.in/yaml/v2/sorter.go new file mode 100644 index 00000000..4c45e660 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/go.yaml.in/yaml/v2/writerc.go b/vendor/go.yaml.in/yaml/v2/writerc.go new file mode 100644 index 00000000..a2dde608 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
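sorter.go above is what gives encoded maps their stable key order: numeric keys sort before string keys, and digit runs inside strings compare numerically rather than lexically. A small usage sketch via the public Marshal (import path assumed from the vendored location):

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v2"
)

func main() {
	out, err := yaml.Marshal(map[string]int{"a10": 10, "a2": 2, "b1": 1})
	if err != nil {
		panic(err)
	}
	// Keys should be emitted as a2, a10, b1: "a2" precedes "a10" because the
	// embedded digit runs are compared as numbers.
	fmt.Print(string(out))
}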
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/go.yaml.in/yaml/v2/yaml.go b/vendor/go.yaml.in/yaml/v2/yaml.go new file mode 100644 index 00000000..5248e126 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/yaml.go @@ -0,0 +1,478 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/yaml/go-yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. 
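Since the surrounding doc comments describe decoding in the abstract, one concrete usage sketch may help: MapSlice is the order-preserving alternative to a plain map (import path assumed from the vendored location, go.yaml.in/yaml/v2):

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v2"
)

func main() {
	var ms yaml.MapSlice
	if err := yaml.Unmarshal([]byte("b: 2\na: 1\n"), &ms); err != nil {
		panic(err)
	}
	for _, item := range ms {
		fmt.Println(item.Key, item.Value) // prints "b 2" then "a 1", document order preserved
	}
}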
+// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. 
+// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. 
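// Illustrative sketch of the streaming Encoder/Decoder API documented above:
// multiple documents are separated by "---" on encode and read back one at a
// time until io.EOF.
package main

import (
	"bytes"
	"fmt"
	"io"

	yaml "go.yaml.in/yaml/v2"
)

func main() {
	var buf bytes.Buffer

	enc := yaml.NewEncoder(&buf)
	_ = enc.Encode(map[string]int{"a": 1})
	_ = enc.Encode(map[string]int{"b": 2}) // preceded by "---"
	_ = enc.Close()                        // flush buffered output

	dec := yaml.NewDecoder(&buf)
	for {
		var doc map[string]int
		if err := dec.Decode(&doc); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(doc)
	}
}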
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
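// Illustrative sketch of the ",omitempty" flag working together with the
// IsZeroer interface described in the comment above; the status and report
// types are hypothetical.
package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v2"
)

type status struct {
	Code int
}

// IsZero lets a zero-valued status be dropped by the ",omitempty" flag below.
func (s status) IsZero() bool { return s.Code == 0 }

type report struct {
	Name   string `yaml:"name"`
	Status status `yaml:"status,omitempty"`
	Notes  string `yaml:"notes,omitempty"`
}

func main() {
	out, _ := yaml.Marshal(report{Name: "nightly"})
	fmt.Print(string(out)) // only "name: nightly" is emitted
}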
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} + +// FutureLineWrap globally disables line wrapping when encoding long strings. +// This is a temporary and thus deprecated method introduced to faciliate +// migration towards v3, which offers more control of line lengths on +// individual encodings, and has a default matching the behavior introduced +// by this function. +// +// The default formatting of v2 was erroneously changed in v2.3.0 and reverted +// in v2.4.0, at which point this function was introduced to help migration. +func FutureLineWrap() { + disableLineWrapping = true +} diff --git a/vendor/go.yaml.in/yaml/v2/yamlh.go b/vendor/go.yaml.in/yaml/v2/yamlh.go new file mode 100644 index 00000000..f6a9c8e3 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/yamlh.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
+} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. 
+type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return 
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. 
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. 
+ multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/go.yaml.in/yaml/v2/yamlprivateh.go b/vendor/go.yaml.in/yaml/v2/yamlprivateh.go new file mode 100644 index 00000000..8110ce3c --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. 
+func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. 
+ if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go index 21ca3b2e..8ff087df 100644 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -36,7 +36,7 @@ func ScalarBaseMult(dst, scalar *[32]byte) { curve := ecdh.X25519() priv, err := curve.NewPrivateKey(scalar[:]) if err != nil { - panic("curve25519: internal error: scalarBaseMult was not 32 bytes") + panic("curve25519: " + err.Error()) } copy(dst[:], priv.PublicKey().Bytes()) } diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go index f2ec0896..8bfad16c 100644 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -83,6 +83,7 @@ var ( // supportedKexAlgos specifies key-exchange algorithms implemented by this // package in preference order, excluding those with security issues. supportedKexAlgos = []string{ + KeyExchangeMLKEM768X25519, KeyExchangeCurve25519, KeyExchangeECDHP256, KeyExchangeECDHP384, @@ -94,6 +95,7 @@ var ( // defaultKexAlgos specifies the default preference for key-exchange // algorithms in preference order. defaultKexAlgos = []string{ + KeyExchangeMLKEM768X25519, KeyExchangeCurve25519, KeyExchangeECDHP256, KeyExchangeECDHP384, diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go index cf388a92..78aaf031 100644 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -9,7 +9,6 @@ import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rand" - "crypto/subtle" "encoding/binary" "errors" "fmt" @@ -439,6 +438,7 @@ func init() { kexAlgoMap[keyExchangeCurve25519LibSSH] = &curve25519sha256{} kexAlgoMap[InsecureKeyExchangeDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} kexAlgoMap[KeyExchangeDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} + kexAlgoMap[KeyExchangeMLKEM768X25519] = &mlkem768WithCurve25519sha256{} } // curve25519sha256 implements the curve25519-sha256 (formerly known as @@ -454,15 +454,17 @@ func (kp *curve25519KeyPair) generate(rand io.Reader) error { if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { return err } - curve25519.ScalarBaseMult(&kp.pub, &kp.priv) + p, err := curve25519.X25519(kp.priv[:], curve25519.Basepoint) + if err != nil { + return fmt.Errorf("curve25519: %w", err) + } + if len(p) != 32 { + return fmt.Errorf("curve25519: internal error: X25519 returned %d bytes, expected 32", len(p)) + } + copy(kp.pub[:], p) return nil } -// curve25519Zeros is just an array of 32 zero bytes so that we have something -// convenient to compare against in order to reject curve25519 points with the -// wrong order. 
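// Minimal sketch of the curve25519.X25519 API that the kex code above migrates
// to: X25519 rejects low-order peer values itself, which is why the manual
// all-zeros comparison (curve25519Zeros) could be removed. Keys here are
// throwaway values generated for illustration.
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	var aPriv, bPriv [32]byte
	if _, err := rand.Read(aPriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bPriv[:]); err != nil {
		panic(err)
	}

	// Public keys: X25519(priv, Basepoint) replaces ScalarBaseMult.
	aPub, err := curve25519.X25519(aPriv[:], curve25519.Basepoint)
	if err != nil {
		panic(err)
	}
	bPub, err := curve25519.X25519(bPriv[:], curve25519.Basepoint)
	if err != nil {
		panic(err)
	}

	// Shared secret: X25519(priv, peerPub) replaces ScalarMult and returns an
	// error instead of an all-zeros result for invalid peer values.
	s1, err := curve25519.X25519(aPriv[:], bPub)
	if err != nil {
		panic(err)
	}
	s2, err := curve25519.X25519(bPriv[:], aPub)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(s1) == string(s2)) // true
}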
-var curve25519Zeros [32]byte - func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { var kp curve25519KeyPair if err := kp.generate(rand); err != nil { @@ -485,11 +487,9 @@ func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handsh return nil, errors.New("ssh: peer's curve25519 public value has wrong length") } - var servPub, secret [32]byte - copy(servPub[:], reply.EphemeralPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &servPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + secret, err := curve25519.X25519(kp.priv[:], reply.EphemeralPubKey) + if err != nil { + return nil, fmt.Errorf("ssh: peer's curve25519 public value is not valid: %w", err) } h := crypto.SHA256.New() @@ -531,11 +531,9 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh return nil, err } - var clientPub, secret [32]byte - copy(clientPub[:], kexInit.ClientPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &clientPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + secret, err := curve25519.X25519(kp.priv[:], kexInit.ClientPubKey) + if err != nil { + return nil, fmt.Errorf("ssh: peer's curve25519 public value is not valid: %w", err) } hostKeyBytes := priv.PublicKey().Marshal() diff --git a/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go b/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go index c022e411..1ebd7e6d 100644 --- a/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go +++ b/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go @@ -421,20 +421,26 @@ func New(files ...string) (ssh.HostKeyCallback, error) { return certChecker.CheckHostKey, nil } -// Normalize normalizes an address into the form used in known_hosts +// Normalize normalizes an address into the form used in known_hosts. Supports +// IPv4, hostnames, bracketed IPv6. Any other non-standard formats are returned +// with minimal transformation. func Normalize(address string) string { + const defaultSSHPort = "22" + host, port, err := net.SplitHostPort(address) if err != nil { host = address - port = "22" + port = defaultSSHPort } - entry := host - if port != "22" { - entry = "[" + entry + "]:" + port - } else if strings.Contains(host, ":") && !strings.HasPrefix(host, "[") { - entry = "[" + entry + "]" + + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + host = host[1 : len(host)-1] } - return entry + + if port == defaultSSHPort { + return host + } + return "[" + host + "]:" + port } // Line returns a line to add append to the known_hosts files. diff --git a/vendor/golang.org/x/crypto/ssh/mlkem.go b/vendor/golang.org/x/crypto/ssh/mlkem.go index 657e1079..ddc0ed1f 100644 --- a/vendor/golang.org/x/crypto/ssh/mlkem.go +++ b/vendor/golang.org/x/crypto/ssh/mlkem.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.24 - package ssh import ( @@ -13,23 +11,10 @@ import ( "errors" "fmt" "io" - "runtime" - "slices" "golang.org/x/crypto/curve25519" ) -func init() { - // After Go 1.24rc1 mlkem swapped the order of return values of Encapsulate. - // See #70950. 
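// Sketch of the updated knownhosts.Normalize behavior above, with inputs
// chosen for illustration: the default port 22 yields a bare host entry, any
// other port the bracketed [host]:port form.
package main

import (
	"fmt"

	"golang.org/x/crypto/ssh/knownhosts"
)

func main() {
	for _, addr := range []string{
		"example.com",
		"example.com:22",
		"example.com:2222",
		"[2001:db8::1]:2222",
	} {
		fmt.Printf("%-20s -> %s\n", addr, knownhosts.Normalize(addr))
	}
	// example.com          -> example.com
	// example.com:22       -> example.com
	// example.com:2222     -> [example.com]:2222
	// [2001:db8::1]:2222   -> [2001:db8::1]:2222
}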
- if runtime.Version() == "go1.24rc1" { - return - } - supportedKexAlgos = slices.Insert(supportedKexAlgos, 0, KeyExchangeMLKEM768X25519) - defaultKexAlgos = slices.Insert(defaultKexAlgos, 0, KeyExchangeMLKEM768X25519) - kexAlgoMap[KeyExchangeMLKEM768X25519] = &mlkem768WithCurve25519sha256{} -} - // mlkem768WithCurve25519sha256 implements the hybrid ML-KEM768 with // curve25519-sha256 key exchange method, as described by // draft-kampanakis-curdle-ssh-pq-ke-05 section 2.3.3. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index db1c95fa..d3cb9517 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -6,7 +6,7 @@ // cancellation signals, and other request-scoped values across API boundaries // and between processes. // As of Go 1.7 this package is available in the standard library under the -// name [context], and migrating to it can be done automatically with [go fix]. +// name [context]. // // Incoming requests to a server should create a [Context], and outgoing // calls to servers should accept a Context. The chain of function @@ -38,8 +38,6 @@ // // See https://go.dev/blog/context for example code for a server that uses // Contexts. -// -// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs package context import ( @@ -51,36 +49,37 @@ import ( // API boundaries. // // Context's methods may be called by multiple goroutines simultaneously. +// +//go:fix inline type Context = context.Context // Canceled is the error returned by [Context.Err] when the context is canceled // for some reason other than its deadline passing. +// +//go:fix inline var Canceled = context.Canceled // DeadlineExceeded is the error returned by [Context.Err] when the context is canceled // due to its deadline passing. +// +//go:fix inline var DeadlineExceeded = context.DeadlineExceeded // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. It is typically used by the main function, // initialization, and tests, and as the top-level Context for incoming // requests. -func Background() Context { - return background -} +// +//go:fix inline +func Background() Context { return context.Background() } // TODO returns a non-nil, empty Context. Code should use context.TODO when // it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context // parameter). -func TODO() Context { - return todo -} - -var ( - background = context.Background() - todo = context.TODO() -) +// +//go:fix inline +func TODO() Context { return context.TODO() } // A CancelFunc tells an operation to abandon its work. // A CancelFunc does not wait for the work to stop. @@ -95,6 +94,8 @@ type CancelFunc = context.CancelFunc // // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this [Context] complete. +// +//go:fix inline func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { return context.WithCancel(parent) } @@ -108,6 +109,8 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { // // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this [Context] complete. 
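// Sketch tying together the ssh/common.go and ssh/mlkem.go changes above: the
// hybrid ML-KEM768/X25519 exchange is now registered unconditionally and sits
// first in the default preference list, so explicit configuration is optional.
// Host, user and credentials below are hypothetical.
package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	cfg := &ssh.ClientConfig{
		User: "deploy",
		Auth: []ssh.AuthMethod{ssh.Password("example-password")},
		Config: ssh.Config{
			// Pin the post-quantum hybrid key exchange explicitly.
			KeyExchanges: []string{ssh.KeyExchangeMLKEM768X25519},
		},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // for illustration only
	}

	client, err := ssh.Dial("tcp", "ssh.example.com:22", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}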
+// +//go:fix inline func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { return context.WithDeadline(parent, d) } @@ -122,6 +125,8 @@ func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { // defer cancel() // releases resources if slowOperation completes before timeout elapses // return slowOperation(ctx) // } +// +//go:fix inline func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { return context.WithTimeout(parent, timeout) } @@ -139,6 +144,8 @@ func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { // interface{}, context keys often have concrete type // struct{}. Alternatively, exported context key variables' static // type should be a pointer or interface. +// +//go:fix inline func WithValue(parent Context, key, val interface{}) Context { return context.WithValue(parent, key, val) } diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go index ca645d9a..02fe0c2d 100644 --- a/vendor/golang.org/x/net/http2/config.go +++ b/vendor/golang.org/x/net/http2/config.go @@ -55,7 +55,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, CountError: h2.CountError, } - fillNetHTTPServerConfig(&conf, h1) + fillNetHTTPConfig(&conf, h1.HTTP2) setConfigDefaults(&conf, true) return conf } @@ -81,7 +81,7 @@ func configFromTransport(h2 *Transport) http2Config { } if h2.t1 != nil { - fillNetHTTPTransportConfig(&conf, h2.t1) + fillNetHTTPConfig(&conf, h2.t1.HTTP2) } setConfigDefaults(&conf, false) return conf @@ -120,3 +120,45 @@ func adjustHTTP1MaxHeaderSize(n int64) int64 { const typicalHeaders = 10 // conservative return n + typicalHeaders*perFieldOverhead } + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go deleted file mode 100644 index 5b516c55..00000000 --- a/vendor/golang.org/x/net/http2/config_go124.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.24 - -package http2 - -import "net/http" - -// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. 
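// Sketch of where fillNetHTTPConfig above gets its input: the HTTP2 field on
// net/http.Server (added in Go 1.24). The address and certificate paths are
// hypothetical.
package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		HTTP2: &http.HTTP2Config{
			MaxConcurrentStreams: 250,
			PingTimeout:          15 * time.Second,
		},
	}
	// ConfigureServer wires in the x/net/http2 server; its config setup then
	// copies the server's HTTP2 settings into the internal http2Config via
	// fillNetHTTPConfig.
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}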
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { - fillNetHTTPConfig(conf, srv.HTTP2) -} - -// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. -func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { - fillNetHTTPConfig(conf, tr.HTTP2) -} - -func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { - if h2 == nil { - return - } - if h2.MaxConcurrentStreams != 0 { - conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) - } - if h2.MaxEncoderHeaderTableSize != 0 { - conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) - } - if h2.MaxDecoderHeaderTableSize != 0 { - conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) - } - if h2.MaxConcurrentStreams != 0 { - conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) - } - if h2.MaxReadFrameSize != 0 { - conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) - } - if h2.MaxReceiveBufferPerConnection != 0 { - conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) - } - if h2.MaxReceiveBufferPerStream != 0 { - conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) - } - if h2.SendPingTimeout != 0 { - conf.SendPingTimeout = h2.SendPingTimeout - } - if h2.PingTimeout != 0 { - conf.PingTimeout = h2.PingTimeout - } - if h2.WriteByteTimeout != 0 { - conf.WriteByteTimeout = h2.WriteByteTimeout - } - if h2.PermitProhibitedCipherSuites { - conf.PermitProhibitedCipherSuites = true - } - if h2.CountError != nil { - conf.CountError = h2.CountError - } -} diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go deleted file mode 100644 index 060fd6c6..00000000 --- a/vendor/golang.org/x/net/http2/config_pre_go124.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.24 - -package http2 - -import "net/http" - -// Pre-Go 1.24 fallback. -// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. - -func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} - -func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go index 9933c9f8..9921ca09 100644 --- a/vendor/golang.org/x/net/http2/gotrack.go +++ b/vendor/golang.org/x/net/http2/gotrack.go @@ -15,21 +15,32 @@ import ( "runtime" "strconv" "sync" + "sync/atomic" ) var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" +// Setting DebugGoroutines to false during a test to disable goroutine debugging +// results in race detector complaints when a test leaves goroutines running before +// returning. Tests shouldn't do this, of course, but when they do it generally shows +// up as infrequent, hard-to-debug flakes. (See #66519.) +// +// Disable goroutine debugging during individual tests with an atomic bool. +// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition +// here is harmless.) 
+var disableDebugGoroutines atomic.Bool + type goroutineLock uint64 func newGoroutineLock() goroutineLock { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return 0 } return goroutineLock(curGoroutineID()) } func (g goroutineLock) check() { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return } if curGoroutineID() != uint64(g) { @@ -38,7 +49,7 @@ func (g goroutineLock) check() { } func (g goroutineLock) checkNotOn() { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return } if curGoroutineID() == uint64(g) { diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index ea5ae629..6878f8ec 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -15,7 +15,6 @@ package http2 // import "golang.org/x/net/http2" import ( "bufio" - "context" "crypto/tls" "errors" "fmt" @@ -255,15 +254,13 @@ func (cw closeWaiter) Wait() { // idle memory usage with many connections. type bufferedWriter struct { _ incomparable - group synctestGroupInterface // immutable - conn net.Conn // immutable - bw *bufio.Writer // non-nil when data is buffered - byteTimeout time.Duration // immutable, WriteByteTimeout + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { +func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter { return &bufferedWriter{ - group: group, conn: conn, byteTimeout: timeout, } @@ -314,24 +311,18 @@ func (w *bufferedWriter) Flush() error { type bufferedWriterTimeoutWriter bufferedWriter func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { - return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) + return writeWithByteTimeout(w.conn, w.byteTimeout, p) } // writeWithByteTimeout writes to conn. // If more than timeout passes without any bytes being written to the connection, // the write fails. -func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { +func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { if timeout <= 0 { return conn.Write(p) } for { - var now time.Time - if group == nil { - now = time.Now() - } else { - now = group.Now() - } - conn.SetWriteDeadline(now.Add(timeout)) + conn.SetWriteDeadline(time.Now().Add(timeout)) nn, err := conn.Write(p[n:]) n += nn if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { @@ -417,14 +408,3 @@ func (s *sorter) SortStrings(ss []string) { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() - -// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. -// It's defined as an interface here to let us keep synctestGroup entirely test-only -// and not a part of non-test builds. 
-type synctestGroupInterface interface { - Join() - Now() time.Time - NewTimer(d time.Duration) timer - AfterFunc(d time.Duration, f func()) timer - ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 51fca38f..64085f6e 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -176,39 +176,6 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState - - // Synchronization group used for testing. - // Outside of tests, this is nil. - group synctestGroupInterface -} - -func (s *Server) markNewGoroutine() { - if s.group != nil { - s.group.Join() - } -} - -func (s *Server) now() time.Time { - if s.group != nil { - return s.group.Now() - } - return time.Now() -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (s *Server) newTimer(d time.Duration) timer { - if s.group != nil { - return s.group.NewTimer(d) - } - return timeTimer{time.NewTimer(d)} -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (s *Server) afterFunc(d time.Duration, f func()) timer { - if s.group != nil { - return s.group.AfterFunc(d, f) - } - return timeTimer{time.AfterFunc(d, f)} } type serverInternalState struct { @@ -423,6 +390,9 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + if opts == nil { + opts = &ServeConnOpts{} + } s.serveConn(c, opts, nil) } @@ -438,7 +408,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), + bw: newBufferedWriter(c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -638,11 +608,11 @@ type serverConn struct { pingSent bool sentPingData [8]byte goAwayCode ErrCode - shutdownTimer timer // nil until used - idleTimer timer // nil if unused + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused readIdleTimeout time.Duration pingTimeout time.Duration - readIdleTimer timer // nil if unused + readIdleTimer *time.Timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -687,12 +657,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline timer // nil if unused - writeDeadline timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline *time.Timer // nil if unused + writeDeadline *time.Timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -848,7 +818,6 
@@ type readFrameResult struct { // consumer is done with the frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { - sc.srv.markNewGoroutine() gate := make(chan struct{}) gateDone := func() { gate <- struct{}{} } for { @@ -881,7 +850,6 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { - sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -965,22 +933,22 @@ func (sc *serverConn) serve(conf http2Config) { sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout > 0 { - sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } if conf.SendPingTimeout > 0 { sc.readIdleTimeout = conf.SendPingTimeout - sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) defer sc.readIdleTimer.Stop() } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() - lastFrameTime := sc.srv.now() + lastFrameTime := time.Now() loopNum := 0 for { loopNum++ @@ -994,7 +962,7 @@ func (sc *serverConn) serve(conf http2Config) { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: - lastFrameTime = sc.srv.now() + lastFrameTime = time.Now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1077,7 +1045,7 @@ func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { } pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) - now := sc.srv.now() + now := time.Now() if pingAt.After(now) { // We received frames since arming the ping timer. // Reset it for the next possible timeout. @@ -1141,10 +1109,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? defer timer.Stop() select { - case <-timer.C(): + case <-timer.C: return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1160,6 +1128,21 @@ var errChanPool = sync.Pool{ New: func() interface{} { return make(chan error, 1) }, } +func getErrChan() chan error { + if inTests { + // Channels cannot be reused across synctest tests. + return make(chan error, 1) + } else { + return errChanPool.Get().(chan error) + } +} + +func putErrChan(ch chan error) { + if !inTests { + errChanPool.Put(ch) + } +} + var writeDataPool = sync.Pool{ New: func() interface{} { return new(writeData) }, } @@ -1167,7 +1150,7 @@ var writeDataPool = sync.Pool{ // writeDataFromHandler writes DATA response frames from a handler on // the given stream. 
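The `getErrChan`/`putErrChan` helpers above exist because objects parked in a `sync.Pool` must not be reused across synctest-based tests, so pooling is bypassed whenever `inTests` is set. A self-contained restatement of that pattern (a sketch, not the package's code; here `inTests` is a plain variable, whereas the vendored package only sets it when tests run):

```go
// Sketch of the pool-bypass pattern used by getErrChan/putErrChan above:
// reuse channels through a sync.Pool in normal builds, but hand out fresh
// ones under tests, since pooled objects must not cross synctest test
// boundaries.
package main

import (
	"fmt"
	"sync"
)

var inTests bool // set only when running under tests

var errChanPool = sync.Pool{
	New: func() any { return make(chan error, 1) },
}

func getErrChan() chan error {
	if inTests {
		return make(chan error, 1) // never reuse a channel across tests
	}
	return errChanPool.Get().(chan error)
}

func putErrChan(ch chan error) {
	if !inTests {
		errChanPool.Put(ch)
	}
}

func main() {
	ch := getErrChan()
	ch <- nil // typical use: a writer reports completion on the channel
	fmt.Println(<-ch)
	putErrChan(ch) // return it to the pool once drained
}
```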
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := errChanPool.Get().(chan error) + ch := getErrChan() writeArg := writeDataPool.Get().(*writeData) *writeArg = writeData{stream.id, data, endStream} err := sc.writeFrameFromHandler(FrameWriteRequest{ @@ -1199,7 +1182,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea return errStreamClosed } } - errChanPool.Put(ch) + putErrChan(ch) if frameWriteDone { writeDataPool.Put(writeArg) } @@ -1513,7 +1496,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -2118,7 +2101,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2216,7 +2199,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { - st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2405,7 +2388,6 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2454,7 +2436,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro // waiting for this frame to be written, so an http.Flush mid-handler // writes out the correct value of keys, before a handler later potentially // mutates it. - errc = errChanPool.Get().(chan error) + errc = getErrChan() } if err := sc.writeFrameFromHandler(FrameWriteRequest{ write: headerData, @@ -2466,7 +2448,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro if errc != nil { select { case err := <-errc: - errChanPool.Put(errc) + putErrChan(errc) return err case <-sc.doneServing: return errClientDisconnected @@ -2573,7 +2555,7 @@ func (b *requestBody) Read(p []byte) (n int, err error) { if err == io.EOF { b.sawEOF = true } - if b.conn == nil && inTests { + if b.conn == nil { return } b.conn.noteBodyReadFromHandler(b.stream, n, err) @@ -2702,7 +2684,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. 
- date = rws.conn.srv.now().UTC().Format(http.TimeFormat) + date = time.Now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2824,7 +2806,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { + if !deadline.IsZero() && deadline.Before(time.Now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onReadTimeout() @@ -2840,9 +2822,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) + st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(sc.srv.now())) + st.readDeadline.Reset(deadline.Sub(time.Now())) } }) return nil @@ -2850,7 +2832,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { + if !deadline.IsZero() && deadline.Before(time.Now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onWriteTimeout() @@ -2866,9 +2848,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) + st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) + st.writeDeadline.Reset(deadline.Sub(time.Now())) } }) return nil @@ -3147,7 +3129,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { method: opts.Method, url: u, header: cloneHeader(opts.Header), - done: errChanPool.Get().(chan error), + done: getErrChan(), } select { @@ -3164,7 +3146,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { case <-st.cw: return errStreamClosed case err := <-msg.done: - errChanPool.Put(msg.done) + putErrChan(msg.done) return err } } diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go deleted file mode 100644 index 0b1c17b8..00000000 --- a/vendor/golang.org/x/net/http2/timer.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package http2 - -import "time" - -// A timer is a time.Timer, as an interface which can be replaced in tests. -type timer = interface { - C() <-chan time.Time - Reset(d time.Duration) bool - Stop() bool -} - -// timeTimer adapts a time.Timer to the timer interface. 
-type timeTimer struct { - *time.Timer -} - -func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f26356b9..35e39025 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -193,50 +193,6 @@ type Transport struct { type transportTestHooks struct { newclientconn func(*ClientConn) - group synctestGroupInterface -} - -func (t *Transport) markNewGoroutine() { - if t != nil && t.transportTestHooks != nil { - t.transportTestHooks.group.Join() - } -} - -func (t *Transport) now() time.Time { - if t != nil && t.transportTestHooks != nil { - return t.transportTestHooks.group.Now() - } - return time.Now() -} - -func (t *Transport) timeSince(when time.Time) time.Duration { - if t != nil && t.transportTestHooks != nil { - return t.now().Sub(when) - } - return time.Since(when) -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (t *Transport) newTimer(d time.Duration) timer { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.NewTimer(d) - } - return timeTimer{time.NewTimer(d)} -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (t *Transport) afterFunc(d time.Duration, f func()) timer { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.AfterFunc(d, f) - } - return timeTimer{time.AfterFunc(d, f)} -} - -func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.ContextWithTimeout(ctx, d) - } - return context.WithTimeout(ctx, d) } func (t *Transport) maxHeaderListSize() uint32 { @@ -366,7 +322,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer timer + idleTimer *time.Timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -534,14 +490,12 @@ func (cs *clientStream) closeReqBodyLocked() { cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed go func() { - cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) }() } type stickyErrWriter struct { - group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -551,7 +505,7 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + n, err = writeWithByteTimeout(sew.conn, sew.timeout, p) *sew.err = err return n, err } @@ -650,9 +604,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - tm := t.newTimer(d) + tm := time.NewTimer(d) select { - case <-tm.C(): + case <-tm.C: t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): @@ -699,6 +653,7 @@ var ( errClientConnUnusable = errors.New("http2: client conn not usable") errClientConnNotEstablished = errors.New("http2: client conn could not be established") errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnForceClosed = errors.New("http2: client connection force closed via ClientConn.Close") ) // shouldRetryRequest is called by RoundTrip when a 
request fails to get @@ -838,14 +793,11 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro pingTimeout: conf.PingTimeout, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), - lastActive: t.now(), + lastActive: time.Now(), } - var group synctestGroupInterface if t.transportTestHooks != nil { - t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn - group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -857,7 +809,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ - group: group, conn: c, timeout: conf.WriteByteTimeout, err: &cc.werr, @@ -906,7 +857,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // Start the idle timer after the connection is fully initialized. if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) } go cc.readLoop() @@ -917,7 +868,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) + ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1120,7 +1071,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. - return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1186,7 +1137,6 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { done := make(chan struct{}) cancelled := false // guarded by cc.mu go func() { - cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1257,8 +1207,7 @@ func (cc *ClientConn) closeForError(err error) { // // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. func (cc *ClientConn) Close() error { - err := errors.New("http2: client connection force closed via ClientConn.Close") - cc.closeForError(err) + cc.closeForError(errClientConnForceClosed) return nil } @@ -1427,7 +1376,6 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). 
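One small refactor worth noting in the transport hunks above: `ClientConn.Close` now reports the package-level `errClientConnForceClosed` instead of allocating a fresh `errors.New` value on every call, giving the package a single sentinel that internal code and tests can compare against. A generic sketch of that sentinel-error pattern (all names here are illustrative):

```go
// Sketch of the sentinel-error pattern introduced above: a package-level
// error value lets callers detect a forced close with errors.Is instead of
// matching message strings.
package main

import (
	"errors"
	"fmt"
)

var errForceClosed = errors.New("client connection force closed")

func closeConn() error {
	return fmt.Errorf("roundtrip failed: %w", errForceClosed) // wrap to keep context
}

func main() {
	err := closeConn()
	fmt.Println(errors.Is(err, errForceClosed)) // true
}
```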
func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { - cs.cc.t.markNewGoroutine() err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } @@ -1558,9 +1506,9 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := cc.t.newTimer(d) + timer := time.NewTimer(d) defer timer.Stop() - respHeaderTimer = timer.C() + respHeaderTimer = timer.C respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, @@ -1753,7 +1701,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { // Return a fatal error which aborts the retry loop. return errClientConnNotEstablished } - cc.lastActive = cc.t.now() + cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } @@ -2092,10 +2040,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = cc.t.now() + cc.lastActive = time.Now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = cc.t.now() + cc.lastIdle = time.Now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. @@ -2121,7 +2069,6 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. func (cc *ClientConn) readLoop() { - cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2188,9 +2135,9 @@ func (rl *clientConnReadLoop) cleanup() { if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { unusedWaitTime = cc.idleTimeout } - idleTime := cc.t.now().Sub(cc.lastActive) + idleTime := time.Now().Sub(cc.lastActive) if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { - cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() { cc.t.connPool().MarkDead(cc) }) } else { @@ -2250,9 +2197,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.readIdleTimeout - var t timer + var t *time.Timer if readIdleTimeout != 0 { - t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) + t = time.AfterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2998,7 +2945,6 @@ func (cc *ClientConn) Ping(ctx context.Context) error { var pingError error errc := make(chan struct{}) go func() { - cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() if pingError = cc.fr.WritePing(false, p); pingError != nil { @@ -3228,7 +3174,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = cc.t.timeSince(cc.lastActive) + ci.IdleTime = time.Since(cc.lastActive) } cc.mu.Unlock() diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/sync/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go deleted file mode 100644 index 1d8cffae..00000000 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. -// -// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks -// returning errors. -package errgroup - -import ( - "context" - "fmt" - "sync" -) - -type token struct{} - -// A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. A Group should not be reused for different tasks. -// -// A zero Group is valid, has no limit on the number of active goroutines, -// and does not cancel on error. -type Group struct { - cancel func(error) - - wg sync.WaitGroup - - sem chan token - - errOnce sync.Once - err error -} - -func (g *Group) done() { - if g.sem != nil { - <-g.sem - } - g.wg.Done() -} - -// WithContext returns a new Group and an associated Context derived from ctx. -// -// The derived Context is canceled the first time a function passed to Go -// returns a non-nil error or the first time Wait returns, whichever occurs -// first. -func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := context.WithCancelCause(ctx) - return &Group{cancel: cancel}, ctx -} - -// Wait blocks until all function calls from the Go method have returned, then -// returns the first non-nil error (if any) from them. -func (g *Group) Wait() error { - g.wg.Wait() - if g.cancel != nil { - g.cancel(g.err) - } - return g.err -} - -// Go calls the given function in a new goroutine. -// -// The first call to Go must happen before a Wait. -// It blocks until the new goroutine can be added without the number of -// goroutines in the group exceeding the configured limit. -// -// The first goroutine in the group that returns a non-nil error will -// cancel the associated Context, if any. The error will be returned -// by Wait. 
-func (g *Group) Go(f func() error) { - if g.sem != nil { - g.sem <- token{} - } - - g.wg.Add(1) - go func() { - defer g.done() - - // It is tempting to propagate panics from f() - // up to the goroutine that calls Wait, but - // it creates more problems than it solves: - // - it delays panics arbitrarily, - // making bugs harder to detect; - // - it turns f's panic stack into a mere value, - // hiding it from crash-monitoring tools; - // - it risks deadlocks that hide the panic entirely, - // if f's panic leaves the program in a state - // that prevents the Wait call from being reached. - // See #53757, #74275, #74304, #74306. - - if err := f(); err != nil { - g.errOnce.Do(func() { - g.err = err - if g.cancel != nil { - g.cancel(g.err) - } - }) - } - }() -} - -// TryGo calls the given function in a new goroutine only if the number of -// active goroutines in the group is currently below the configured limit. -// -// The return value reports whether the goroutine was started. -func (g *Group) TryGo(f func() error) bool { - if g.sem != nil { - select { - case g.sem <- token{}: - // Note: this allows barging iff channels in general allow barging. - default: - return false - } - } - - g.wg.Add(1) - go func() { - defer g.done() - - if err := f(); err != nil { - g.errOnce.Do(func() { - g.err = err - if g.cancel != nil { - g.cancel(g.err) - } - }) - } - }() - return true -} - -// SetLimit limits the number of active goroutines in this group to at most n. -// A negative value indicates no limit. -// A limit of zero will prevent any new goroutines from being added. -// -// Any subsequent call to the Go method will block until it can add an active -// goroutine without exceeding the configured limit. -// -// The limit must not be modified while any goroutines in the group are active. -func (g *Group) SetLimit(n int) { - if n < 0 { - g.sem = nil - return - } - if len(g.sem) != 0 { - panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) - } - g.sem = make(chan token, n) -} diff --git a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go deleted file mode 100644 index 73687de7..00000000 --- a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.5 - -package plan9 - -import "syscall" - -func fixwd() { - syscall.Fixwd() -} - -func Getwd() (wd string, err error) { - return syscall.Getwd() -} - -func Chdir(path string) error { - return syscall.Chdir(path) -} diff --git a/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_plan9.go index fb945821..7a76489d 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_plan9.go @@ -2,22 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build !go1.5 - package plan9 +import "syscall" + func fixwd() { + syscall.Fixwd() } func Getwd() (wd string, err error) { - fd, err := open(".", O_RDONLY) - if err != nil { - return "", err - } - defer Close(fd) - return Fd2path(fd) + return syscall.Getwd() } func Chdir(path string) error { - return chdir(path) + return syscall.Chdir(path) } diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go index 6e5c81ac..3c7a6d6e 100644 --- a/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -38,9 +38,7 @@ func SchedSetaffinity(pid int, set *CPUSet) error { // Zero clears the set s, so that it contains no CPUs. func (s *CPUSet) Zero() { - for i := range s { - s[i] = 0 - } + clear(s[:]) } func cpuBitsIndex(cpu int) int { diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index abc39554..18a3d9bd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -629,7 +629,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Kill(pid int, signum syscall.Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) -//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten +//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_listen //sys Lstat(path string, stat *Stat_t) (err error) //sys Madvise(b []byte, advice int) (err error) //sys Mkdir(path string, mode uint32) (err error) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index c6545413..b4609c20 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -72,7 +72,7 @@ import ( //go:cgo_import_dynamic libc_kill kill "libc.so" //go:cgo_import_dynamic libc_lchown lchown "libc.so" //go:cgo_import_dynamic libc_link link "libc.so" -//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so" +//go:cgo_import_dynamic libc___xnet_listen __xnet_listen "libsocket.so" //go:cgo_import_dynamic libc_lstat lstat "libc.so" //go:cgo_import_dynamic libc_madvise madvise "libc.so" //go:cgo_import_dynamic libc_mkdir mkdir "libc.so" @@ -221,7 +221,7 @@ import ( //go:linkname procKill libc_kill //go:linkname procLchown libc_lchown //go:linkname procLink libc_link -//go:linkname proc__xnet_llisten libc___xnet_llisten +//go:linkname proc__xnet_listen libc___xnet_listen //go:linkname procLstat libc_lstat //go:linkname procMadvise libc_madvise //go:linkname procMkdir libc_mkdir @@ -371,7 +371,7 @@ var ( procKill, procLchown, procLink, - proc__xnet_llisten, + proc__xnet_listen, procLstat, procMadvise, procMkdir, @@ -1178,7 +1178,7 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index cd236443..944e75a1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ 
b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -632,6 +632,8 @@ const ( IFA_FLAGS = 0x8 IFA_RT_PRIORITY = 0x9 IFA_TARGET_NETNSID = 0xa + IFAL_LABEL = 0x2 + IFAL_ADDRESS = 0x1 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -689,6 +691,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfAddrlblmsg = 0xc SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 @@ -740,6 +743,15 @@ type IfAddrmsg struct { Index uint32 } +type IfAddrlblmsg struct { + Family uint8 + _ uint8 + Prefixlen uint8 + Flags uint8 + Index uint32 + Seq uint32 +} + type IfaCacheinfo struct { Prefered uint32 Valid uint32 @@ -3052,6 +3064,23 @@ const ( ) const ( + TCA_UNSPEC = 0x0 + TCA_KIND = 0x1 + TCA_OPTIONS = 0x2 + TCA_STATS = 0x3 + TCA_XSTATS = 0x4 + TCA_RATE = 0x5 + TCA_FCNT = 0x6 + TCA_STATS2 = 0x7 + TCA_STAB = 0x8 + TCA_PAD = 0x9 + TCA_DUMP_INVISIBLE = 0xa + TCA_CHAIN = 0xb + TCA_HW_OFFLOAD = 0xc + TCA_INGRESS_BLOCK = 0xd + TCA_EGRESS_BLOCK = 0xe + TCA_DUMP_FLAGS = 0xf + TCA_EXT_WARN_MSG = 0x10 RTNLGRP_NONE = 0x0 RTNLGRP_LINK = 0x1 RTNLGRP_NOTIFY = 0x2 @@ -3086,6 +3115,18 @@ const ( RTNLGRP_IPV6_MROUTE_R = 0x1f RTNLGRP_NEXTHOP = 0x20 RTNLGRP_BRVLAN = 0x21 + RTNLGRP_MCTP_IFADDR = 0x22 + RTNLGRP_TUNNEL = 0x23 + RTNLGRP_STATS = 0x24 + RTNLGRP_IPV4_MCADDR = 0x25 + RTNLGRP_IPV6_MCADDR = 0x26 + RTNLGRP_IPV6_ACADDR = 0x27 + TCA_ROOT_UNSPEC = 0x0 + TCA_ROOT_TAB = 0x1 + TCA_ROOT_FLAGS = 0x2 + TCA_ROOT_COUNT = 0x3 + TCA_ROOT_TIME_DELTA = 0x4 + TCA_ROOT_EXT_WARN_MSG = 0x5 ) type CapUserHeader struct { diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go index fc1835d8..bc1ce436 100644 --- a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -52,7 +52,7 @@ var ( ) func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procRegConnectRegistryW.Addr(), uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -60,7 +60,7 @@ func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall } func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + r0, _, _ := syscall.SyscallN(procRegCreateKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -68,7 +68,7 @@ func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class * } func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), 
uintptr(unsafe.Pointer(subkey)), 0) + r0, _, _ := syscall.SyscallN(procRegDeleteKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -76,7 +76,7 @@ func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { } func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + r0, _, _ := syscall.SyscallN(procRegDeleteValueW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -84,7 +84,7 @@ func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { } func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumValueW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -92,7 +92,7 @@ func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint3 } func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + r0, _, _ := syscall.SyscallN(procRegLoadMUIStringW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -100,7 +100,7 @@ func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint } func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + r0, _, _ := syscall.SyscallN(procRegSetValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -108,7 +108,7 @@ func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype } func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go 
b/vendor/golang.org/x/sys/windows/types_windows.go index 958bcf47..993a2297 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1976,6 +1976,12 @@ const ( SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 ) +// FILE_ZERO_DATA_INFORMATION from winioctl.h +type FileZeroDataInformation struct { + FileOffset int64 + BeyondFinalZero int64 +} + const ( ComputerNameNetBIOS = 0 ComputerNameDnsHostname = 1 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index a58bc48b..641a5f4b 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -546,25 +546,25 @@ var ( ) func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_Status.Addr(), uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_ListW.Addr(), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_List_SizeW.Addr(), uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) { - r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0) + r0, _, _ := syscall.SyscallN(procCM_MapCrToWin32Err.Addr(), uintptr(configRet), uintptr(defaultWin32Error)) ret = Errno(r0) return } @@ -574,7 +574,7 @@ func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, if resetToDefault { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenGroups.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -586,7 +586,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok if disableAllPrivileges { _p0 = 1 } - r1, _, e1 := 
syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -594,7 +594,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok } func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procAllocateAndInitializeSid.Addr(), uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -602,7 +602,7 @@ func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, s } func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + r0, _, _ := syscall.SyscallN(procBuildSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -610,7 +610,7 @@ func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries } func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -618,7 +618,7 @@ func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err err } func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName 
*uint16, password *uint16, displayName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfigW.Addr(), uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName))) if r1 == 0 { err = errnoErr(e1) } @@ -626,7 +626,7 @@ func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, e } func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { - r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + r1, _, e1 := syscall.SyscallN(procCheckTokenMembership.Addr(), uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) if r1 == 0 { err = errnoErr(e1) } @@ -634,7 +634,7 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) ( } func CloseServiceHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseServiceHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -642,7 +642,7 @@ func CloseServiceHandle(handle Handle) (err error) { } func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) + r1, _, e1 := syscall.SyscallN(procControlService.Addr(), uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -650,7 +650,7 @@ func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err } func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen))) if r1 == 0 { err = errnoErr(e1) } @@ -658,7 +658,7 @@ func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR } func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid))) if 
r1 == 0 { err = errnoErr(e1) } @@ -675,7 +675,7 @@ func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision ui } func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -683,7 +683,7 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision } func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -691,7 +691,7 @@ func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { } func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) + r1, _, e1 := syscall.SyscallN(procCopySid.Addr(), uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) if r1 == 0 { err = errnoErr(e1) } @@ -703,7 +703,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessAsUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -711,7 +711,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc } func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + r0, _, e1 
:= syscall.SyscallN(procCreateServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -720,7 +720,7 @@ func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access } func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreateWellKnownSid.Addr(), uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid))) if r1 == 0 { err = errnoErr(e1) } @@ -728,7 +728,7 @@ func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, s } func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptAcquireContextW.Addr(), uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -736,7 +736,7 @@ func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16 } func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { - r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) + r1, _, e1 := syscall.SyscallN(procCryptGenRandom.Addr(), uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) if r1 == 0 { err = errnoErr(e1) } @@ -744,7 +744,7 @@ func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { } func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptReleaseContext.Addr(), uintptr(provhandle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -752,7 +752,7 @@ func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { } func DeleteService(service Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteService.Addr(), uintptr(service)) if r1 == 0 { err = errnoErr(e1) } @@ -760,7 +760,7 @@ func DeleteService(service Handle) (err error) { } func DeregisterEventSource(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeregisterEventSource.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -768,7 +768,7 @@ func DeregisterEventSource(handle Handle) (err error) { } func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel 
uint32, tokenType uint32, newToken *Token) (err error) { - r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) + r1, _, e1 := syscall.SyscallN(procDuplicateTokenEx.Addr(), uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) if r1 == 0 { err = errnoErr(e1) } @@ -776,7 +776,7 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes } func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + r1, _, e1 := syscall.SyscallN(procEnumDependentServicesW.Addr(), uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) if r1 == 0 { err = errnoErr(e1) } @@ -784,7 +784,7 @@ func EnumDependentServices(service Handle, activityState uint32, services *ENUM_ } func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + r1, _, e1 := syscall.SyscallN(procEnumServicesStatusExW.Addr(), uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName))) if r1 == 0 { err = errnoErr(e1) } @@ -792,13 +792,13 @@ func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serv } func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { - r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + r0, _, _ := syscall.SyscallN(procEqualSid.Addr(), uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2))) isEqual = r0 != 0 return } func FreeSid(sid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeSid.Addr(), uintptr(unsafe.Pointer(sid))) if r1 != 0 { err = errnoErr(e1) } @@ -806,7 +806,7 @@ func FreeSid(sid *SID) (err error) { } func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { - r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + r1, _, e1 := syscall.SyscallN(procGetAce.Addr(), uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) if r1 == 0 { err = errnoErr(e1) } @@ 
-814,7 +814,7 @@ func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { } func GetLengthSid(sid *SID) (len uint32) { - r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetLengthSid.Addr(), uintptr(unsafe.Pointer(sid))) len = uint32(r0) return } @@ -829,7 +829,7 @@ func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -837,7 +837,7 @@ func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) if r1 == 0 { err = errnoErr(e1) } @@ -853,7 +853,7 @@ func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl if *daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1))) *daclPresent = _p0 != 0 *daclDefaulted = _p1 != 0 if r1 == 0 { @@ -867,7 +867,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau if *groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) *groupDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -876,7 +876,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau } func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorLength.Addr(), uintptr(unsafe.Pointer(sd))) len = uint32(r0) return } @@ -886,7 +886,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau if 
*ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) *ownerDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -895,7 +895,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau } func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -911,7 +911,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl if *saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1))) *saclPresent = _p0 != 0 *saclDefaulted = _p1 != 0 if r1 == 0 { @@ -921,7 +921,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl } func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -929,25 +929,25 @@ func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { - r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidIdentifierAuthority.Addr(), uintptr(unsafe.Pointer(sid))) authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) return } func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthority.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(index)) subAuthority = (*uint32)(unsafe.Pointer(r0)) return } func getSidSubAuthorityCount(sid *SID) (count *uint8) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthorityCount.Addr(), uintptr(unsafe.Pointer(sid))) count = (*uint8)(unsafe.Pointer(r0)) return } func 
GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + r1, _, e1 := syscall.SyscallN(procGetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -955,7 +955,7 @@ func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func ImpersonateSelf(impersonationlevel uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(impersonationlevel)) if r1 == 0 { err = errnoErr(e1) } @@ -963,7 +963,7 @@ func ImpersonateSelf(impersonationlevel uint32) (err error) { } func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { - r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) + r1, _, e1 := syscall.SyscallN(procInitializeSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision)) if r1 == 0 { err = errnoErr(e1) } @@ -979,7 +979,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint if rebootAfterShutdown { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) + r1, _, e1 := syscall.SyscallN(procInitiateSystemShutdownExW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -987,7 +987,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint } func isTokenRestricted(tokenHandle Token) (ret bool, err error) { - r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procIsTokenRestricted.Addr(), uintptr(tokenHandle)) ret = r0 != 0 if !ret { err = errnoErr(e1) @@ -996,25 +996,25 @@ func isTokenRestricted(tokenHandle Token) (ret bool, err error) { } func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(sd))) isValid = r0 != 0 return } func isValidSid(sid *SID) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSid.Addr(), uintptr(unsafe.Pointer(sid))) isValid = r0 != 0 return } func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { - r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + r0, _, _ := syscall.SyscallN(procIsWellKnownSid.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(sidType)) isWellKnown = r0 != 0 return } func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1022,7 +1022,7 @@ func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen } func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1030,7 +1030,7 @@ func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint3 } func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { err = errnoErr(e1) } @@ -1038,7 +1038,7 @@ func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err err } func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) + r1, _, e1 := syscall.SyscallN(procMakeAbsoluteSD.Addr(), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1046,7 +1046,7 @@ func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DE } func 
makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) + r1, _, e1 := syscall.SyscallN(procMakeSelfRelativeSD.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1054,7 +1054,7 @@ func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURIT } func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { - r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + r0, _, _ := syscall.SyscallN(procNotifyServiceStatusChangeW.Addr(), uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1062,7 +1062,7 @@ func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERV } func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) + r1, _, e1 := syscall.SyscallN(procOpenProcessToken.Addr(), uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1070,7 +1070,7 @@ func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { } func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenSCManagerW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1079,7 +1079,7 @@ func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (ha } func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1092,7 +1092,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token if openAsSelf { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1100,7 +1100,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token } func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := 
syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1108,7 +1108,7 @@ func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize } func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfigW.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1120,7 +1120,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf if err != nil { return } - r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) + r1, _, e1 := syscall.SyscallN(procQueryServiceDynamicInformation.Addr(), uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) if r1 == 0 { err = errnoErr(e1) } @@ -1128,7 +1128,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf } func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceLockStatusW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1136,7 +1136,7 @@ func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, b } func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1144,7 +1144,7 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { } func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatusEx.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1152,7 +1152,7 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize } func RegCloseKey(key Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + r0, _, _ := syscall.SyscallN(procRegCloseKey.Addr(), uintptr(key)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1160,7 +1160,7 @@ func RegCloseKey(key Handle) (regerrno error) { } func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, 
lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumKeyExW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1176,7 +1176,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, if asynchronous { _p1 = 1 } - r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0) + r0, _, _ := syscall.SyscallN(procRegNotifyChangeKeyValue.Addr(), uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1184,7 +1184,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, } func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procRegOpenKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1192,7 +1192,7 @@ func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint } func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + r0, _, _ := syscall.SyscallN(procRegQueryInfoKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1200,7 +1200,7 @@ func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint } func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), 
uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + r0, _, _ := syscall.SyscallN(procRegQueryValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1208,7 +1208,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32 } func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) + r0, _, e1 := syscall.SyscallN(procRegisterEventSourceW.Addr(), uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1217,7 +1217,7 @@ func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Hand } func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) + r0, _, e1 := syscall.SyscallN(procRegisterServiceCtrlHandlerExW.Addr(), uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1226,7 +1226,7 @@ func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, cont } func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + r1, _, e1 := syscall.SyscallN(procReportEventW.Addr(), uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) if r1 == 0 { err = errnoErr(e1) } @@ -1234,7 +1234,7 @@ func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrS } func RevertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr()) if r1 == 0 { err = errnoErr(e1) } @@ -1242,7 +1242,7 @@ func RevertToSelf() (err error) { } func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1250,7 +1250,7 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE } func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { - r1, _, e1 := 
syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) + r1, _, e1 := syscall.SyscallN(procSetKernelObjectSecurity.Addr(), uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { err = errnoErr(e1) } @@ -1267,7 +1267,7 @@ func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1275,7 +1275,7 @@ func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) if r1 == 0 { err = errnoErr(e1) } @@ -1291,7 +1291,7 @@ func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl * if daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1303,7 +1303,7 @@ func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaul if groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1315,7 +1315,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul if ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1323,7 +1323,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul } func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { - syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), 
uintptr(unsafe.Pointer(rmControl)), 0) + syscall.SyscallN(procSetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) return } @@ -1336,7 +1336,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * if saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1344,7 +1344,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * } func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1352,7 +1352,7 @@ func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) + r1, _, e1 := syscall.SyscallN(procSetServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceStatus))) if r1 == 0 { err = errnoErr(e1) } @@ -1360,7 +1360,7 @@ func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) } func SetThreadToken(thread *Handle, token Token) (err error) { - r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + r1, _, e1 := syscall.SyscallN(procSetThreadToken.Addr(), uintptr(unsafe.Pointer(thread)), uintptr(token)) if r1 == 0 { err = errnoErr(e1) } @@ -1368,7 +1368,7 @@ func SetThreadToken(thread *Handle, token Token) (err error) { } func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen)) if r1 == 0 { err = errnoErr(e1) } @@ -1376,7 +1376,7 @@ func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) + r1, _, e1 := syscall.SyscallN(procStartServiceCtrlDispatcherW.Addr(), uintptr(unsafe.Pointer(serviceTable))) if r1 == 0 { err = errnoErr(e1) } @@ -1384,7 +1384,7 @@ func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { } func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { - r1, _, 
e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) + r1, _, e1 := syscall.SyscallN(procStartServiceW.Addr(), uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) if r1 == 0 { err = errnoErr(e1) } @@ -1392,7 +1392,7 @@ func StartService(service Handle, numArgs uint32, argVectors **uint16) (err erro } func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { - r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertAddCertificateContextToStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1400,7 +1400,7 @@ func CertAddCertificateContextToStore(store Handle, certContext *CertContext, ad } func CertCloseStore(store Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCertCloseStore.Addr(), uintptr(store), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -1408,7 +1408,7 @@ func CertCloseStore(store Handle, flags uint32) (err error) { } func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + r0, _, e1 := syscall.SyscallN(procCertCreateCertificateContext.Addr(), uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1417,7 +1417,7 @@ func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, en } func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertDeleteCertificateFromStore.Addr(), uintptr(unsafe.Pointer(certContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1425,13 +1425,13 @@ func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { } func CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) { - r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r0, _, _ := syscall.SyscallN(procCertDuplicateCertificateContext.Addr(), uintptr(unsafe.Pointer(certContext))) dupContext = (*CertContext)(unsafe.Pointer(r0)) return } func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + r0, _, e1 := syscall.SyscallN(procCertEnumCertificatesInStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(prevContext))) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1440,7 +1440,7 @@ func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (contex } func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags 
uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) + r0, _, e1 := syscall.SyscallN(procCertFindCertificateInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) cert = (*CertContext)(unsafe.Pointer(r0)) if cert == nil { err = errnoErr(e1) @@ -1449,7 +1449,7 @@ func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags } func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) + r0, _, e1 := syscall.SyscallN(procCertFindChainInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) certchain = (*CertChainContext)(unsafe.Pointer(r0)) if certchain == nil { err = errnoErr(e1) @@ -1458,18 +1458,18 @@ func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint3 } func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) { - r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) + r0, _, _ := syscall.SyscallN(procCertFindExtension.Addr(), uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) ret = (*CertExtension)(unsafe.Pointer(r0)) return } func CertFreeCertificateChain(ctx *CertChainContext) { - syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + syscall.SyscallN(procCertFreeCertificateChain.Addr(), uintptr(unsafe.Pointer(ctx))) return } func CertFreeCertificateContext(ctx *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertFreeCertificateContext.Addr(), uintptr(unsafe.Pointer(ctx))) if r1 == 0 { err = errnoErr(e1) } @@ -1477,7 +1477,7 @@ func CertFreeCertificateContext(ctx *CertContext) (err error) { } func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + r1, _, e1 := syscall.SyscallN(procCertGetCertificateChain.Addr(), uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx))) if r1 == 0 { err = errnoErr(e1) } @@ -1485,13 +1485,13 @@ func CertGetCertificateChain(engine Handle, leaf *CertContext, time 
*Filetime, a } func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) { - r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) + r0, _, _ := syscall.SyscallN(procCertGetNameStringW.Addr(), uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) chars = uint32(r0) return } func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenStore.Addr(), uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1500,7 +1500,7 @@ func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptPr } func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenSystemStoreW.Addr(), uintptr(hprov), uintptr(unsafe.Pointer(name))) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1509,7 +1509,7 @@ func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { } func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { - r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertVerifyCertificateChainPolicy.Addr(), uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1521,7 +1521,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete if *callerFreeProvOrNCryptKey { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procCryptAcquireCertificatePrivateKey.Addr(), uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) *callerFreeProvOrNCryptKey = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -1530,7 +1530,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete } func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), 
uintptr(unsafe.Pointer(decodedLen)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptDecodeObject.Addr(), uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -1538,7 +1538,7 @@ func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte } func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptProtectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1546,7 +1546,7 @@ func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, } func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0) + r1, _, e1 := syscall.SyscallN(procCryptQueryObject.Addr(), uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } @@ -1554,7 +1554,7 @@ func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentT } func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptUnprotectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1562,7 +1562,7 @@ func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBl } func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store 
Handle, err error) { - r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procPFXImportCertStore.Addr(), uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1571,7 +1571,7 @@ func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (sto } func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { - r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + r0, _, _ := syscall.SyscallN(procDnsNameCompare_W.Addr(), uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2))) same = r0 != 0 return } @@ -1586,7 +1586,7 @@ func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSR } func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + r0, _, _ := syscall.SyscallN(procDnsQuery_W.Addr(), uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) if r0 != 0 { status = syscall.Errno(r0) } @@ -1594,12 +1594,12 @@ func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DN } func DnsRecordListFree(rl *DNSRecord, freetype uint32) { - syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) + syscall.SyscallN(procDnsRecordListFree.Addr(), uintptr(unsafe.Pointer(rl)), uintptr(freetype)) return } func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmGetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1607,7 +1607,7 @@ func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmSetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1615,7 +1615,7 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { - r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1623,7 +1623,7 @@ func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { } func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { - r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), 
uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1631,7 +1631,7 @@ func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapter } func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + r0, _, _ := syscall.SyscallN(procGetAdaptersInfo.Addr(), uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1639,7 +1639,7 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { } func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0) + r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1647,7 +1647,7 @@ func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcod } func GetIfEntry(pIfRow *MibIfRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetIfEntry.Addr(), uintptr(unsafe.Pointer(pIfRow))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1655,7 +1655,7 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { } func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) + r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1663,7 +1663,7 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { } func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1675,7 +1675,7 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa if initialNotification { _p0 = 1 } - r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1687,7 +1687,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext if initialNotification { _p0 = 1 } - r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), 
uintptr(unsafe.Pointer(notificationHandle))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1695,7 +1695,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext } func AddDllDirectory(path *uint16) (cookie uintptr, err error) { - r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, e1 := syscall.SyscallN(procAddDllDirectory.Addr(), uintptr(unsafe.Pointer(path))) cookie = uintptr(r0) if cookie == 0 { err = errnoErr(e1) @@ -1704,7 +1704,7 @@ func AddDllDirectory(path *uint16) (cookie uintptr, err error) { } func AssignProcessToJobObject(job Handle, process Handle) (err error) { - r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) + r1, _, e1 := syscall.SyscallN(procAssignProcessToJobObject.Addr(), uintptr(job), uintptr(process)) if r1 == 0 { err = errnoErr(e1) } @@ -1712,7 +1712,7 @@ func AssignProcessToJobObject(job Handle, process Handle) (err error) { } func CancelIo(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procCancelIo.Addr(), uintptr(s)) if r1 == 0 { err = errnoErr(e1) } @@ -1720,7 +1720,7 @@ func CancelIo(s Handle) (err error) { } func CancelIoEx(s Handle, o *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(s), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } @@ -1728,7 +1728,7 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { } func ClearCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procClearCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1736,7 +1736,7 @@ func ClearCommBreak(handle Handle) (err error) { } func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + r1, _, e1 := syscall.SyscallN(procClearCommError.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) if r1 == 0 { err = errnoErr(e1) } @@ -1744,7 +1744,7 @@ func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error } func CloseHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1752,12 +1752,12 @@ func CloseHandle(handle Handle) (err error) { } func ClosePseudoConsole(console Handle) { - syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0) + syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(console)) return } func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1765,7 +1765,7 @@ func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { } func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { - r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, 
uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + r1, _, e1 := syscall.SyscallN(procCreateDirectoryW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa))) if r1 == 0 { err = errnoErr(e1) } @@ -1773,7 +1773,7 @@ func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { } func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventExW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess)) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1782,7 +1782,7 @@ func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, d } func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1791,7 +1791,7 @@ func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialStat } func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateFileMappingW.Addr(), uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1800,7 +1800,7 @@ func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxS } func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1809,7 +1809,7 @@ func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes } func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procCreateHardLinkW.Addr(), uintptr(unsafe.Pointer(filename)), 
uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1817,7 +1817,7 @@ func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr } func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1826,7 +1826,7 @@ func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, thr } func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCreateJobObjectW.Addr(), uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1835,7 +1835,7 @@ func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, } func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateMutexExW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess)) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1848,7 +1848,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 if initialOwner { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateMutexW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1857,7 +1857,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 } func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1866,7 +1866,7 @@ func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances u } func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), 
uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreatePipe.Addr(), uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -1878,7 +1878,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessW.Addr(), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -1886,7 +1886,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA } func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) { - r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0) + r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole))) if r0 != 0 { hr = syscall.Errno(r0) } @@ -1894,7 +1894,7 @@ func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pcons } func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procCreateSymbolicLinkW.Addr(), uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1902,7 +1902,7 @@ func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags u } func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) + r0, _, e1 := syscall.SyscallN(procCreateToolhelp32Snapshot.Addr(), uintptr(flags), uintptr(processId)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1911,7 +1911,7 @@ func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, er } func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + r1, _, e1 := syscall.SyscallN(procDefineDosDeviceW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 { err = errnoErr(e1) } @@ -1919,7 +1919,7 @@ func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err } func DeleteFile(path *uint16) (err error) { - r1, _, e1 := 
syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteFileW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -1927,12 +1927,12 @@ func DeleteFile(path *uint16) (err error) { } func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) { - syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0) + syscall.SyscallN(procDeleteProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist))) return } func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint))) if r1 == 0 { err = errnoErr(e1) } @@ -1940,7 +1940,7 @@ func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { } func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procDeviceIoControl.Addr(), uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1948,7 +1948,7 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff } func DisconnectNamedPipe(pipe Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) if r1 == 0 { err = errnoErr(e1) } @@ -1960,7 +1960,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP if bInheritHandle { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + r1, _, e1 := syscall.SyscallN(procDuplicateHandle.Addr(), uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions)) if r1 == 0 { err = errnoErr(e1) } @@ -1968,7 +1968,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP } func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { - r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + r1, _, e1 := syscall.SyscallN(procEscapeCommFunction.Addr(), uintptr(handle), uintptr(dwFunc)) if r1 == 0 { err = errnoErr(e1) } @@ -1976,12 +1976,12 @@ func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { } func ExitProcess(exitcode uint32) { - syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) + syscall.SyscallN(procExitProcess.Addr(), uintptr(exitcode)) return } func ExpandEnvironmentStrings(src *uint16, dst 
*uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -1990,7 +1990,7 @@ func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, } func FindClose(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindClose.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1998,7 +1998,7 @@ func FindClose(handle Handle) (err error) { } func FindCloseChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindCloseChangeNotification.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2019,7 +2019,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter if watchSubtree { _p1 = 1 } - r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) + r0, _, e1 := syscall.SyscallN(procFindFirstChangeNotificationW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2028,7 +2028,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter } func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2037,7 +2037,7 @@ func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err erro } func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2046,7 +2046,7 @@ func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, b } func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2055,7 +2055,7 @@ func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, er } func FindNextChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindNextChangeNotification.Addr(), 
uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2063,7 +2063,7 @@ func FindNextChangeNotification(handle Handle) (err error) { } func findNextFile1(handle Handle, data *win32finddata1) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procFindNextFileW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -2071,7 +2071,7 @@ func findNextFile1(handle Handle, data *win32finddata1) (err error) { } func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procFindNextVolumeMountPointW.Addr(), uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2079,7 +2079,7 @@ func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uin } func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procFindNextVolumeW.Addr(), uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2087,7 +2087,7 @@ func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) } func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType)) + r0, _, e1 := syscall.SyscallN(procFindResourceW.Addr(), uintptr(module), uintptr(name), uintptr(resType)) resInfo = Handle(r0) if resInfo == 0 { err = errnoErr(e1) @@ -2096,7 +2096,7 @@ func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, } func FindVolumeClose(findVolume Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeClose.Addr(), uintptr(findVolume)) if r1 == 0 { err = errnoErr(e1) } @@ -2104,7 +2104,7 @@ func FindVolumeClose(findVolume Handle) (err error) { } func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeMountPointClose.Addr(), uintptr(findVolumeMountPoint)) if r1 == 0 { err = errnoErr(e1) } @@ -2112,7 +2112,7 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { } func FlushFileBuffers(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2120,7 +2120,7 @@ func FlushFileBuffers(handle Handle) (err error) { } func FlushViewOfFile(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procFlushViewOfFile.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -2132,7 +2132,7 @@ func FormatMessage(flags 
uint32, msgsrc uintptr, msgid uint32, langid uint32, bu if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + r0, _, e1 := syscall.SyscallN(procFormatMessageW.Addr(), uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2141,7 +2141,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu } func FreeEnvironmentStrings(envs *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(envs))) if r1 == 0 { err = errnoErr(e1) } @@ -2149,7 +2149,7 @@ func FreeEnvironmentStrings(envs *uint16) (err error) { } func FreeLibrary(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeLibrary.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2157,7 +2157,7 @@ func FreeLibrary(handle Handle) (err error) { } func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) + r1, _, e1 := syscall.SyscallN(procGenerateConsoleCtrlEvent.Addr(), uintptr(ctrlEvent), uintptr(processGroupID)) if r1 == 0 { err = errnoErr(e1) } @@ -2165,19 +2165,19 @@ func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err erro } func GetACP() (acp uint32) { - r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetACP.Addr()) acp = uint32(r0) return } func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommModemStatus.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpModemStat))) if r1 == 0 { err = errnoErr(e1) } @@ -2185,7 +2185,7 @@ func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { } func GetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -2193,7 +2193,7 @@ func GetCommState(handle Handle, lpDCB *DCB) (err error) { } func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) if r1 == 0 { err = errnoErr(e1) } @@ -2201,13 +2201,13 @@ func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func GetCommandLine() (cmd *uint16) 
{ - r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCommandLineW.Addr()) cmd = (*uint16)(unsafe.Pointer(r0)) return } func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + r1, _, e1 := syscall.SyscallN(procGetComputerNameExW.Addr(), uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } @@ -2215,7 +2215,7 @@ func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { } func GetComputerName(buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) + r1, _, e1 := syscall.SyscallN(procGetComputerNameW.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } @@ -2223,7 +2223,7 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { } func GetConsoleCP() (cp uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetConsoleCP.Addr()) cp = uint32(r0) if cp == 0 { err = errnoErr(e1) @@ -2232,7 +2232,7 @@ func GetConsoleCP() (cp uint32, err error) { } func GetConsoleMode(console Handle, mode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleMode.Addr(), uintptr(console), uintptr(unsafe.Pointer(mode))) if r1 == 0 { err = errnoErr(e1) } @@ -2240,7 +2240,7 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { } func GetConsoleOutputCP() (cp uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetConsoleOutputCP.Addr()) cp = uint32(r0) if cp == 0 { err = errnoErr(e1) @@ -2249,7 +2249,7 @@ func GetConsoleOutputCP() (cp uint32, err error) { } func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleScreenBufferInfo.Addr(), uintptr(console), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2257,7 +2257,7 @@ func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) ( } func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := syscall.SyscallN(procGetCurrentDirectoryW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2266,19 +2266,19 @@ func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { } func GetCurrentProcessId() (pid uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentProcessId.Addr()) pid = uint32(r0) return } func GetCurrentThreadId() (id uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentThreadId.Addr()) id = uint32(r0) return } func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, 
totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes))) if r1 == 0 { err = errnoErr(e1) } @@ -2286,13 +2286,13 @@ func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint6 } func GetDriveType(rootPathName *uint16) (driveType uint32) { - r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetDriveTypeW.Addr(), uintptr(unsafe.Pointer(rootPathName))) driveType = uint32(r0) return } func GetEnvironmentStrings() (envs *uint16, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetEnvironmentStringsW.Addr()) envs = (*uint16)(unsafe.Pointer(r0)) if envs == nil { err = errnoErr(e1) @@ -2301,7 +2301,7 @@ func GetEnvironmentStrings() (envs *uint16, err error) { } func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2310,7 +2310,7 @@ func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32 } func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) + r1, _, e1 := syscall.SyscallN(procGetExitCodeProcess.Addr(), uintptr(handle), uintptr(unsafe.Pointer(exitcode))) if r1 == 0 { err = errnoErr(e1) } @@ -2318,7 +2318,7 @@ func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { } func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procGetFileAttributesExW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2326,7 +2326,7 @@ func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { } func GetFileAttributes(name *uint16) (attrs uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name))) attrs = uint32(r0) if attrs == INVALID_FILE_ATTRIBUTES { err = errnoErr(e1) @@ -2335,7 +2335,7 @@ func GetFileAttributes(name *uint16) (attrs uint32, err error) { } func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandle.Addr(), 
uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -2343,7 +2343,7 @@ func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (e } func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandleEx.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -2351,7 +2351,7 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, } func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -2359,7 +2359,7 @@ func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func GetFileType(filehandle Handle) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileType.Addr(), uintptr(filehandle)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2368,7 +2368,7 @@ func GetFileType(filehandle Handle) (n uint32, err error) { } func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFinalPathNameByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2377,7 +2377,7 @@ func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32 } func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFullPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2386,13 +2386,13 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) ( } func GetLargePageMinimum() (size uintptr) { - r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetLargePageMinimum.Addr()) size = uintptr(r0) return } func GetLastError() (lasterr error) { - r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetLastError.Addr()) if r0 != 0 { lasterr = syscall.Errno(r0) } @@ -2400,7 +2400,7 @@ func GetLastError() (lasterr error) { } func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, 
uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDriveStringsW.Addr(), uintptr(bufferLength), uintptr(unsafe.Pointer(buffer))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2409,7 +2409,7 @@ func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err } func GetLogicalDrives() (drivesBitMask uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDrives.Addr()) drivesBitMask = uint32(r0) if drivesBitMask == 0 { err = errnoErr(e1) @@ -2418,7 +2418,7 @@ func GetLogicalDrives() (drivesBitMask uint32, err error) { } func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetLongPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2427,13 +2427,13 @@ func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err er } func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetMaximumProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetModuleFileNameW.Addr(), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2442,7 +2442,7 @@ func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, } func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + r1, _, e1 := syscall.SyscallN(procGetModuleHandleExW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) if r1 == 0 { err = errnoErr(e1) } @@ -2450,7 +2450,7 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er } func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeClientProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID))) if r1 == 0 { err = errnoErr(e1) } @@ -2458,7 +2458,7 @@ func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err erro } func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + r1, _, e1 := 
syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2466,7 +2466,7 @@ func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, m } func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) if r1 == 0 { err = errnoErr(e1) } @@ -2474,7 +2474,7 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 } func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeServerProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID))) if r1 == 0 { err = errnoErr(e1) } @@ -2486,7 +2486,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetOverlappedResult.Addr(), uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -2494,7 +2494,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa } func GetPriorityClass(process Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetPriorityClass.Addr(), uintptr(process)) ret = uint32(r0) if ret == 0 { err = errnoErr(e1) @@ -2512,7 +2512,7 @@ func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { } func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + r0, _, e1 := syscall.SyscallN(procGetProcAddress.Addr(), uintptr(module), uintptr(unsafe.Pointer(procname))) proc = uintptr(r0) if proc == 0 { err = errnoErr(e1) @@ -2521,7 +2521,7 @@ func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { } func GetProcessId(process Handle) (id uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetProcessId.Addr(), uintptr(process)) id = uint32(r0) if id == 0 { err = errnoErr(e1) @@ -2530,7 +2530,7 @@ func GetProcessId(process Handle) (id uint32, err error) { } func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), 
uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetProcessPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2538,7 +2538,7 @@ func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uin } func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessShutdownParameters.Addr(), uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } @@ -2546,7 +2546,7 @@ func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { } func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessTimes.Addr(), uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime))) if r1 == 0 { err = errnoErr(e1) } @@ -2554,12 +2554,12 @@ func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, } func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { - syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) + syscall.SyscallN(procGetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags))) return } func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout)) if r1 == 0 { err = errnoErr(e1) } @@ -2567,7 +2567,7 @@ func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overl } func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetShortPathNameW.Addr(), uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2576,12 +2576,12 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin } func getStartupInfo(startupInfo *StartupInfo) { - syscall.Syscall(procGetStartupInfoW.Addr(), 1, 
uintptr(unsafe.Pointer(startupInfo)), 0, 0) + syscall.SyscallN(procGetStartupInfoW.Addr(), uintptr(unsafe.Pointer(startupInfo))) return } func GetStdHandle(stdhandle uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetStdHandle.Addr(), uintptr(stdhandle)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2590,7 +2590,7 @@ func GetStdHandle(stdhandle uint32) (handle Handle, err error) { } func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2599,7 +2599,7 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSystemPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2607,17 +2607,17 @@ func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func GetSystemTimeAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimeAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func GetSystemTimePreciseAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimePreciseAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2626,7 +2626,7 @@ func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err erro } func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := syscall.SyscallN(procGetTempPathW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2635,7 +2635,7 @@ func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { } func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetThreadPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = 
errnoErr(e1) } @@ -2643,13 +2643,13 @@ func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func getTickCount64() (ms uint64) { - r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr()) ms = uint64(r0) return } func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetTimeZoneInformation.Addr(), uintptr(unsafe.Pointer(tzi))) rc = uint32(r0) if rc == 0xffffffff { err = errnoErr(e1) @@ -2658,7 +2658,7 @@ func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { } func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetUserPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2666,7 +2666,7 @@ func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16 } func GetVersion() (ver uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetVersion.Addr()) ver = uint32(r0) if ver == 0 { err = errnoErr(e1) @@ -2675,7 +2675,7 @@ func GetVersion() (ver uint32, err error) { } func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := syscall.SyscallN(procGetVolumeInformationByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2683,7 +2683,7 @@ func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeN } func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := syscall.SyscallN(procGetVolumeInformationW.Addr(), 
uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2691,7 +2691,7 @@ func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volume } func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + r1, _, e1 := syscall.SyscallN(procGetVolumeNameForVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) if r1 == 0 { err = errnoErr(e1) } @@ -2699,7 +2699,7 @@ func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint } func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNameW.Addr(), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2707,7 +2707,7 @@ func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength ui } func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNamesForVolumeNameW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength))) if r1 == 0 { err = errnoErr(e1) } @@ -2715,7 +2715,7 @@ func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16 } func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2724,7 +2724,7 @@ func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procInitializeProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -2736,7 +2736,7 @@ func IsWow64Process(handle Handle, isWow64 *bool) (err error) { if *isWow64 { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, 
uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + r1, _, e1 := syscall.SyscallN(procIsWow64Process.Addr(), uintptr(handle), uintptr(unsafe.Pointer(&_p0))) *isWow64 = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -2749,7 +2749,7 @@ func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint1 if err != nil { return } - r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) + r1, _, e1 := syscall.SyscallN(procIsWow64Process2.Addr(), uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) if r1 == 0 { err = errnoErr(e1) } @@ -2766,7 +2766,7 @@ func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, e } func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procLoadLibraryExW.Addr(), uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2784,7 +2784,7 @@ func LoadLibrary(libname string) (handle Handle, err error) { } func _LoadLibrary(libname *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procLoadLibraryW.Addr(), uintptr(unsafe.Pointer(libname))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2793,7 +2793,7 @@ func _LoadLibrary(libname *uint16) (handle Handle, err error) { } func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procLoadResource.Addr(), uintptr(module), uintptr(resInfo)) resData = Handle(r0) if resData == 0 { err = errnoErr(e1) @@ -2802,7 +2802,7 @@ func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { } func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(length)) ptr = uintptr(r0) if ptr == 0 { err = errnoErr(e1) @@ -2811,7 +2811,7 @@ func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { } func LocalFree(hmem Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + r0, _, e1 := syscall.SyscallN(procLocalFree.Addr(), uintptr(hmem)) handle = Handle(r0) if handle != 0 { err = errnoErr(e1) @@ -2820,7 +2820,7 @@ func LocalFree(hmem Handle) (handle Handle, err error) { } func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) + r1, _, e1 := syscall.SyscallN(procLockFileEx.Addr(), uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2828,7 +2828,7 @@ func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, byt } func LockResource(resData Handle) (addr uintptr, err error) { - r0, 
_, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0) + r0, _, e1 := syscall.SyscallN(procLockResource.Addr(), uintptr(resData)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2837,7 +2837,7 @@ func LockResource(resData Handle) (addr uintptr, err error) { } func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procMapViewOfFile.Addr(), uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2846,7 +2846,7 @@ func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow ui } func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2854,7 +2854,7 @@ func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2862,7 +2862,7 @@ func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procMoveFileExW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -2870,7 +2870,7 @@ func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { } func MoveFile(from *uint16, to *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) + r1, _, e1 := syscall.SyscallN(procMoveFileW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to))) if r1 == 0 { err = errnoErr(e1) } @@ -2878,7 +2878,7 @@ func MoveFile(from *uint16, to *uint16) (err error) { } func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { - r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + r0, _, e1 := syscall.SyscallN(procMultiByteToWideChar.Addr(), uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) nwrite = int32(r0) if nwrite == 0 { err = errnoErr(e1) @@ -2891,7 +2891,7 @@ func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenEventW.Addr(), 
uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2904,7 +2904,7 @@ func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenMutexW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2917,7 +2917,7 @@ func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (ha if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + r0, _, e1 := syscall.SyscallN(procOpenProcess.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2930,7 +2930,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) + r0, _, e1 := syscall.SyscallN(procOpenThread.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2939,7 +2939,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand } func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + r1, _, e1 := syscall.SyscallN(procPostQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2947,7 +2947,7 @@ func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overla } func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2955,7 +2955,7 @@ func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2963,7 +2963,7 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0) + r1, _, e1 := syscall.SyscallN(procProcessIdToSessionId.Addr(), uintptr(pid), uintptr(unsafe.Pointer(sessionid))) if r1 == 0 { err = errnoErr(e1) } @@ -2971,7 +2971,7 @@ func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { } func PulseEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := 
syscall.SyscallN(procPulseEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -2979,7 +2979,7 @@ func PulseEvent(event Handle) (err error) { } func PurgeComm(handle Handle, dwFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + r1, _, e1 := syscall.SyscallN(procPurgeComm.Addr(), uintptr(handle), uintptr(dwFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -2987,7 +2987,7 @@ func PurgeComm(handle Handle, dwFlags uint32) (err error) { } func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + r0, _, e1 := syscall.SyscallN(procQueryDosDeviceW.Addr(), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2996,7 +2996,7 @@ func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint3 } func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryFullProcessImageNameW.Addr(), uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -3004,7 +3004,7 @@ func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size } func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) + r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen))) if r1 == 0 { err = errnoErr(e1) } @@ -3012,7 +3012,7 @@ func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobO } func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + r1, _, e1 := syscall.SyscallN(procReadConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl))) if r1 == 0 { err = errnoErr(e1) } @@ -3024,7 +3024,7 @@ func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree if watchSubTree { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + r1, _, e1 := syscall.SyscallN(procReadDirectoryChangesW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), 
uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == 0 { err = errnoErr(e1) } @@ -3036,7 +3036,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procReadFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3044,7 +3044,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( } func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0) + r1, _, e1 := syscall.SyscallN(procReadProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead))) if r1 == 0 { err = errnoErr(e1) } @@ -3052,7 +3052,7 @@ func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size u } func ReleaseMutex(mutex Handle) (err error) { - r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + r1, _, e1 := syscall.SyscallN(procReleaseMutex.Addr(), uintptr(mutex)) if r1 == 0 { err = errnoErr(e1) } @@ -3060,7 +3060,7 @@ func ReleaseMutex(mutex Handle) (err error) { } func RemoveDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3068,7 +3068,7 @@ func RemoveDirectory(path *uint16) (err error) { } func RemoveDllDirectory(cookie uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDllDirectory.Addr(), uintptr(cookie)) if r1 == 0 { err = errnoErr(e1) } @@ -3076,7 +3076,7 @@ func RemoveDllDirectory(cookie uintptr) (err error) { } func ResetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procResetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -3084,7 +3084,7 @@ func ResetEvent(event Handle) (err error) { } func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { - r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0) + r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(pconsole), uintptr(size)) if r0 != 0 { hr = syscall.Errno(r0) } @@ -3092,7 +3092,7 @@ func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { } func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + r0, _, e1 := syscall.SyscallN(procResumeThread.Addr(), uintptr(thread)) ret = uint32(r0) if ret == 0xffffffff { err = errnoErr(e1) @@ -3101,7 +3101,7 @@ func ResumeThread(thread Handle) (ret uint32, err error) { } func SetCommBreak(handle Handle) (err error) { - r1, _, e1 := 
syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3109,7 +3109,7 @@ func SetCommBreak(handle Handle) (err error) { } func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + r1, _, e1 := syscall.SyscallN(procSetCommMask.Addr(), uintptr(handle), uintptr(dwEvtMask)) if r1 == 0 { err = errnoErr(e1) } @@ -3117,7 +3117,7 @@ func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { } func SetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -3125,7 +3125,7 @@ func SetCommState(handle Handle, lpDCB *DCB) (err error) { } func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) if r1 == 0 { err = errnoErr(e1) } @@ -3133,7 +3133,7 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func SetConsoleCP(cp uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3141,7 +3141,7 @@ func SetConsoleCP(cp uint32) (err error) { } func setConsoleCursorPosition(console Handle, position uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCursorPosition.Addr(), uintptr(console), uintptr(position)) if r1 == 0 { err = errnoErr(e1) } @@ -3149,7 +3149,7 @@ func setConsoleCursorPosition(console Handle, position uint32) (err error) { } func SetConsoleMode(console Handle, mode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleMode.Addr(), uintptr(console), uintptr(mode)) if r1 == 0 { err = errnoErr(e1) } @@ -3157,7 +3157,7 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { } func SetConsoleOutputCP(cp uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleOutputCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3165,7 +3165,7 @@ func SetConsoleOutputCP(cp uint32) (err error) { } func SetCurrentDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCurrentDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3173,7 +3173,7 @@ func SetCurrentDirectory(path *uint16) (err error) { } func SetDefaultDllDirectories(directoryFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDefaultDllDirectories.Addr(), uintptr(directoryFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -3190,7 +3190,7 @@ func 
SetDllDirectory(path string) (err error) { } func _SetDllDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDllDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3198,7 +3198,7 @@ func _SetDllDirectory(path *uint16) (err error) { } func SetEndOfFile(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEndOfFile.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3206,7 +3206,7 @@ func SetEndOfFile(handle Handle) (err error) { } func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + r1, _, e1 := syscall.SyscallN(procSetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value))) if r1 == 0 { err = errnoErr(e1) } @@ -3214,13 +3214,13 @@ func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { } func SetErrorMode(mode uint32) (ret uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + r0, _, _ := syscall.SyscallN(procSetErrorMode.Addr(), uintptr(mode)) ret = uint32(r0) return } func SetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -3228,7 +3228,7 @@ func SetEvent(event Handle) (err error) { } func SetFileAttributes(name *uint16, attrs uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + r1, _, e1 := syscall.SyscallN(procSetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(attrs)) if r1 == 0 { err = errnoErr(e1) } @@ -3236,7 +3236,7 @@ func SetFileAttributes(name *uint16, attrs uint32) (err error) { } func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(handle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3244,7 +3244,7 @@ func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) } func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileInformationByHandle.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -3252,7 +3252,7 @@ func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inB } func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetFilePointer.Addr(), uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), 
uintptr(whence)) newlowoffset = uint32(r0) if newlowoffset == 0xffffffff { err = errnoErr(e1) @@ -3261,7 +3261,7 @@ func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence } func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -3269,7 +3269,7 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + r1, _, e1 := syscall.SyscallN(procSetFileValidData.Addr(), uintptr(handle), uintptr(validDataLength)) if r1 == 0 { err = errnoErr(e1) } @@ -3277,7 +3277,7 @@ func SetFileValidData(handle Handle, validDataLength int64) (err error) { } func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procSetHandleInformation.Addr(), uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3285,7 +3285,7 @@ func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) } func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength)) ret = int(r0) if ret == 0 { err = errnoErr(e1) @@ -3294,7 +3294,7 @@ func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobOb } func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetNamedPipeHandleState.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout))) if r1 == 0 { err = errnoErr(e1) } @@ -3302,7 +3302,7 @@ func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uin } func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + r1, _, e1 := syscall.SyscallN(procSetPriorityClass.Addr(), uintptr(process), uintptr(priorityClass)) if r1 == 0 { err = errnoErr(e1) } @@ -3314,7 +3314,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { if disable { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), 
uintptr(_p0), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessPriorityBoost.Addr(), uintptr(process), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -3322,7 +3322,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { } func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessShutdownParameters.Addr(), uintptr(level), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3330,7 +3330,7 @@ func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { } func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3338,7 +3338,7 @@ func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr } func SetStdHandle(stdhandle uint32, handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + r1, _, e1 := syscall.SyscallN(procSetStdHandle.Addr(), uintptr(stdhandle), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3346,7 +3346,7 @@ func SetStdHandle(stdhandle uint32, handle Handle) (err error) { } func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeLabelW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3354,7 +3354,7 @@ func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { } func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3362,7 +3362,7 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro } func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + r1, _, e1 := syscall.SyscallN(procSetupComm.Addr(), uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) if r1 == 0 { err = errnoErr(e1) } @@ -3370,7 +3370,7 @@ func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { } func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { - r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procSizeofResource.Addr(), uintptr(module), uintptr(resInfo)) size = uint32(r0) if size == 0 { err = errnoErr(e1) @@ -3383,13 +3383,13 @@ func SleepEx(milliseconds uint32, alertable bool) (ret uint32) 
{ if alertable { _p0 = 1 } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + r0, _, _ := syscall.SyscallN(procSleepEx.Addr(), uintptr(milliseconds), uintptr(_p0)) ret = uint32(r0) return } func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateJobObject.Addr(), uintptr(job), uintptr(exitCode)) if r1 == 0 { err = errnoErr(e1) } @@ -3397,7 +3397,7 @@ func TerminateJobObject(job Handle, exitCode uint32) (err error) { } func TerminateProcess(handle Handle, exitcode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateProcess.Addr(), uintptr(handle), uintptr(exitcode)) if r1 == 0 { err = errnoErr(e1) } @@ -3405,7 +3405,7 @@ func TerminateProcess(handle Handle, exitcode uint32) (err error) { } func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32First.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3413,7 +3413,7 @@ func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32Next.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3421,7 +3421,7 @@ func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procUnlockFileEx.Addr(), uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3429,7 +3429,7 @@ func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint3 } func UnmapViewOfFile(addr uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnmapViewOfFile.Addr(), uintptr(addr)) if r1 == 0 { err = errnoErr(e1) } @@ -3437,7 +3437,7 @@ func UnmapViewOfFile(addr uintptr) (err error) { } func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procUpdateProcThreadAttribute.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize))) if r1 == 0 { err = errnoErr(e1) } @@ -3445,7 +3445,7 @@ func 
updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, } func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + r0, _, e1 := syscall.SyscallN(procVirtualAlloc.Addr(), uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect)) value = uintptr(r0) if value == 0 { err = errnoErr(e1) @@ -3454,7 +3454,7 @@ func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint3 } func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + r1, _, e1 := syscall.SyscallN(procVirtualFree.Addr(), uintptr(address), uintptr(size), uintptr(freetype)) if r1 == 0 { err = errnoErr(e1) } @@ -3462,7 +3462,7 @@ func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { } func VirtualLock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualLock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3470,7 +3470,7 @@ func VirtualLock(addr uintptr, length uintptr) (err error) { } func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtect.Addr(), uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect))) if r1 == 0 { err = errnoErr(e1) } @@ -3478,7 +3478,7 @@ func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect } func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtectEx.Addr(), uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect))) if r1 == 0 { err = errnoErr(e1) } @@ -3486,7 +3486,7 @@ func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect } func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) + r1, _, e1 := syscall.SyscallN(procVirtualQuery.Addr(), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3494,7 +3494,7 @@ func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintpt } func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualQueryEx.Addr(), uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3502,7 +3502,7 @@ func VirtualQueryEx(process 
Handle, address uintptr, buffer *MemoryBasicInformat } func VirtualUnlock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualUnlock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3510,13 +3510,13 @@ func VirtualUnlock(addr uintptr, length uintptr) (err error) { } func WTSGetActiveConsoleSessionId() (sessionID uint32) { - r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procWTSGetActiveConsoleSessionId.Addr()) sessionID = uint32(r0) return } func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + r1, _, e1 := syscall.SyscallN(procWaitCommEvent.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3528,7 +3528,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil if waitAll { _p0 = 1 } - r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) + r0, _, e1 := syscall.SyscallN(procWaitForMultipleObjects.Addr(), uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds)) event = uint32(r0) if event == 0xffffffff { err = errnoErr(e1) @@ -3537,7 +3537,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil } func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { - r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + r0, _, e1 := syscall.SyscallN(procWaitForSingleObject.Addr(), uintptr(handle), uintptr(waitMilliseconds)) event = uint32(r0) if event == 0xffffffff { err = errnoErr(e1) @@ -3546,7 +3546,7 @@ func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, } func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) + r1, _, e1 := syscall.SyscallN(procWriteConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved))) if r1 == 0 { err = errnoErr(e1) } @@ -3558,7 +3558,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procWriteFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3566,7 +3566,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) } func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) { - r1, _, e1 := 
syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0) + r1, _, e1 := syscall.SyscallN(procWriteProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten))) if r1 == 0 { err = errnoErr(e1) } @@ -3574,7 +3574,7 @@ func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size } func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procAcceptEx.Addr(), uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3582,12 +3582,12 @@ func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32 } func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { - syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) + syscall.SyscallN(procGetAcceptExSockaddrs.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen))) return } func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) + r1, _, e1 := syscall.SyscallN(procTransmitFile.Addr(), uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3595,7 +3595,7 @@ func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint } func NetApiBufferFree(buf *byte) (neterr error) { - r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetApiBufferFree.Addr(), uintptr(unsafe.Pointer(buf))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3603,7 +3603,7 @@ func NetApiBufferFree(buf *byte) (neterr error) { } func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { - r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) + r0, _, _ := syscall.SyscallN(procNetGetJoinInformation.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), 
uintptr(unsafe.Pointer(bufType))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3611,7 +3611,7 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete } func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) { - r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + r0, _, _ := syscall.SyscallN(procNetUserEnum.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3619,7 +3619,7 @@ func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, pr } func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { - r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetUserGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3627,7 +3627,7 @@ func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **by } func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0) + r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3635,7 +3635,7 @@ func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO } func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), 
uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3643,7 +3643,7 @@ func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, i } func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0) + r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3651,7 +3651,7 @@ func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe } func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0) + r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3659,7 +3659,7 @@ func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInf } func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0) + r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3667,7 +3667,7 @@ func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, } func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0) + r0, _, _ := syscall.SyscallN(procNtSetInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3675,7 +3675,7 @@ func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.P } func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen)) + r0, _, _ := syscall.SyscallN(procNtSetSystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3683,13 +3683,13 @@ func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoL } func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) { - r0, _, _ := 
syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress)) + r0, _, _ := syscall.SyscallN(procRtlAddFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress)) ret = r0 != 0 return } func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(acl))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3697,13 +3697,13 @@ func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { } func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) { - r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDeleteFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable))) ret = r0 != 0 return } func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3711,7 +3711,7 @@ func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFile } func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3719,18 +3719,18 @@ func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString } func RtlGetCurrentPeb() (peb *PEB) { - r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procRtlGetCurrentPeb.Addr()) peb = (*PEB)(unsafe.Pointer(r0)) return } func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { - syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) + syscall.SyscallN(procRtlGetNtVersionNumbers.Addr(), uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) return } func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) { - r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlGetVersion.Addr(), uintptr(unsafe.Pointer(info))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3738,23 +3738,23 @@ func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) { } 
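All of these hunks apply one mechanical rewrite: the fixed-arity syscall.Syscall / Syscall6 / Syscall9 / Syscall12 / Syscall15 calls, which take an explicit argument count plus zero padding, become the variadic syscall.SyscallN added in Go 1.18. A minimal before/after sketch of that shape (illustrative only, not code from this diff; assumes a Windows build and uses kernel32's GetTickCount64 as the example proc):

//go:build windows

package main

import (
	"fmt"
	"syscall"
)

var (
	kernel32           = syscall.NewLazyDLL("kernel32.dll")
	procGetTickCount64 = kernel32.NewProc("GetTickCount64")
)

// Old style: the second argument is the number of real arguments, and the
// remaining slots must be zero-padded out to the helper's fixed arity.
func tickCountOld() uint64 {
	r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0)
	return uint64(r0)
}

// New style: SyscallN takes the trap address followed by only the real
// arguments, so the explicit count and the zero padding disappear.
func tickCountNew() uint64 {
	r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr())
	return uint64(r0)
}

func main() {
	fmt.Println(tickCountOld(), tickCountNew())
}

The generated wrappers in the hunks keep their existing error handling (errnoErr on the returned Errno, NTStatus or syscall.Errno on raw return values); only the call form changes.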
func RtlInitString(destinationString *NTString, sourceString *byte) { - syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + syscall.SyscallN(procRtlInitString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString))) return } func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) { - syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + syscall.SyscallN(procRtlInitUnicodeString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString))) return } func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(ntstatus)) ret = syscall.Errno(r0) return } func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) + r0, _, _ := syscall.SyscallN(procCLSIDFromString.Addr(), uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3762,7 +3762,7 @@ func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { } func coCreateGuid(pguid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) + r0, _, _ := syscall.SyscallN(procCoCreateGuid.Addr(), uintptr(unsafe.Pointer(pguid))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3770,7 +3770,7 @@ func coCreateGuid(pguid *GUID) (ret error) { } func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) { - r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0) + r0, _, _ := syscall.SyscallN(procCoGetObject.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3778,7 +3778,7 @@ func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable * } func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) { - r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0) + r0, _, _ := syscall.SyscallN(procCoInitializeEx.Addr(), uintptr(reserved), uintptr(coInit)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3786,23 +3786,23 @@ func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) { } func CoTaskMemFree(address unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) + syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(address)) return } func CoUninitialize() { - syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0) + syscall.SyscallN(procCoUninitialize.Addr()) return } func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { - r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) + r0, _, _ := syscall.SyscallN(procStringFromGUID2.Addr(), uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) chars = int32(r0) return } func EnumProcessModules(process 
Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procEnumProcessModules.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -3810,7 +3810,7 @@ func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uin } func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0) + r1, _, e1 := syscall.SyscallN(procEnumProcessModulesEx.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag)) if r1 == 0 { err = errnoErr(e1) } @@ -3818,7 +3818,7 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u } func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) + r1, _, e1 := syscall.SyscallN(procEnumProcesses.Addr(), uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) if r1 == 0 { err = errnoErr(e1) } @@ -3826,7 +3826,7 @@ func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err } func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleBaseNameW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -3834,7 +3834,7 @@ func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uin } func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleFileNameExW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -3842,7 +3842,7 @@ func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size u } func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleInformation.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb)) if r1 == 0 { err = errnoErr(e1) } @@ -3850,7 +3850,7 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb } func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb)) + r1, _, e1 := 
syscall.SyscallN(procQueryWorkingSetEx.Addr(), uintptr(process), uintptr(pv), uintptr(cb)) if r1 == 0 { err = errnoErr(e1) } @@ -3862,7 +3862,7 @@ func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callb if ret != nil { return } - r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0) + r0, _, _ := syscall.SyscallN(procSubscribeServiceChangeNotifications.Addr(), uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3874,12 +3874,12 @@ func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) { if err != nil { return } - syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0) + syscall.SyscallN(procUnsubscribeServiceChangeNotifications.Addr(), uintptr(subscription)) return } func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) + r1, _, e1 := syscall.SyscallN(procGetUserNameExW.Addr(), uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) if r1&0xff == 0 { err = errnoErr(e1) } @@ -3887,7 +3887,7 @@ func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err er } func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) + r1, _, e1 := syscall.SyscallN(procTranslateNameW.Addr(), uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize))) if r1&0xff == 0 { err = errnoErr(e1) } @@ -3895,7 +3895,7 @@ func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint } func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + r1, _, e1 := syscall.SyscallN(procSetupDiBuildDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) if r1 == 0 { err = errnoErr(e1) } @@ -3903,7 +3903,7 @@ func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiCallClassInstaller.Addr(), uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3911,7 +3911,7 @@ func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInf } func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { - r1, _, e1 := 
syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiCancelDriverInfoSearch.Addr(), uintptr(deviceInfoSet)) if r1 == 0 { err = errnoErr(e1) } @@ -3919,7 +3919,7 @@ func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { } func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupDiClassGuidsFromNameExW.Addr(), uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -3927,7 +3927,7 @@ func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGu } func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupDiClassNameFromGuidExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -3935,7 +3935,7 @@ func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSiz } func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { - r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoListExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) handle = DevInfo(r0) if handle == DevInfo(InvalidHandle) { err = errnoErr(e1) @@ -3944,7 +3944,7 @@ func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineN } func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = 
errnoErr(e1) } @@ -3952,7 +3952,7 @@ func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUI } func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDeviceInfoList.Addr(), uintptr(deviceInfoSet)) if r1 == 0 { err = errnoErr(e1) } @@ -3960,7 +3960,7 @@ func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { } func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) if r1 == 0 { err = errnoErr(e1) } @@ -3968,7 +3968,7 @@ func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiEnumDeviceInfo.Addr(), uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3976,7 +3976,7 @@ func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfo } func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiEnumDriverInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3984,7 +3984,7 @@ func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, d } func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { - r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetupDiGetClassDevsExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) handle = DevInfo(r0) if handle == DevInfo(InvalidHandle) { err = errnoErr(e1) @@ -3993,7 +3993,7 @@ func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintp } func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4001,7 +4001,7 @@ func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInfoListDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData))) if r1 == 0 { err = errnoErr(e1) } @@ -4009,7 +4009,7 @@ func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailDa } func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) if r1 == 0 { err = errnoErr(e1) } @@ -4017,7 +4017,7 @@ func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf } func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstanceIdW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4025,7 +4025,7 @@ func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDevicePropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 
uintptr(unsafe.Pointer(requiredSize)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -4033,7 +4033,7 @@ func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4041,7 +4041,7 @@ func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev } func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDriverInfoDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4049,7 +4049,7 @@ func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4057,7 +4057,7 @@ func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4065,7 +4065,7 @@ func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) { - r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), 
uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) + r0, _, e1 := syscall.SyscallN(procSetupDiOpenDevRegKey.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) key = Handle(r0) if key == InvalidHandle { err = errnoErr(e1) @@ -4074,7 +4074,7 @@ func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Sc } func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize)) if r1 == 0 { err = errnoErr(e1) } @@ -4082,7 +4082,7 @@ func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) if r1 == 0 { err = errnoErr(e1) } @@ -4090,7 +4090,7 @@ func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf } func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize)) if r1 == 0 { err = errnoErr(e1) } @@ -4098,7 +4098,7 @@ func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev } func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4106,7 +4106,7 @@ func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDriverW.Addr(), 
uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4114,7 +4114,7 @@ func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupUninstallOEMInfW.Addr(), uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -4122,7 +4122,7 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er } func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { - r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + r0, _, e1 := syscall.SyscallN(procCommandLineToArgvW.Addr(), uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc))) argv = (**uint16)(unsafe.Pointer(r0)) if argv == nil { err = errnoErr(e1) @@ -4131,7 +4131,7 @@ func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { } func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { - r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, _ := syscall.SyscallN(procSHGetKnownFolderPath.Addr(), uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -4139,7 +4139,7 @@ func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **u } func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { - r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + r1, _, e1 := syscall.SyscallN(procShellExecuteW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) if r1 <= 32 { err = errnoErr(e1) } @@ -4147,12 +4147,12 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui } func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) { - syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param)) + syscall.SyscallN(procEnumChildWindows.Addr(), uintptr(hwnd), uintptr(enumFunc), uintptr(param)) return } func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0) + r1, _, e1 := syscall.SyscallN(procEnumWindows.Addr(), uintptr(enumFunc), uintptr(param)) if r1 == 0 { err = errnoErr(e1) } @@ -4160,7 +4160,7 @@ func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { } func ExitWindowsEx(flags uint32, reason uint32) (err error) { - r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) + r1, _, e1 := syscall.SyscallN(procExitWindowsEx.Addr(), uintptr(flags), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -4168,7 +4168,7 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) { } 
func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) { - r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) + r0, _, e1 := syscall.SyscallN(procGetClassNameW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) copied = int32(r0) if copied == 0 { err = errnoErr(e1) @@ -4177,19 +4177,19 @@ func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, e } func GetDesktopWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetDesktopWindow.Addr()) hwnd = HWND(r0) return } func GetForegroundWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetForegroundWindow.Addr()) hwnd = HWND(r0) return } func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0) + r1, _, e1 := syscall.SyscallN(procGetGUIThreadInfo.Addr(), uintptr(thread), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -4197,19 +4197,19 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { } func GetKeyboardLayout(tid uint32) (hkl Handle) { - r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0) + r0, _, _ := syscall.SyscallN(procGetKeyboardLayout.Addr(), uintptr(tid)) hkl = Handle(r0) return } func GetShellWindow() (shellWindow HWND) { - r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetShellWindow.Addr()) shellWindow = HWND(r0) return } func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0) + r0, _, e1 := syscall.SyscallN(procGetWindowThreadProcessId.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(pid))) tid = uint32(r0) if tid == 0 { err = errnoErr(e1) @@ -4218,25 +4218,25 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { } func IsWindow(hwnd HWND) (isWindow bool) { - r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindow.Addr(), uintptr(hwnd)) isWindow = r0 != 0 return } func IsWindowUnicode(hwnd HWND) (isUnicode bool) { - r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindowUnicode.Addr(), uintptr(hwnd)) isUnicode = r0 != 0 return } func IsWindowVisible(hwnd HWND) (isVisible bool) { - r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindowVisible.Addr(), uintptr(hwnd)) isVisible = r0 != 0 return } func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0) + r0, _, e1 := syscall.SyscallN(procLoadKeyboardLayoutW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags)) hkl = Handle(r0) if hkl == 0 { err = errnoErr(e1) @@ -4245,7 +4245,7 @@ func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { } func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { - r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, 
uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) + r0, _, e1 := syscall.SyscallN(procMessageBoxW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype)) ret = int32(r0) if ret == 0 { err = errnoErr(e1) @@ -4254,13 +4254,13 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i } func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) { - r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0) + r0, _, _ := syscall.SyscallN(procToUnicodeEx.Addr(), uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl)) ret = int32(r0) return } func UnloadKeyboardLayout(hkl Handle) (err error) { - r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnloadKeyboardLayout.Addr(), uintptr(hkl)) if r1 == 0 { err = errnoErr(e1) } @@ -4272,7 +4272,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) ( if inheritExisting { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procCreateEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -4280,7 +4280,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) ( } func DestroyEnvironmentBlock(block *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDestroyEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block))) if r1 == 0 { err = errnoErr(e1) } @@ -4288,7 +4288,7 @@ func DestroyEnvironmentBlock(block *uint16) (err error) { } func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + r1, _, e1 := syscall.SyscallN(procGetUserProfileDirectoryW.Addr(), uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) if r1 == 0 { err = errnoErr(e1) } @@ -4305,7 +4305,7 @@ func GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32 } func _GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0) + r0, _, e1 := syscall.SyscallN(procGetFileVersionInfoSizeW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle))) bufSize = uint32(r0) if bufSize == 0 { err = errnoErr(e1) @@ -4323,7 +4323,7 @@ func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer u } func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileVersionInfoW.Addr(), 
uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer)) if r1 == 0 { err = errnoErr(e1) } @@ -4340,7 +4340,7 @@ func VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer } func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procVerQueryValueW.Addr(), uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4348,7 +4348,7 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint } func TimeBeginPeriod(period uint32) (err error) { - r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + r1, _, e1 := syscall.SyscallN(proctimeBeginPeriod.Addr(), uintptr(period)) if r1 != 0 { err = errnoErr(e1) } @@ -4356,7 +4356,7 @@ func TimeBeginPeriod(period uint32) (err error) { } func TimeEndPeriod(period uint32) (err error) { - r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0) + r1, _, e1 := syscall.SyscallN(proctimeEndPeriod.Addr(), uintptr(period)) if r1 != 0 { err = errnoErr(e1) } @@ -4364,7 +4364,7 @@ func TimeEndPeriod(period uint32) (err error) { } func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { - r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) + r0, _, _ := syscall.SyscallN(procWinVerifyTrustEx.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -4372,12 +4372,12 @@ func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) } func FreeAddrInfoW(addrinfo *AddrinfoW) { - syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) + syscall.SyscallN(procFreeAddrInfoW.Addr(), uintptr(unsafe.Pointer(addrinfo))) return } func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { - r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetAddrInfoW.Addr(), uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result))) if r0 != 0 { sockerr = syscall.Errno(r0) } @@ -4385,7 +4385,7 @@ func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, resul } func WSACleanup() (err error) { - r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procWSACleanup.Addr()) if r1 == socket_error { err = errnoErr(e1) } @@ -4393,7 +4393,7 @@ func WSACleanup() (err error) { } func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) { - r1, _, e1 := syscall.Syscall(procWSADuplicateSocketW.Addr(), 3, uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procWSADuplicateSocketW.Addr(), uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info))) if r1 != 0 { err = errnoErr(e1) } @@ -4401,7 +4401,7 @@ func 
WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err } func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { - r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + r0, _, e1 := syscall.SyscallN(procWSAEnumProtocolsW.Addr(), uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4414,7 +4414,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } @@ -4422,7 +4422,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f } func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + r1, _, e1 := syscall.SyscallN(procWSAIoctl.Addr(), uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == socket_error { err = errnoErr(e1) } @@ -4430,7 +4430,7 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo } func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceBeginW.Addr(), uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) if r1 == socket_error { err = errnoErr(e1) } @@ -4438,7 +4438,7 @@ func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) } func WSALookupServiceEnd(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceEnd.Addr(), uintptr(handle)) if r1 == socket_error { err = errnoErr(e1) } @@ -4446,7 +4446,7 @@ func WSALookupServiceEnd(handle Handle) (err error) { } func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) { - r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceNextW.Addr(), uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet))) if r1 == socket_error { err = errnoErr(e1) } @@ -4454,7 +4454,7 @@ func WSALookupServiceNext(handle Handle, flags 
uint32, size *int32, querySet *WS } func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSARecv.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4462,7 +4462,7 @@ func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32 } func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + r1, _, e1 := syscall.SyscallN(procWSARecvFrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4470,7 +4470,7 @@ func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *ui } func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSASend.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4478,7 +4478,7 @@ func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, } func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + r1, _, e1 := syscall.SyscallN(procWSASendTo.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4486,7 +4486,7 @@ func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32 } func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err 
error) { - r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procWSASocketW.Addr(), uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4495,7 +4495,7 @@ func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, } func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { - r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) + r0, _, _ := syscall.SyscallN(procWSAStartup.Addr(), uintptr(verreq), uintptr(unsafe.Pointer(data))) if r0 != 0 { sockerr = syscall.Errno(r0) } @@ -4503,7 +4503,7 @@ func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { } func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4511,7 +4511,7 @@ func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { } func Closesocket(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procclosesocket.Addr(), uintptr(s)) if r1 == socket_error { err = errnoErr(e1) } @@ -4519,7 +4519,7 @@ func Closesocket(s Handle) (err error) { } func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procconnect.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4536,7 +4536,7 @@ func GetHostByName(name string) (h *Hostent, err error) { } func _GetHostByName(name *byte) (h *Hostent, err error) { - r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procgethostbyname.Addr(), uintptr(unsafe.Pointer(name))) h = (*Hostent)(unsafe.Pointer(r0)) if h == nil { err = errnoErr(e1) @@ -4545,7 +4545,7 @@ func _GetHostByName(name *byte) (h *Hostent, err error) { } func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4562,7 +4562,7 @@ func GetProtoByName(name string) (p *Protoent, err error) { } func _GetProtoByName(name *byte) (p *Protoent, err error) { - r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procgetprotobyname.Addr(), uintptr(unsafe.Pointer(name))) p = (*Protoent)(unsafe.Pointer(r0)) if p == nil { err = errnoErr(e1) @@ -4585,7 +4585,7 @@ func GetServByName(name string, proto string) (s *Servent, err error) { } func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { - r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + r0, _, e1 := syscall.SyscallN(procgetservbyname.Addr(), 
uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto))) s = (*Servent)(unsafe.Pointer(r0)) if s == nil { err = errnoErr(e1) @@ -4594,7 +4594,7 @@ func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { } func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4602,7 +4602,7 @@ func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { } func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { - r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + r1, _, e1 := syscall.SyscallN(procgetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4610,7 +4610,7 @@ func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int3 } func listen(s Handle, backlog int32) (err error) { - r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + r1, _, e1 := syscall.SyscallN(proclisten.Addr(), uintptr(s), uintptr(backlog)) if r1 == socket_error { err = errnoErr(e1) } @@ -4618,7 +4618,7 @@ func listen(s Handle, backlog int32) (err error) { } func Ntohs(netshort uint16) (u uint16) { - r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + r0, _, _ := syscall.SyscallN(procntohs.Addr(), uintptr(netshort)) u = uint16(r0) return } @@ -4628,7 +4628,7 @@ func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen * if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall.SyscallN(procrecvfrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4641,7 +4641,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) + r1, _, e1 := syscall.SyscallN(procsendto.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4649,7 +4649,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( } func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { - r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + r1, _, e1 := syscall.SyscallN(procsetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4657,7 +4657,7 @@ func Setsockopt(s Handle, level int32, optname 
int32, optval *byte, optlen int32 } func shutdown(s Handle, how int32) (err error) { - r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + r1, _, e1 := syscall.SyscallN(procshutdown.Addr(), uintptr(s), uintptr(how)) if r1 == socket_error { err = errnoErr(e1) } @@ -4665,7 +4665,7 @@ func shutdown(s Handle, how int32) (err error) { } func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + r0, _, e1 := syscall.SyscallN(procsocket.Addr(), uintptr(af), uintptr(typ), uintptr(protocol)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4674,7 +4674,7 @@ func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { } func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) + r1, _, e1 := syscall.SyscallN(procWTSEnumerateSessionsW.Addr(), uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count))) if r1 == 0 { err = errnoErr(e1) } @@ -4682,12 +4682,12 @@ func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessio } func WTSFreeMemory(ptr uintptr) { - syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) + syscall.SyscallN(procWTSFreeMemory.Addr(), uintptr(ptr)) return } func WTSQueryUserToken(session uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + r1, _, e1 := syscall.SyscallN(procWTSQueryUserToken.Addr(), uintptr(session), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 5d4096d4..df35bb9a 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,19 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) -- [aranjans](https://github.com/aranjans), Google LLC - [arjan-bal](https://github.com/arjan-bal), Google LLC - [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [erm-g](https://github.com/erm-g), Google LLC - [gtcooke94](https://github.com/gtcooke94), Google LLC -- [purnesh42h](https://github.com/purnesh42h), Google LLC -- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez) +- [aranjans](https://github.com/aranjans) - [canguler](https://github.com/canguler) - [cesarghali](https://github.com/cesarghali) +- [erm-g](https://github.com/erm-g) - [iamqizhao](https://github.com/iamqizhao) - [jeanbza](https://github.com/jeanbza) - [jtattermusch](https://github.com/jtattermusch) @@ -32,5 +30,7 @@ for general contribution guidelines. 
- [matt-kwong](https://github.com/matt-kwong) - [menghanl](https://github.com/menghanl) - [nicolasnoble](https://github.com/nicolasnoble) +- [purnesh42h](https://github.com/purnesh42h) - [srini100](https://github.com/srini100) - [yongni](https://github.com/yongni) +- [zasweq](https://github.com/zasweq) diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go index 0ad6bb1f..360db08e 100644 --- a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go +++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -37,6 +37,8 @@ import ( "google.golang.org/grpc/resolver" ) +var randIntN = rand.IntN + // ChildState is the balancer state of a child along with the endpoint which // identifies the child balancer. type ChildState struct { @@ -112,6 +114,21 @@ type endpointSharding struct { mu sync.Mutex } +// rotateEndpoints returns a slice of all the input endpoints rotated a random +// amount. +func rotateEndpoints(es []resolver.Endpoint) []resolver.Endpoint { + les := len(es) + if les == 0 { + return es + } + r := randIntN(les) + // Make a copy to avoid mutating data beyond the end of es. + ret := make([]resolver.Endpoint, les) + copy(ret, es[r:]) + copy(ret[les-r:], es[:r]) + return ret +} + // UpdateClientConnState creates a child for new endpoints and deletes children // for endpoints that are no longer present. It also updates all the children, // and sends a single synchronous update of the childrens' aggregated state at @@ -133,7 +150,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState newChildren := resolver.NewEndpointMap[*balancerWrapper]() // Update/Create new children. - for _, endpoint := range state.ResolverState.Endpoints { + for _, endpoint := range rotateEndpoints(state.ResolverState.Endpoints) { if _, ok := newChildren.Get(endpoint); ok { // Endpoint child was already created, continue to avoid duplicate // update. @@ -279,7 +296,7 @@ func (es *endpointSharding) updateState() { p := &pickerWithChildStates{ pickers: pickers, childStates: childStates, - next: uint32(rand.IntN(len(pickers))), + next: uint32(randIntN(len(pickers))), } es.cc.UpdateState(balancer.State{ ConnectivityState: aggState, diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index e6204725..67f315a0 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -67,21 +67,21 @@ var ( disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.disconnections", Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", - Unit: "disconnection", + Unit: "{disconnection}", Labels: []string{"grpc.target"}, Default: false, }) connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.connection_attempts_succeeded", Description: "EXPERIMENTAL. Number of successful connection attempts.", - Unit: "attempt", + Unit: "{attempt}", Labels: []string{"grpc.target"}, Default: false, }) connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.connection_attempts_failed", Description: "EXPERIMENTAL. 
Number of failed connection attempts.", - Unit: "attempt", + Unit: "{attempt}", Labels: []string{"grpc.target"}, Default: false, }) diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index cd3eaf8d..3f762285 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -208,7 +208,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) - cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.pickerWrapper = newPickerWrapper() cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) @@ -1076,13 +1076,6 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ - Ctx: ctx, - FullMethodName: method, - }) -} - func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) { if sc == nil { // should never reach here. @@ -1831,7 +1824,7 @@ func (cc *ClientConn) initAuthority() error { } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { cc.authority = auth.OverrideAuthority(cc.parsedTarget) } else if strings.HasPrefix(endpoint, ":") { - cc.authority = "localhost" + endpoint + cc.authority = "localhost" + encodeAuthority(endpoint) } else { cc.authority = encodeAuthority(endpoint) } diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index a63ab606..c8e337cd 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -96,10 +96,11 @@ func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { return c } -// ProtocolInfo provides information regarding the gRPC wire protocol version, -// security protocol, security protocol version in use, server name, etc. +// ProtocolInfo provides static information regarding transport credentials. type ProtocolInfo struct { // ProtocolVersion is the gRPC wire protocol version. + // + // Deprecated: this is unused by gRPC. ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string @@ -109,7 +110,16 @@ type ProtocolInfo struct { // // Deprecated: please use Peer.AuthInfo. SecurityVersion string - // ServerName is the user-configured server name. + // ServerName is the user-configured server name. If set, this overrides + // the default :authority header used for all RPCs on the channel using the + // containing credentials, unless grpc.WithAuthority is set on the channel, + // in which case that setting will take precedence. + // + // This must be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: Users should use grpc.WithAuthority to override the authority + // on a channel instead of configuring the credentials. ServerName string } @@ -173,12 +183,17 @@ type TransportCredentials interface { // Clone makes a copy of this TransportCredentials. 
Clone() TransportCredentials // OverrideServerName specifies the value used for the following: + // // - verifying the hostname on the returned certificates // - as SNI in the client's handshake to support virtual hosting // - as the value for `:authority` header at stream creation time // - // Deprecated: use grpc.WithAuthority instead. Will be supported - // throughout 1.x. + // The provided string should be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: this method is unused by gRPC. Users should use + // grpc.WithAuthority to override the authority on a channel instead of + // configuring the credentials. OverrideServerName(string) error } diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 20f65f7b..8277be7d 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -110,14 +110,14 @@ func (c tlsCreds) Info() ProtocolInfo { func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints cfg := credinternal.CloneTLSConfig(c.config) - if cfg.ServerName == "" { - serverName, _, err := net.SplitHostPort(authority) - if err != nil { - // If the authority had no host port or if the authority cannot be parsed, use it as-is. - serverName = authority - } - cfg.ServerName = serverName + + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority } + cfg.ServerName = serverName + conn := tls.Client(rawConn, cfg) errChannel := make(chan error, 1) go func() { @@ -259,9 +259,11 @@ func applyDefaults(c *tls.Config) *tls.Config { // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } @@ -271,9 +273,11 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. 
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { b, err := os.ReadFile(certFile) if err != nil { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index ec0ca89c..7a5ac2e7 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -608,6 +608,8 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt // WithAuthority returns a DialOption that specifies the value to be used as the // :authority pseudo-header and as the server name in authentication handshake. +// This overrides all other ways of setting authority on the channel, but can be +// overridden per-call by using grpc.CallAuthority. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 2fdaed88..7e060f5e 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -26,26 +26,32 @@ import ( ) var ( - // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + // EnableTXTServiceConfig is set if the DNS resolver should perform TXT + // lookups for service config ("GRPC_ENABLE_TXT_SERVICE_CONFIG" is not + // "false"). + EnableTXTServiceConfig = boolFromEnv("GRPC_ENABLE_TXT_SERVICE_CONFIG", true) + + // TXTErrIgnore is set if TXT errors should be ignored + // ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 // entries but may be overridden by setting the environment variable // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this // option is present for backward compatibility. This option may be overridden // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) - // XDSFallbackSupport is the env variable that controls whether support for - // xDS fallback is turned on. If this is unset or is false, only the first - // xDS server in the list of server configs will be used. - XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true) + // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used // instead of the exiting pickfirst implementation. 
This can be disabled by // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 3ac798e8..2699223a 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -182,35 +182,6 @@ var ( // other features, including the CSDS service. NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) - // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster - // Specifier Plugin for testing purposes, regardless of the XDSRLS environment - // variable. - // - // TODO: Remove this function once the RLS env var is removed. - RegisterRLSClusterSpecifierPluginForTesting func() - - // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster - // Specifier Plugin for testing purposes. This is needed because there is no way - // to unregister the RLS Cluster Specifier Plugin after registering it solely - // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). - // - // TODO: Remove this function once the RLS env var is removed. - UnregisterRLSClusterSpecifierPluginForTesting func() - - // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing - // purposes, regardless of the RBAC environment variable. - // - // TODO: Remove this function once the RBAC env var is removed. - RegisterRBACHTTPFilterForTesting func() - - // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for - // testing purposes. This is needed because there is no way to unregister the - // HTTP Filter after registering it solely for testing purposes using - // RegisterRBACHTTPFilterForTesting(). - // - // TODO: Remove this function once the RBAC env var is removed. - UnregisterRBACHTTPFilterForTesting func() - // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index ba5c5a95..ada5251c 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -132,13 +132,13 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts // DNS address (non-IP). ctx, cancel := context.WithCancel(context.Background()) d := &dnsResolver{ - host: host, - port: port, - ctx: ctx, - cancel: cancel, - cc: cc, - rn: make(chan struct{}, 1), - disableServiceConfig: opts.DisableServiceConfig, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + enableServiceConfig: envconfig.EnableTXTServiceConfig && !opts.DisableServiceConfig, } d.resolver, err = internal.NewNetResolver(target.URL.Host) @@ -181,8 +181,8 @@ type dnsResolver struct { // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). 
- wg sync.WaitGroup - disableServiceConfig bool + wg sync.WaitGroup + enableServiceConfig bool } // ResolveNow invoke an immediate resolution of the target that this @@ -346,7 +346,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { if len(srv) > 0 { state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) } - if !d.disableServiceConfig { + if d.enableServiceConfig { state.ServiceConfig = d.lookupTXT(ctx) } return &state, nil diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 3dea2357..d954a64c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -277,11 +277,13 @@ func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status if err == nil { // transport has not been closed // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. + s.hdrMu.Lock() for _, sh := range ht.stats { sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } + s.hdrMu.Unlock() } ht.Close(errors.New("finished writing status")) return err diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 9f725e15..83cee314 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -1353,10 +1353,10 @@ func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCo // called to interrupt the potential blocking on other goroutines. s.cancel() - oldState := s.swapState(streamDone) - if oldState == streamDone { - return - } + // We can't return early even if the stream's state is "done" as the state + // might have been set by the `finishStream` method. Deleting the stream via + // `finishStream` can get blocked on flow control. + s.swapState(streamDone) t.deleteStream(s, eosReceived) t.controlBuf.put(&cleanupStream{ diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index a2d2a798..aa52bfe9 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -29,7 +29,6 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) @@ -48,14 +47,11 @@ type pickerGeneration struct { // actions and unblock when there's a picker update. type pickerWrapper struct { // If pickerGen holds a nil pointer, the pickerWrapper is closed. 
- pickerGen atomic.Pointer[pickerGeneration] - statsHandlers []stats.Handler // to record blocking picker calls + pickerGen atomic.Pointer[pickerGeneration] } -func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - pw := &pickerWrapper{ - statsHandlers: statsHandlers, - } +func newPickerWrapper() *pickerWrapper { + pw := &pickerWrapper{} pw.pickerGen.Store(&pickerGeneration{ blockingCh: make(chan struct{}), }) @@ -93,6 +89,12 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { } } +type pick struct { + transport transport.ClientTransport // the selected transport + result balancer.PickResult // the contents of the pick from the LB policy + blocked bool // set if a picker call queued for a new picker +} + // pick returns the transport that will be used for the RPC. // It may block in the following cases: // - there's no picker @@ -100,15 +102,16 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (pick, error) { var ch chan struct{} var lastPickErr error + pickBlocked := false for { pg := pw.pickerGen.Load() if pg == nil { - return nil, balancer.PickResult{}, ErrClientConnClosing + return pick{}, ErrClientConnClosing } if pg.picker == nil { ch = pg.blockingCh @@ -127,9 +130,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } switch ctx.Err() { case context.DeadlineExceeded: - return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) + return pick{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) + return pick{}, status.Error(codes.Canceled, errStr) } case <-ch: } @@ -145,9 +148,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. // In the second case, the only way it will get to this conditional is // if there is a new picker. if ch != nil { - for _, sh := range pw.statsHandlers { - sh.HandleRPC(ctx, &stats.PickerUpdated{}) - } + pickBlocked = true } ch = pg.blockingCh @@ -164,7 +165,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if istatus.IsRestrictedControlPlaneCode(st) { err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) } - return nil, balancer.PickResult{}, dropError{error: err} + return pick{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -172,7 +173,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) + return pick{}, status.Error(codes.Unavailable, err.Error()) } acbw, ok := pickResult.SubConn.(*acBalancerWrapper) @@ -183,9 +184,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { doneChannelzWrapper(acbw, &pickResult) - return t, pickResult, nil } - return t, pickResult, nil + return pick{transport: t, result: pickResult, blocked: pickBlocked}, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index b84ef26d..8e6af951 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -332,6 +332,11 @@ type AuthorityOverrider interface { // OverrideAuthority returns the authority to use for a ClientConn with the // given target. The implementation must generate it without blocking, // typically in line, and must keep it unchanged. + // + // The returned string must be a valid ":authority" header value, i.e. be + // encoded according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2) as + // necessary. OverrideAuthority(Target) string } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 70fe23f5..1da2a542 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -1598,6 +1598,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv s: stream, p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), + desc: sd, maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index baf7740e..10bf998a 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -64,15 +64,21 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} -// PickerUpdated indicates that the LB policy provided a new picker while the -// RPC was waiting for one. -type PickerUpdated struct{} +// DelayedPickComplete indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +type DelayedPickComplete struct{} -// IsClient indicates if the stats information is from client side. Only Client -// Side interfaces with a Picker, thus always returns true. -func (*PickerUpdated) IsClient() bool { return true } +// IsClient indicates DelayedPickComplete is available on the client. +func (*DelayedPickComplete) IsClient() bool { return true } -func (*PickerUpdated) isRPCStats() {} +func (*DelayedPickComplete) isRPCStats() {} + +// PickerUpdated indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +// +// Deprecated: will be removed in a future release; use DelayedPickComplete +// instead. +type PickerUpdated = DelayedPickComplete // InPayload contains stats about an incoming payload. 
type InPayload struct { diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index ca694892..d9bbd4c5 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -469,8 +469,9 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) func (a *csAttempt) getTransport() error { cs := a.cs - var err error - a.transport, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + pickInfo := balancer.PickInfo{Ctx: a.ctx, FullMethodName: cs.callHdr.Method} + pick, err := cs.cc.pickerWrapper.pick(a.ctx, cs.callInfo.failFast, pickInfo) + a.transport, a.pickResult = pick.transport, pick.result if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -481,6 +482,11 @@ func (a *csAttempt) getTransport() error { if a.trInfo != nil { a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr()) } + if pick.blocked { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.DelayedPickComplete{}) + } + } return nil } @@ -1580,6 +1586,7 @@ type serverStream struct { s *transport.ServerStream p *parser codec baseCodec + desc *StreamDesc compressorV0 Compressor compressorV1 encoding.Compressor @@ -1588,6 +1595,8 @@ type serverStream struct { sendCompressorName string + recvFirstMsg bool // set after the first message is received + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo @@ -1774,6 +1783,10 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, chc) } } + // Received no request msg for non-client streaming rpcs. + if !ss.desc.ClientStreams && !ss.recvFirstMsg { + return status.Error(codes.Internal, "cardinality violation: received no request message from non-client-streaming RPC") + } return err } if err == io.ErrUnexpectedEOF { @@ -1781,6 +1794,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } return toRPCErr(err) } + ss.recvFirstMsg = true if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ @@ -1800,7 +1814,19 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, cm) } } - return nil + + if ss.desc.ClientStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-client-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF { + return nil + } else if err != nil { + return err + } + return status.Error(codes.Internal, "cardinality violation: received multiple request messages for non-client-streaming RPC") } // MethodFromServerStream returns the method string for the input stream. diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 8b0e5f97..468f1106 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.74.2" +const Version = "1.75.1" diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index a0aad277..66ba9068 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -13,8 +13,10 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" ) -var defaultsCache = make(map[Edition]EditionFeatures) -var defaultsKeys = []Edition{} +var ( + defaultsCache = make(map[Edition]EditionFeatures) + defaultsKeys = []Edition{} +) func init() { unmarshalEditionDefaults(editiondefaults.Defaults) @@ -41,7 +43,7 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { b = b[m:] parent.StripEnumPrefix = int(v) default: - panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) + panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num)) } } return parent @@ -76,7 +78,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { // DefaultSymbolVisibility is enforced in protoc, runtimes should not // inspect this value. default: - panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) + panic(fmt.Sprintf("unknown field number %d while unmarshalling FeatureSet", num)) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -150,7 +152,7 @@ func unmarshalEditionDefaults(b []byte) { _, m := protowire.ConsumeVarint(b) b = b[m:] default: - panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num)) + panic(fmt.Sprintf("unknown field number %d while unmarshalling EditionDefault", num)) } } } diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go index df8f9185..3ceb6fa7 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -27,6 +27,7 @@ const ( Api_SourceContext_field_name protoreflect.Name = "source_context" Api_Mixins_field_name protoreflect.Name = "mixins" Api_Syntax_field_name protoreflect.Name = "syntax" + Api_Edition_field_name protoreflect.Name = "edition" Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" @@ -35,6 +36,7 @@ const ( Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" + Api_Edition_field_fullname protoreflect.FullName = "google.protobuf.Api.edition" ) // Field numbers for google.protobuf.Api. @@ -46,6 +48,7 @@ const ( Api_SourceContext_field_number protoreflect.FieldNumber = 5 Api_Mixins_field_number protoreflect.FieldNumber = 6 Api_Syntax_field_number protoreflect.FieldNumber = 7 + Api_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Method. 
@@ -63,6 +66,7 @@ const ( Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" Method_Options_field_name protoreflect.Name = "options" Method_Syntax_field_name protoreflect.Name = "syntax" + Method_Edition_field_name protoreflect.Name = "edition" Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" @@ -71,6 +75,7 @@ const ( Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" + Method_Edition_field_fullname protoreflect.FullName = "google.protobuf.Method.edition" ) // Field numbers for google.protobuf.Method. @@ -82,6 +87,7 @@ const ( Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 Method_Options_field_number protoreflect.FieldNumber = 6 Method_Syntax_field_number protoreflect.FieldNumber = 7 + Method_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Mixin. diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index a53364c5..31e79a65 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 7 + Patch = 9 PreRelease = "" ) diff --git a/vendor/modules.txt b/vendor/modules.txt index 0595f6a0..72c4be64 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -51,6 +51,9 @@ github.com/ProtonMail/go-crypto/openpgp/packet github.com/ProtonMail/go-crypto/openpgp/s2k github.com/ProtonMail/go-crypto/openpgp/x25519 github.com/ProtonMail/go-crypto/openpgp/x448 +# github.com/atotto/clipboard v0.1.4 +## explicit +github.com/atotto/clipboard # github.com/aymanbagabas/go-osc52/v2 v2.0.1 ## explicit; go 1.16 github.com/aymanbagabas/go-osc52/v2 @@ -65,8 +68,15 @@ github.com/cenkalti/backoff/v5 # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/charmbracelet/bubbletea v1.3.6 +# github.com/charmbracelet/bubbles v0.21.0 ## explicit; go 1.23.0 +github.com/charmbracelet/bubbles/cursor +github.com/charmbracelet/bubbles/key +github.com/charmbracelet/bubbles/runeutil +github.com/charmbracelet/bubbles/spinner +github.com/charmbracelet/bubbles/textinput +# github.com/charmbracelet/bubbletea v1.3.10 +## explicit; go 1.24.0 github.com/charmbracelet/bubbletea # github.com/charmbracelet/colorprofile v0.3.2 ## explicit; go 1.23.0 @@ -78,8 +88,8 @@ github.com/charmbracelet/lipgloss/table # github.com/charmbracelet/log v0.4.2 ## explicit; go 1.19 github.com/charmbracelet/log -# github.com/charmbracelet/x/ansi v0.10.1 -## explicit; go 1.23.0 +# github.com/charmbracelet/x/ansi v0.10.2 +## explicit; go 1.24.0 github.com/charmbracelet/x/ansi github.com/charmbracelet/x/ansi/parser # github.com/charmbracelet/x/cellbuf v0.0.13 @@ -88,6 +98,10 @@ github.com/charmbracelet/x/cellbuf # github.com/charmbracelet/x/term v0.2.1 ## explicit; go 1.18 github.com/charmbracelet/x/term +# github.com/clipperhouse/uax29/v2 v2.2.0 +## explicit; go 1.18 +github.com/clipperhouse/uax29/v2/graphemes +github.com/clipperhouse/uax29/v2/internal/iterators # github.com/cloudflare/circl v1.6.1 ## explicit; go 1.22.0 
github.com/cloudflare/circl/dh/x25519 @@ -135,9 +149,19 @@ github.com/containers/image/types # github.com/cpuguy83/go-md2man/v2 v2.0.7 ## explicit; go 1.12 github.com/cpuguy83/go-md2man/v2/md2man -# github.com/cyphar/filepath-securejoin v0.4.1 +# github.com/cyphar/filepath-securejoin v0.5.0 ## explicit; go 1.18 github.com/cyphar/filepath-securejoin +github.com/cyphar/filepath-securejoin/internal/consts +github.com/cyphar/filepath-securejoin/pathrs-lite +github.com/cyphar/filepath-securejoin/pathrs-lite/internal +github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert +github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd +github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat +github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion +github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux +github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs +github.com/cyphar/filepath-securejoin/pathrs-lite/procfs # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew @@ -147,7 +171,7 @@ github.com/decentral1se/passgen # github.com/distribution/reference v0.6.0 ## explicit; go 1.20 github.com/distribution/reference -# github.com/docker/cli v28.3.3+incompatible +# github.com/docker/cli v28.4.0+incompatible ## explicit github.com/docker/cli/cli github.com/docker/cli/cli-plugins/metadata @@ -181,6 +205,7 @@ github.com/docker/cli/cli/version github.com/docker/cli/internal/lazyregexp github.com/docker/cli/internal/prompt github.com/docker/cli/internal/tui +github.com/docker/cli/internal/volumespec github.com/docker/cli/opts github.com/docker/cli/opts/swarmopts github.com/docker/cli/pkg/kvfile @@ -196,7 +221,7 @@ github.com/docker/distribution/registry/client/auth/challenge github.com/docker/distribution/registry/client/transport github.com/docker/distribution/registry/storage/cache github.com/docker/distribution/registry/storage/cache/memory -# github.com/docker/docker v28.3.3+incompatible +# github.com/docker/docker v28.4.0+incompatible ## explicit github.com/docker/docker/api github.com/docker/docker/api/types @@ -221,8 +246,6 @@ github.com/docker/docker/api/types/versions github.com/docker/docker/api/types/volume github.com/docker/docker/client github.com/docker/docker/errdefs -github.com/docker/docker/internal/lazyregexp -github.com/docker/docker/internal/multierror github.com/docker/docker/pkg/archive github.com/docker/docker/pkg/homedir github.com/docker/docker/pkg/idtools @@ -260,6 +283,9 @@ github.com/emirpasic/gods/utils # github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f ## explicit; go 1.16 github.com/erikgeiser/coninput +# github.com/evertras/bubble-table v0.19.2 +## explicit; go 1.18 +github.com/evertras/bubble-table/table # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop @@ -345,9 +371,6 @@ github.com/go-logr/stdr ## explicit; go 1.18 github.com/go-viper/mapstructure/v2 github.com/go-viper/mapstructure/v2/internal/errors -# github.com/gogo/protobuf v1.3.2 -## explicit; go 1.15 -github.com/gogo/protobuf/proto # github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 ## explicit; go 1.20 github.com/golang/groupcache/lru @@ -367,7 +390,7 @@ github.com/google/uuid # github.com/gorilla/mux v1.8.1 ## explicit; go 1.20 github.com/gorilla/mux -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 ## explicit; go 1.23.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule 
github.com/grpc-ecosystem/grpc-gateway/v2/runtime @@ -387,8 +410,8 @@ github.com/jbenet/go-context/io # github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 ## explicit github.com/kballard/go-shellquote -# github.com/kevinburke/ssh_config v1.2.0 -## explicit +# github.com/kevinburke/ssh_config v1.4.0 +## explicit; go 1.18 github.com/kevinburke/ssh_config # github.com/klauspost/compress v1.18.0 ## explicit; go 1.22 @@ -400,11 +423,14 @@ github.com/klauspost/compress/internal/le github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash +# github.com/klauspost/cpuid/v2 v2.3.0 +## explicit; go 1.22 +github.com/klauspost/cpuid/v2 # github.com/leonelquinteros/gotext v1.7.2 ## explicit; go 1.23.5 github.com/leonelquinteros/gotext github.com/leonelquinteros/gotext/plurals -# github.com/lucasb-eyer/go-colorful v1.2.0 +# github.com/lucasb-eyer/go-colorful v1.3.0 ## explicit; go 1.12 github.com/lucasb-eyer/go-colorful # github.com/mattn/go-colorable v0.1.14 @@ -416,8 +442,8 @@ github.com/mattn/go-isatty # github.com/mattn/go-localereader v0.0.1 ## explicit github.com/mattn/go-localereader -# github.com/mattn/go-runewidth v0.0.16 -## explicit; go 1.9 +# github.com/mattn/go-runewidth v0.0.19 +## explicit; go 1.20 github.com/mattn/go-runewidth # github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d ## explicit @@ -467,6 +493,11 @@ github.com/muesli/ansi/compressor # github.com/muesli/cancelreader v0.2.2 ## explicit; go 1.17 github.com/muesli/cancelreader +# github.com/muesli/reflow v0.3.0 +## explicit; go 1.13 +github.com/muesli/reflow/ansi +github.com/muesli/reflow/truncate +github.com/muesli/reflow/wordwrap # github.com/muesli/termenv v0.16.0 ## explicit; go 1.17 github.com/muesli/termenv @@ -484,8 +515,8 @@ github.com/opencontainers/image-spec/specs-go/v1 ## explicit; go 1.18 # github.com/opencontainers/runtime-spec v1.1.0 ## explicit -# github.com/pjbgf/sha1cd v0.4.0 -## explicit; go 1.21 +# github.com/pjbgf/sha1cd v0.5.0 +## explicit; go 1.22 github.com/pjbgf/sha1cd github.com/pjbgf/sha1cd/internal github.com/pjbgf/sha1cd/ubc @@ -495,7 +526,7 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang v1.23.0 +# github.com/prometheus/client_golang v1.23.2 ## explicit; go 1.23.0 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header @@ -506,7 +537,7 @@ github.com/prometheus/client_golang/prometheus/promhttp/internal # github.com/prometheus/client_model v0.6.2 ## explicit; go 1.22.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.65.0 +# github.com/prometheus/common v0.66.1 ## explicit; go 1.23.0 github.com/prometheus/common/expfmt github.com/prometheus/common/model @@ -533,14 +564,14 @@ github.com/sirupsen/logrus # github.com/skeema/knownhosts v1.3.1 ## explicit; go 1.22 github.com/skeema/knownhosts -# github.com/spf13/cobra v1.9.1 +# github.com/spf13/cobra v1.10.1 ## explicit; go 1.15 github.com/spf13/cobra github.com/spf13/cobra/doc -# github.com/spf13/pflag v1.0.7 +# github.com/spf13/pflag v1.0.10 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/stretchr/testify v1.10.0 +# github.com/stretchr/testify v1.11.1 ## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/assert/yaml @@ -561,17 +592,16 @@ github.com/xeipuuv/gojsonschema # 
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e ## explicit; go 1.19 github.com/xo/terminfo -# go.opentelemetry.io/auto/sdk v1.1.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/auto/sdk v1.2.1 +## explicit; go 1.24.0 go.opentelemetry.io/auto/sdk go.opentelemetry.io/auto/sdk/internal/telemetry -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 ## explicit; go 1.23.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.37.0 +# go.opentelemetry.io/otel v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -581,11 +611,11 @@ go.opentelemetry.io/otel/codes go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation -go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.26.0 -go.opentelemetry.io/otel/semconv/v1.34.0 -go.opentelemetry.io/otel/semconv/v1.34.0/httpconv -# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0 +go.opentelemetry.io/otel/semconv/v1.37.0 +go.opentelemetry.io/otel/semconv/v1.37.0/httpconv +go.opentelemetry.io/otel/semconv/v1.37.0/otelconv +# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal @@ -593,11 +623,11 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envco go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal @@ -606,12 +636,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpcon go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry # go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 ## explicit; go 1.20 -# go.opentelemetry.io/otel/metric v1.37.0 +# go.opentelemetry.io/otel/metric v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.37.0 +# go.opentelemetry.io/otel/sdk v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation @@ -619,7 +649,8 @@ go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace -# 
go.opentelemetry.io/otel/sdk/metric v1.37.0 +go.opentelemetry.io/otel/sdk/trace/internal/x +# go.opentelemetry.io/otel/sdk/metric v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/sdk/metric go.opentelemetry.io/otel/sdk/metric/exemplar @@ -627,13 +658,13 @@ go.opentelemetry.io/otel/sdk/metric/internal go.opentelemetry.io/otel/sdk/metric/internal/aggregate go.opentelemetry.io/otel/sdk/metric/internal/x go.opentelemetry.io/otel/sdk/metric/metricdata -# go.opentelemetry.io/otel/trace v1.37.0 +# go.opentelemetry.io/otel/trace v1.38.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.7.1 +# go.opentelemetry.io/proto/otlp v1.8.0 ## explicit; go 1.23.0 go.opentelemetry.io/proto/otlp/collector/metrics/v1 go.opentelemetry.io/proto/otlp/collector/trace/v1 @@ -641,8 +672,11 @@ go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/metrics/v1 go.opentelemetry.io/proto/otlp/resource/v1 go.opentelemetry.io/proto/otlp/trace/v1 -# golang.org/x/crypto v0.41.0 -## explicit; go 1.23.0 +# go.yaml.in/yaml/v2 v2.4.3 +## explicit; go 1.15 +go.yaml.in/yaml/v2 +# golang.org/x/crypto v0.42.0 +## explicit; go 1.24.0 golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish @@ -659,14 +693,14 @@ golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts -# golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 +# golang.org/x/exp v0.0.0-20250911091902-df9299821621 ## explicit; go 1.24.0 golang.org/x/exp/slices golang.org/x/exp/slog golang.org/x/exp/slog/internal golang.org/x/exp/slog/internal/buffer -# golang.org/x/net v0.43.0 -## explicit; go 1.23.0 +# golang.org/x/net v0.44.0 +## explicit; go 1.24.0 golang.org/x/net/context golang.org/x/net/http/httpguts golang.org/x/net/http2 @@ -677,22 +711,19 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/sync v0.16.0 -## explicit; go 1.23.0 -golang.org/x/sync/errgroup -# golang.org/x/sys v0.35.0 -## explicit; go 1.23.0 +# golang.org/x/sys v0.36.0 +## explicit; go 1.24.0 golang.org/x/sys/cpu golang.org/x/sys/execabs golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.34.0 -## explicit; go 1.23.0 +# golang.org/x/term v0.35.0 +## explicit; go 1.24.0 golang.org/x/term -# golang.org/x/text v0.28.0 -## explicit; go 1.23.0 +# golang.org/x/text v0.29.0 +## explicit; go 1.24.0 golang.org/x/text/cases golang.org/x/text/internal golang.org/x/text/internal/language @@ -704,17 +735,17 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.12.0 -## explicit; go 1.23.0 +# golang.org/x/time v0.13.0 +## explicit; go 1.24.0 golang.org/x/time/rate -# google.golang.org/genproto/googleapis/api v0.0.0-20250811230008-5f3141c8851a -## explicit; go 1.23.0 +# google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 +## explicit; go 1.24.0 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a -## explicit; go 1.23.0 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 +## explicit; go 1.24.0 google.golang.org/genproto/googleapis/rpc/errdetails 
google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.74.2 +# google.golang.org/grpc v1.75.1 ## explicit; go 1.23.0 google.golang.org/grpc google.golang.org/grpc/attributes @@ -778,8 +809,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.36.7 -## explicit; go 1.22 +# google.golang.org/protobuf v1.36.9 +## explicit; go 1.23 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext