vendor: align other otel packages to v1.38.0
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
This commit is contained in:
22
vendor.mod
22
vendor.mod
@ -49,11 +49,11 @@ require (
|
||||
github.com/xeipuuv/gojsonschema v1.2.0
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0
|
||||
go.opentelemetry.io/otel v1.38.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0
|
||||
go.opentelemetry.io/otel/metric v1.38.0
|
||||
go.opentelemetry.io/otel/sdk v1.35.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0
|
||||
go.opentelemetry.io/otel/sdk v1.38.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.38.0
|
||||
go.opentelemetry.io/otel/trace v1.38.0
|
||||
golang.org/x/sync v0.17.0
|
||||
golang.org/x/sys v0.37.0
|
||||
@ -68,7 +68,7 @@ require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/clipperhouse/uax29/v2 v2.2.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
@ -80,7 +80,7 @@ require (
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
@ -96,13 +96,13 @@ require (
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
go.etcd.io/etcd/raft/v3 v3.5.16 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
|
||||
golang.org/x/net v0.46.0 // indirect
|
||||
golang.org/x/time v0.14.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect
|
||||
google.golang.org/grpc v1.72.2 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
|
||||
google.golang.org/grpc v1.75.0 // indirect
|
||||
google.golang.org/protobuf v1.36.9 // indirect
|
||||
)
|
||||
|
||||
|
||||
46
vendor.sum
46
vendor.sum
@ -12,8 +12,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/clipperhouse/uax29/v2 v2.2.0 h1:ChwIKnQN3kcZteTXMgb1wztSgaU+ZemkgWdohwgs8tY=
|
||||
@ -87,8 +87,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
@ -211,22 +211,22 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRND
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
|
||||
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
|
||||
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 h1:QcFwRrZLc82r8wODjvyCbP7Ifp3UANaBSmhDSFjnqSc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0/go.mod h1:CXIWhUomyWBG/oY2/r/kLp6K/cmx9e/7DLpBuuGdLCA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
|
||||
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
|
||||
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
|
||||
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
|
||||
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
|
||||
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
|
||||
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
|
||||
go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
|
||||
go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
@ -277,12 +277,14 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ=
|
||||
google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
|
||||
google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
|
||||
google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4=
|
||||
google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
|
||||
google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
|
||||
google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
|
||||
62
vendor/github.com/cenkalti/backoff/v4/context.go
generated
vendored
62
vendor/github.com/cenkalti/backoff/v4/context.go
generated
vendored
@ -1,62 +0,0 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BackOffContext is a backoff policy that stops retrying after the context
|
||||
// is canceled.
|
||||
type BackOffContext interface { // nolint: golint
|
||||
BackOff
|
||||
Context() context.Context
|
||||
}
|
||||
|
||||
type backOffContext struct {
|
||||
BackOff
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// WithContext returns a BackOffContext with context ctx
|
||||
//
|
||||
// ctx must not be nil
|
||||
func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
|
||||
if ctx == nil {
|
||||
panic("nil context")
|
||||
}
|
||||
|
||||
if b, ok := b.(*backOffContext); ok {
|
||||
return &backOffContext{
|
||||
BackOff: b.BackOff,
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
return &backOffContext{
|
||||
BackOff: b,
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
func getContext(b BackOff) context.Context {
|
||||
if cb, ok := b.(BackOffContext); ok {
|
||||
return cb.Context()
|
||||
}
|
||||
if tb, ok := b.(*backOffTries); ok {
|
||||
return getContext(tb.delegate)
|
||||
}
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func (b *backOffContext) Context() context.Context {
|
||||
return b.ctx
|
||||
}
|
||||
|
||||
func (b *backOffContext) NextBackOff() time.Duration {
|
||||
select {
|
||||
case <-b.ctx.Done():
|
||||
return Stop
|
||||
default:
|
||||
return b.BackOff.NextBackOff()
|
||||
}
|
||||
}
|
||||
216
vendor/github.com/cenkalti/backoff/v4/exponential.go
generated
vendored
216
vendor/github.com/cenkalti/backoff/v4/exponential.go
generated
vendored
@ -1,216 +0,0 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
/*
|
||||
ExponentialBackOff is a backoff implementation that increases the backoff
|
||||
period for each retry attempt using a randomization function that grows exponentially.
|
||||
|
||||
NextBackOff() is calculated using the following formula:
|
||||
|
||||
randomized interval =
|
||||
RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
|
||||
|
||||
In other words NextBackOff() will range between the randomization factor
|
||||
percentage below and above the retry interval.
|
||||
|
||||
For example, given the following parameters:
|
||||
|
||||
RetryInterval = 2
|
||||
RandomizationFactor = 0.5
|
||||
Multiplier = 2
|
||||
|
||||
the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
|
||||
multiplied by the exponential, that is, between 2 and 6 seconds.
|
||||
|
||||
Note: MaxInterval caps the RetryInterval and not the randomized interval.
|
||||
|
||||
If the time elapsed since an ExponentialBackOff instance is created goes past the
|
||||
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
|
||||
|
||||
The elapsed time can be reset by calling Reset().
|
||||
|
||||
Example: Given the following default arguments, for 10 tries the sequence will be,
|
||||
and assuming we go over the MaxElapsedTime on the 10th try:
|
||||
|
||||
Request # RetryInterval (seconds) Randomized Interval (seconds)
|
||||
|
||||
1 0.5 [0.25, 0.75]
|
||||
2 0.75 [0.375, 1.125]
|
||||
3 1.125 [0.562, 1.687]
|
||||
4 1.687 [0.8435, 2.53]
|
||||
5 2.53 [1.265, 3.795]
|
||||
6 3.795 [1.897, 5.692]
|
||||
7 5.692 [2.846, 8.538]
|
||||
8 8.538 [4.269, 12.807]
|
||||
9 12.807 [6.403, 19.210]
|
||||
10 19.210 backoff.Stop
|
||||
|
||||
Note: Implementation is not thread-safe.
|
||||
*/
|
||||
type ExponentialBackOff struct {
|
||||
InitialInterval time.Duration
|
||||
RandomizationFactor float64
|
||||
Multiplier float64
|
||||
MaxInterval time.Duration
|
||||
// After MaxElapsedTime the ExponentialBackOff returns Stop.
|
||||
// It never stops if MaxElapsedTime == 0.
|
||||
MaxElapsedTime time.Duration
|
||||
Stop time.Duration
|
||||
Clock Clock
|
||||
|
||||
currentInterval time.Duration
|
||||
startTime time.Time
|
||||
}
|
||||
|
||||
// Clock is an interface that returns current time for BackOff.
|
||||
type Clock interface {
|
||||
Now() time.Time
|
||||
}
|
||||
|
||||
// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
|
||||
type ExponentialBackOffOpts func(*ExponentialBackOff)
|
||||
|
||||
// Default values for ExponentialBackOff.
|
||||
const (
|
||||
DefaultInitialInterval = 500 * time.Millisecond
|
||||
DefaultRandomizationFactor = 0.5
|
||||
DefaultMultiplier = 1.5
|
||||
DefaultMaxInterval = 60 * time.Second
|
||||
DefaultMaxElapsedTime = 15 * time.Minute
|
||||
)
|
||||
|
||||
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
|
||||
func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
|
||||
b := &ExponentialBackOff{
|
||||
InitialInterval: DefaultInitialInterval,
|
||||
RandomizationFactor: DefaultRandomizationFactor,
|
||||
Multiplier: DefaultMultiplier,
|
||||
MaxInterval: DefaultMaxInterval,
|
||||
MaxElapsedTime: DefaultMaxElapsedTime,
|
||||
Stop: Stop,
|
||||
Clock: SystemClock,
|
||||
}
|
||||
for _, fn := range opts {
|
||||
fn(b)
|
||||
}
|
||||
b.Reset()
|
||||
return b
|
||||
}
|
||||
|
||||
// WithInitialInterval sets the initial interval between retries.
|
||||
func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
|
||||
return func(ebo *ExponentialBackOff) {
|
||||
ebo.InitialInterval = duration
|
||||
}
|
||||
}
|
||||
|
||||
// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
|
||||
func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
|
||||
return func(ebo *ExponentialBackOff) {
|
||||
ebo.RandomizationFactor = randomizationFactor
|
||||
}
|
||||
}
|
||||
|
||||
// WithMultiplier sets the multiplier for increasing the interval after each retry.
|
||||
func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
|
||||
return func(ebo *ExponentialBackOff) {
|
||||
ebo.Multiplier = multiplier
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxInterval sets the maximum interval between retries.
|
||||
func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
|
||||
return func(ebo *ExponentialBackOff) {
|
||||
ebo.MaxInterval = duration
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxElapsedTime sets the maximum total time for retries.
|
||||
func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
|
||||
return func(ebo *ExponentialBackOff) {
|
||||
ebo.MaxElapsedTime = duration
|
||||
}
|
||||
}
|
||||
|
||||
// WithRetryStopDuration sets the duration after which retries should stop.
|
||||
func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
|
||||
return func(ebo *ExponentialBackOff) {
|
||||
ebo.Stop = duration
|
||||
}
|
||||
}
|
||||
|
||||
// WithClockProvider sets the clock used to measure time.
|
||||
func WithClockProvider(clock Clock) ExponentialBackOffOpts {
|
||||
return func(ebo *ExponentialBackOff) {
|
||||
ebo.Clock = clock
|
||||
}
|
||||
}
|
||||
|
||||
type systemClock struct{}
|
||||
|
||||
func (t systemClock) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// SystemClock implements Clock interface that uses time.Now().
|
||||
var SystemClock = systemClock{}
|
||||
|
||||
// Reset the interval back to the initial retry interval and restarts the timer.
|
||||
// Reset must be called before using b.
|
||||
func (b *ExponentialBackOff) Reset() {
|
||||
b.currentInterval = b.InitialInterval
|
||||
b.startTime = b.Clock.Now()
|
||||
}
|
||||
|
||||
// NextBackOff calculates the next backoff interval using the formula:
|
||||
// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
|
||||
func (b *ExponentialBackOff) NextBackOff() time.Duration {
|
||||
// Make sure we have not gone over the maximum elapsed time.
|
||||
elapsed := b.GetElapsedTime()
|
||||
next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
|
||||
b.incrementCurrentInterval()
|
||||
if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
|
||||
return b.Stop
|
||||
}
|
||||
return next
|
||||
}
|
||||
|
||||
// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
|
||||
// is created and is reset when Reset() is called.
|
||||
//
|
||||
// The elapsed time is computed using time.Now().UnixNano(). It is
|
||||
// safe to call even while the backoff policy is used by a running
|
||||
// ticker.
|
||||
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
|
||||
return b.Clock.Now().Sub(b.startTime)
|
||||
}
|
||||
|
||||
// Increments the current interval by multiplying it with the multiplier.
|
||||
func (b *ExponentialBackOff) incrementCurrentInterval() {
|
||||
// Check for overflow, if overflow is detected set the current interval to the max interval.
|
||||
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
|
||||
b.currentInterval = b.MaxInterval
|
||||
} else {
|
||||
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a random value from the following interval:
|
||||
// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
|
||||
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
|
||||
if randomizationFactor == 0 {
|
||||
return currentInterval // make sure no randomness is used when randomizationFactor is 0.
|
||||
}
|
||||
var delta = randomizationFactor * float64(currentInterval)
|
||||
var minInterval = float64(currentInterval) - delta
|
||||
var maxInterval = float64(currentInterval) + delta
|
||||
|
||||
// Get a random value from the range [minInterval, maxInterval].
|
||||
// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
|
||||
// we want a 33% chance for selecting either 1, 2 or 3.
|
||||
return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
|
||||
}
|
||||
146
vendor/github.com/cenkalti/backoff/v4/retry.go
generated
vendored
146
vendor/github.com/cenkalti/backoff/v4/retry.go
generated
vendored
@ -1,146 +0,0 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData().
|
||||
// The operation will be retried using a backoff policy if it returns an error.
|
||||
type OperationWithData[T any] func() (T, error)
|
||||
|
||||
// An Operation is executing by Retry() or RetryNotify().
|
||||
// The operation will be retried using a backoff policy if it returns an error.
|
||||
type Operation func() error
|
||||
|
||||
func (o Operation) withEmptyData() OperationWithData[struct{}] {
|
||||
return func() (struct{}, error) {
|
||||
return struct{}{}, o()
|
||||
}
|
||||
}
|
||||
|
||||
// Notify is a notify-on-error function. It receives an operation error and
|
||||
// backoff delay if the operation failed (with an error).
|
||||
//
|
||||
// NOTE that if the backoff policy stated to stop retrying,
|
||||
// the notify function isn't called.
|
||||
type Notify func(error, time.Duration)
|
||||
|
||||
// Retry the operation o until it does not return error or BackOff stops.
|
||||
// o is guaranteed to be run at least once.
|
||||
//
|
||||
// If o returns a *PermanentError, the operation is not retried, and the
|
||||
// wrapped error is returned.
|
||||
//
|
||||
// Retry sleeps the goroutine for the duration returned by BackOff after a
|
||||
// failed operation returns.
|
||||
func Retry(o Operation, b BackOff) error {
|
||||
return RetryNotify(o, b, nil)
|
||||
}
|
||||
|
||||
// RetryWithData is like Retry but returns data in the response too.
|
||||
func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
|
||||
return RetryNotifyWithData(o, b, nil)
|
||||
}
|
||||
|
||||
// RetryNotify calls notify function with the error and wait duration
|
||||
// for each failed attempt before sleep.
|
||||
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
|
||||
return RetryNotifyWithTimer(operation, b, notify, nil)
|
||||
}
|
||||
|
||||
// RetryNotifyWithData is like RetryNotify but returns data in the response too.
|
||||
func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
|
||||
return doRetryNotify(operation, b, notify, nil)
|
||||
}
|
||||
|
||||
// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
|
||||
// for each failed attempt before sleep.
|
||||
// A default timer that uses system timer is used when nil is passed.
|
||||
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
|
||||
_, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
|
||||
return err
|
||||
}
|
||||
|
||||
// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
|
||||
func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
|
||||
return doRetryNotify(operation, b, notify, t)
|
||||
}
|
||||
|
||||
func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
|
||||
var (
|
||||
err error
|
||||
next time.Duration
|
||||
res T
|
||||
)
|
||||
if t == nil {
|
||||
t = &defaultTimer{}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
t.Stop()
|
||||
}()
|
||||
|
||||
ctx := getContext(b)
|
||||
|
||||
b.Reset()
|
||||
for {
|
||||
res, err = operation()
|
||||
if err == nil {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
var permanent *PermanentError
|
||||
if errors.As(err, &permanent) {
|
||||
return res, permanent.Err
|
||||
}
|
||||
|
||||
if next = b.NextBackOff(); next == Stop {
|
||||
if cerr := ctx.Err(); cerr != nil {
|
||||
return res, cerr
|
||||
}
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
if notify != nil {
|
||||
notify(err, next)
|
||||
}
|
||||
|
||||
t.Start(next)
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return res, ctx.Err()
|
||||
case <-t.C():
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PermanentError signals that the operation should not be retried.
|
||||
type PermanentError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *PermanentError) Error() string {
|
||||
return e.Err.Error()
|
||||
}
|
||||
|
||||
func (e *PermanentError) Unwrap() error {
|
||||
return e.Err
|
||||
}
|
||||
|
||||
func (e *PermanentError) Is(target error) bool {
|
||||
_, ok := target.(*PermanentError)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Permanent wraps the given err in a *PermanentError.
|
||||
func Permanent(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &PermanentError{
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
38
vendor/github.com/cenkalti/backoff/v4/tries.go
generated
vendored
38
vendor/github.com/cenkalti/backoff/v4/tries.go
generated
vendored
@ -1,38 +0,0 @@
|
||||
package backoff
|
||||
|
||||
import "time"
|
||||
|
||||
/*
|
||||
WithMaxRetries creates a wrapper around another BackOff, which will
|
||||
return Stop if NextBackOff() has been called too many times since
|
||||
the last time Reset() was called
|
||||
|
||||
Note: Implementation is not thread-safe.
|
||||
*/
|
||||
func WithMaxRetries(b BackOff, max uint64) BackOff {
|
||||
return &backOffTries{delegate: b, maxTries: max}
|
||||
}
|
||||
|
||||
type backOffTries struct {
|
||||
delegate BackOff
|
||||
maxTries uint64
|
||||
numTries uint64
|
||||
}
|
||||
|
||||
func (b *backOffTries) NextBackOff() time.Duration {
|
||||
if b.maxTries == 0 {
|
||||
return Stop
|
||||
}
|
||||
if b.maxTries > 0 {
|
||||
if b.maxTries <= b.numTries {
|
||||
return Stop
|
||||
}
|
||||
b.numTries++
|
||||
}
|
||||
return b.delegate.NextBackOff()
|
||||
}
|
||||
|
||||
func (b *backOffTries) Reset() {
|
||||
b.numTries = 0
|
||||
b.delegate.Reset()
|
||||
}
|
||||
29
vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md
generated
vendored
Normal file
29
vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [5.0.0] - 2024-12-19
|
||||
|
||||
### Added
|
||||
|
||||
- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry.
|
||||
|
||||
### Changed
|
||||
|
||||
- Retry function now accepts additional options for specifying max number of tries and max elapsed time.
|
||||
- Retry function now accepts a context.Context.
|
||||
- Operation function signature changed to return result (any type) and error.
|
||||
|
||||
### Removed
|
||||
|
||||
- RetryNotify* and RetryWithData functions. Only single Retry function remains.
|
||||
- Optional arguments from ExponentialBackoff constructor.
|
||||
- Clock and Timer interfaces.
|
||||
|
||||
### Fixed
|
||||
|
||||
- The original error is returned from Retry if there's a PermanentError. (#144)
|
||||
- The Retry function respects the wrapped PermanentError. (#140)
|
||||
@ -1,4 +1,4 @@
|
||||
# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
|
||||
# Exponential Backoff [![GoDoc][godoc image]][godoc]
|
||||
|
||||
This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
|
||||
|
||||
@ -9,9 +9,11 @@ The retries exponentially increase and stop increasing when a certain threshold
|
||||
|
||||
## Usage
|
||||
|
||||
Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
|
||||
Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end.
|
||||
|
||||
Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
|
||||
For most cases, use `Retry` function. See [example_test.go][example] for an example.
|
||||
|
||||
If you have specific needs, copy `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed.
|
||||
|
||||
## Contributing
|
||||
|
||||
@ -19,12 +21,11 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
|
||||
* Please don't send a PR without opening an issue and discussing it first.
|
||||
* If proposed change is not a common use case, I will probably not accept it.
|
||||
|
||||
[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
|
||||
[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5
|
||||
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
|
||||
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
|
||||
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
|
||||
|
||||
[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
|
||||
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
|
||||
|
||||
[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
|
||||
[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go
|
||||
[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go
|
||||
@ -15,16 +15,16 @@ import "time"
|
||||
// BackOff is a backoff policy for retrying an operation.
|
||||
type BackOff interface {
|
||||
// NextBackOff returns the duration to wait before retrying the operation,
|
||||
// or backoff. Stop to indicate that no more retries should be made.
|
||||
// backoff.Stop to indicate that no more retries should be made.
|
||||
//
|
||||
// Example usage:
|
||||
//
|
||||
// duration := backoff.NextBackOff();
|
||||
// if (duration == backoff.Stop) {
|
||||
// // Do not retry operation.
|
||||
// } else {
|
||||
// // Sleep for duration and retry operation.
|
||||
// }
|
||||
// duration := backoff.NextBackOff()
|
||||
// if duration == backoff.Stop {
|
||||
// // Do not retry operation.
|
||||
// } else {
|
||||
// // Sleep for duration and retry operation.
|
||||
// }
|
||||
//
|
||||
NextBackOff() time.Duration
|
||||
|
||||
46
vendor/github.com/cenkalti/backoff/v5/error.go
generated
vendored
Normal file
46
vendor/github.com/cenkalti/backoff/v5/error.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PermanentError signals that the operation should not be retried.
|
||||
type PermanentError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
// Permanent wraps the given err in a *PermanentError.
|
||||
func Permanent(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &PermanentError{
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
// Error returns a string representation of the Permanent error.
|
||||
func (e *PermanentError) Error() string {
|
||||
return e.Err.Error()
|
||||
}
|
||||
|
||||
// Unwrap returns the wrapped error.
|
||||
func (e *PermanentError) Unwrap() error {
|
||||
return e.Err
|
||||
}
|
||||
|
||||
// RetryAfterError signals that the operation should be retried after the given duration.
|
||||
type RetryAfterError struct {
|
||||
Duration time.Duration
|
||||
}
|
||||
|
||||
// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying.
|
||||
func RetryAfter(seconds int) error {
|
||||
return &RetryAfterError{Duration: time.Duration(seconds) * time.Second}
|
||||
}
|
||||
|
||||
// Error returns a string representation of the RetryAfter error.
|
||||
func (e *RetryAfterError) Error() string {
|
||||
return fmt.Sprintf("retry after %s", e.Duration)
|
||||
}
|
||||
118
vendor/github.com/cenkalti/backoff/v5/exponential.go
generated
vendored
Normal file
118
vendor/github.com/cenkalti/backoff/v5/exponential.go
generated
vendored
Normal file
@ -0,0 +1,118 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"math/rand/v2"
|
||||
"time"
|
||||
)
|
||||
|
||||
/*
|
||||
ExponentialBackOff is a backoff implementation that increases the backoff
|
||||
period for each retry attempt using a randomization function that grows exponentially.
|
||||
|
||||
NextBackOff() is calculated using the following formula:
|
||||
|
||||
randomized interval =
|
||||
RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
|
||||
|
||||
In other words NextBackOff() will range between the randomization factor
|
||||
percentage below and above the retry interval.
|
||||
|
||||
For example, given the following parameters:
|
||||
|
||||
RetryInterval = 2
|
||||
RandomizationFactor = 0.5
|
||||
Multiplier = 2
|
||||
|
||||
the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
|
||||
multiplied by the exponential, that is, between 2 and 6 seconds.
|
||||
|
||||
Note: MaxInterval caps the RetryInterval and not the randomized interval.
|
||||
|
||||
Example: Given the following default arguments, for 9 tries the sequence will be:
|
||||
|
||||
Request # RetryInterval (seconds) Randomized Interval (seconds)
|
||||
|
||||
1 0.5 [0.25, 0.75]
|
||||
2 0.75 [0.375, 1.125]
|
||||
3 1.125 [0.562, 1.687]
|
||||
4 1.687 [0.8435, 2.53]
|
||||
5 2.53 [1.265, 3.795]
|
||||
6 3.795 [1.897, 5.692]
|
||||
7 5.692 [2.846, 8.538]
|
||||
8 8.538 [4.269, 12.807]
|
||||
9 12.807 [6.403, 19.210]
|
||||
|
||||
Note: Implementation is not thread-safe.
|
||||
*/
|
||||
type ExponentialBackOff struct {
|
||||
InitialInterval time.Duration
|
||||
RandomizationFactor float64
|
||||
Multiplier float64
|
||||
MaxInterval time.Duration
|
||||
|
||||
currentInterval time.Duration
|
||||
}
|
||||
|
||||
// Default values for ExponentialBackOff.
|
||||
const (
|
||||
DefaultInitialInterval = 500 * time.Millisecond
|
||||
DefaultRandomizationFactor = 0.5
|
||||
DefaultMultiplier = 1.5
|
||||
DefaultMaxInterval = 60 * time.Second
|
||||
)
|
||||
|
||||
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
|
||||
func NewExponentialBackOff() *ExponentialBackOff {
|
||||
return &ExponentialBackOff{
|
||||
InitialInterval: DefaultInitialInterval,
|
||||
RandomizationFactor: DefaultRandomizationFactor,
|
||||
Multiplier: DefaultMultiplier,
|
||||
MaxInterval: DefaultMaxInterval,
|
||||
}
|
||||
}
|
||||
|
||||
// Reset the interval back to the initial retry interval and restarts the timer.
|
||||
// Reset must be called before using b.
|
||||
func (b *ExponentialBackOff) Reset() {
|
||||
b.currentInterval = b.InitialInterval
|
||||
}
|
||||
|
||||
// NextBackOff calculates the next backoff interval using the formula:
|
||||
//
|
||||
// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
|
||||
func (b *ExponentialBackOff) NextBackOff() time.Duration {
|
||||
if b.currentInterval == 0 {
|
||||
b.currentInterval = b.InitialInterval
|
||||
}
|
||||
|
||||
next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
|
||||
b.incrementCurrentInterval()
|
||||
return next
|
||||
}
|
||||
|
||||
// Increments the current interval by multiplying it with the multiplier.
|
||||
func (b *ExponentialBackOff) incrementCurrentInterval() {
|
||||
// Check for overflow, if overflow is detected set the current interval to the max interval.
|
||||
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
|
||||
b.currentInterval = b.MaxInterval
|
||||
} else {
|
||||
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a random value from the following interval:
|
||||
//
|
||||
// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
|
||||
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
|
||||
if randomizationFactor == 0 {
|
||||
return currentInterval // make sure no randomness is used when randomizationFactor is 0.
|
||||
}
|
||||
var delta = randomizationFactor * float64(currentInterval)
|
||||
var minInterval = float64(currentInterval) - delta
|
||||
var maxInterval = float64(currentInterval) + delta
|
||||
|
||||
// Get a random value from the range [minInterval, maxInterval].
|
||||
// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
|
||||
// we want a 33% chance for selecting either 1, 2 or 3.
|
||||
return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
|
||||
}
|
||||
139
vendor/github.com/cenkalti/backoff/v5/retry.go
generated
vendored
Normal file
139
vendor/github.com/cenkalti/backoff/v5/retry.go
generated
vendored
Normal file
@ -0,0 +1,139 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DefaultMaxElapsedTime sets a default limit for the total retry duration.
|
||||
const DefaultMaxElapsedTime = 15 * time.Minute
|
||||
|
||||
// Operation is a function that attempts an operation and may be retried.
|
||||
type Operation[T any] func() (T, error)
|
||||
|
||||
// Notify is a function called on operation error with the error and backoff duration.
|
||||
type Notify func(error, time.Duration)
|
||||
|
||||
// retryOptions holds configuration settings for the retry mechanism.
|
||||
type retryOptions struct {
|
||||
BackOff BackOff // Strategy for calculating backoff periods.
|
||||
Timer timer // Timer to manage retry delays.
|
||||
Notify Notify // Optional function to notify on each retry error.
|
||||
MaxTries uint // Maximum number of retry attempts.
|
||||
MaxElapsedTime time.Duration // Maximum total time for all retries.
|
||||
}
|
||||
|
||||
type RetryOption func(*retryOptions)
|
||||
|
||||
// WithBackOff configures a custom backoff strategy.
|
||||
func WithBackOff(b BackOff) RetryOption {
|
||||
return func(args *retryOptions) {
|
||||
args.BackOff = b
|
||||
}
|
||||
}
|
||||
|
||||
// withTimer sets a custom timer for managing delays between retries.
|
||||
func withTimer(t timer) RetryOption {
|
||||
return func(args *retryOptions) {
|
||||
args.Timer = t
|
||||
}
|
||||
}
|
||||
|
||||
// WithNotify sets a notification function to handle retry errors.
|
||||
func WithNotify(n Notify) RetryOption {
|
||||
return func(args *retryOptions) {
|
||||
args.Notify = n
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxTries limits the number of all attempts.
|
||||
func WithMaxTries(n uint) RetryOption {
|
||||
return func(args *retryOptions) {
|
||||
args.MaxTries = n
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxElapsedTime limits the total duration for retry attempts.
|
||||
func WithMaxElapsedTime(d time.Duration) RetryOption {
|
||||
return func(args *retryOptions) {
|
||||
args.MaxElapsedTime = d
|
||||
}
|
||||
}
|
||||
|
||||
// Retry attempts the operation until success, a permanent error, or backoff completion.
|
||||
// It ensures the operation is executed at least once.
|
||||
//
|
||||
// Returns the operation result or error if retries are exhausted or context is cancelled.
|
||||
func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) {
|
||||
// Initialize default retry options.
|
||||
args := &retryOptions{
|
||||
BackOff: NewExponentialBackOff(),
|
||||
Timer: &defaultTimer{},
|
||||
MaxElapsedTime: DefaultMaxElapsedTime,
|
||||
}
|
||||
|
||||
// Apply user-provided options to the default settings.
|
||||
for _, opt := range opts {
|
||||
opt(args)
|
||||
}
|
||||
|
||||
defer args.Timer.Stop()
|
||||
|
||||
startedAt := time.Now()
|
||||
args.BackOff.Reset()
|
||||
for numTries := uint(1); ; numTries++ {
|
||||
// Execute the operation.
|
||||
res, err := operation()
|
||||
if err == nil {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Stop retrying if maximum tries exceeded.
|
||||
if args.MaxTries > 0 && numTries >= args.MaxTries {
|
||||
return res, err
|
||||
}
|
||||
|
||||
// Handle permanent errors without retrying.
|
||||
var permanent *PermanentError
|
||||
if errors.As(err, &permanent) {
|
||||
return res, permanent.Unwrap()
|
||||
}
|
||||
|
||||
// Stop retrying if context is cancelled.
|
||||
if cerr := context.Cause(ctx); cerr != nil {
|
||||
return res, cerr
|
||||
}
|
||||
|
||||
// Calculate next backoff duration.
|
||||
next := args.BackOff.NextBackOff()
|
||||
if next == Stop {
|
||||
return res, err
|
||||
}
|
||||
|
||||
// Reset backoff if RetryAfterError is encountered.
|
||||
var retryAfter *RetryAfterError
|
||||
if errors.As(err, &retryAfter) {
|
||||
next = retryAfter.Duration
|
||||
args.BackOff.Reset()
|
||||
}
|
||||
|
||||
// Stop retrying if maximum elapsed time exceeded.
|
||||
if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime {
|
||||
return res, err
|
||||
}
|
||||
|
||||
// Notify on error if a notifier function is provided.
|
||||
if args.Notify != nil {
|
||||
args.Notify(err, next)
|
||||
}
|
||||
|
||||
// Wait for the next backoff period or context cancellation.
|
||||
args.Timer.Start(next)
|
||||
select {
|
||||
case <-args.Timer.C():
|
||||
case <-ctx.Done():
|
||||
return res, context.Cause(ctx)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,7 +1,6 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
@ -14,8 +13,7 @@ type Ticker struct {
|
||||
C <-chan time.Time
|
||||
c chan time.Time
|
||||
b BackOff
|
||||
ctx context.Context
|
||||
timer Timer
|
||||
timer timer
|
||||
stop chan struct{}
|
||||
stopOnce sync.Once
|
||||
}
|
||||
@ -27,22 +25,12 @@ type Ticker struct {
|
||||
// provided backoff policy (notably calling NextBackOff or Reset)
|
||||
// while the ticker is running.
|
||||
func NewTicker(b BackOff) *Ticker {
|
||||
return NewTickerWithTimer(b, &defaultTimer{})
|
||||
}
|
||||
|
||||
// NewTickerWithTimer returns a new Ticker with a custom timer.
|
||||
// A default timer that uses system timer is used when nil is passed.
|
||||
func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
|
||||
if timer == nil {
|
||||
timer = &defaultTimer{}
|
||||
}
|
||||
c := make(chan time.Time)
|
||||
t := &Ticker{
|
||||
C: c,
|
||||
c: c,
|
||||
b: b,
|
||||
ctx: getContext(b),
|
||||
timer: timer,
|
||||
timer: &defaultTimer{},
|
||||
stop: make(chan struct{}),
|
||||
}
|
||||
t.b.Reset()
|
||||
@ -73,8 +61,6 @@ func (t *Ticker) run() {
|
||||
case <-t.stop:
|
||||
t.c = nil // Prevent future ticks from being sent to the channel.
|
||||
return
|
||||
case <-t.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2,7 +2,7 @@ package backoff
|
||||
|
||||
import "time"
|
||||
|
||||
type Timer interface {
|
||||
type timer interface {
|
||||
Start(duration time.Duration)
|
||||
Stop()
|
||||
C() <-chan time.Time
|
||||
28
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
generated
vendored
28
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
generated
vendored
@ -148,22 +148,20 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
|
||||
}
|
||||
|
||||
md, ok := ServerMetadataFromContext(ctx)
|
||||
if !ok {
|
||||
grpclog.Error("Failed to extract ServerMetadata from context")
|
||||
}
|
||||
if ok {
|
||||
handleForwardResponseServerMetadata(w, mux, md)
|
||||
|
||||
handleForwardResponseServerMetadata(w, mux, md)
|
||||
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
|
||||
// Unless the request includes a TE header field indicating "trailers"
|
||||
// is acceptable, as described in Section 4.3, a server SHOULD NOT
|
||||
// generate trailer fields that it believes are necessary for the user
|
||||
// agent to receive.
|
||||
doForwardTrailers := requestAcceptsTrailers(r)
|
||||
|
||||
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
|
||||
// Unless the request includes a TE header field indicating "trailers"
|
||||
// is acceptable, as described in Section 4.3, a server SHOULD NOT
|
||||
// generate trailer fields that it believes are necessary for the user
|
||||
// agent to receive.
|
||||
doForwardTrailers := requestAcceptsTrailers(r)
|
||||
|
||||
if doForwardTrailers {
|
||||
handleForwardResponseTrailerHeader(w, mux, md)
|
||||
w.Header().Set("Transfer-Encoding", "chunked")
|
||||
if doForwardTrailers {
|
||||
handleForwardResponseTrailerHeader(w, mux, md)
|
||||
w.Header().Set("Transfer-Encoding", "chunked")
|
||||
}
|
||||
}
|
||||
|
||||
st := HTTPStatusFromCode(s.Code())
|
||||
@ -176,7 +174,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
|
||||
grpclog.Errorf("Failed to write response: %v", err)
|
||||
}
|
||||
|
||||
if doForwardTrailers {
|
||||
if ok && requestAcceptsTrailers(r) {
|
||||
handleForwardResponseTrailer(w, mux, md)
|
||||
}
|
||||
}
|
||||
|
||||
10
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
generated
vendored
10
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
generated
vendored
@ -153,12 +153,10 @@ type responseBody interface {
|
||||
// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
|
||||
func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
||||
md, ok := ServerMetadataFromContext(ctx)
|
||||
if !ok {
|
||||
grpclog.Error("Failed to extract ServerMetadata from context")
|
||||
if ok {
|
||||
handleForwardResponseServerMetadata(w, mux, md)
|
||||
}
|
||||
|
||||
handleForwardResponseServerMetadata(w, mux, md)
|
||||
|
||||
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
|
||||
// Unless the request includes a TE header field indicating "trailers"
|
||||
// is acceptable, as described in Section 4.3, a server SHOULD NOT
|
||||
@ -166,7 +164,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
|
||||
// agent to receive.
|
||||
doForwardTrailers := requestAcceptsTrailers(req)
|
||||
|
||||
if doForwardTrailers {
|
||||
if ok && doForwardTrailers {
|
||||
handleForwardResponseTrailerHeader(w, mux, md)
|
||||
w.Header().Set("Transfer-Encoding", "chunked")
|
||||
}
|
||||
@ -204,7 +202,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
|
||||
grpclog.Errorf("Failed to write response: %v", err)
|
||||
}
|
||||
|
||||
if doForwardTrailers {
|
||||
if ok && doForwardTrailers {
|
||||
handleForwardResponseTrailer(w, mux, md)
|
||||
}
|
||||
}
|
||||
|
||||
30
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE
generated
vendored
30
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE
generated
vendored
@ -199,3 +199,33 @@
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
Copyright 2009 The Go Authors.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google LLC nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
7
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
generated
vendored
7
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
generated
vendored
@ -5,8 +5,11 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
@ -17,8 +20,6 @@ import (
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
|
||||
colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
type client struct {
|
||||
@ -149,7 +150,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
|
||||
)
|
||||
|
||||
if c.exportTimeout > 0 {
|
||||
ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
|
||||
ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout"))
|
||||
} else {
|
||||
ctx, cancel = context.WithCancel(parent)
|
||||
}
|
||||
|
||||
5
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
generated
vendored
5
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
generated
vendored
@ -238,8 +238,9 @@ func WithTimeout(duration time.Duration) Option {
|
||||
// explicitly returns a backoff time in the response, that time will take
|
||||
// precedence over these settings.
|
||||
//
|
||||
// These settings do not define any network retry strategy. That is entirely
|
||||
// handled by the gRPC ClientConn.
|
||||
// These settings define the retry strategy implemented by the exporter.
|
||||
// These settings do not define any network retry strategy.
|
||||
// That is handled by the gRPC ClientConn.
|
||||
//
|
||||
// If unset, the default retry policy will be used. It will retry the export
|
||||
// 5 seconds after receiving a retryable error and increase exponentially
|
||||
|
||||
9
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go
generated
vendored
9
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go
generated
vendored
@ -9,12 +9,13 @@ import (
"fmt"
"sync"

metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"

"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)

// Exporter is a OpenTelemetry metric Exporter using gRPC.
@ -91,7 +92,7 @@ func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) e
// This method returns an error if the method is canceled by the passed context.
//
// This method is safe to call concurrently.
func (e *Exporter) ForceFlush(ctx context.Context) error {
func (*Exporter) ForceFlush(ctx context.Context) error {
// The exporter and client hold no state, nothing to flush.
return ctx.Err()
}
@ -119,7 +120,7 @@ var errShutdown = errors.New("gRPC exporter is shutdown")

type shutdownClient struct{}

func (c shutdownClient) err(ctx context.Context) error {
func (shutdownClient) err(ctx context.Context) error {
if err := ctx.Err(); err != nil {
return err
}
@ -135,7 +136,7 @@ func (c shutdownClient) Shutdown(ctx context.Context) error {
}

// MarshalLog returns logging data about the Exporter.
func (e *Exporter) MarshalLog() interface{} {
func (*Exporter) MarshalLog() any {
return struct{ Type string }{Type: "OTLP/gRPC"}
}

@ -1,9 +1,11 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package envconfig provides functionality to parse configuration from
// environment variables.
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"

import (

@ -1,6 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package internal provides internal functionally for the otlpmetricgrpc package.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"

//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go

@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl

// Copyright The OpenTelemetry Authors
@ -80,8 +80,16 @@ func getOptionsFromEnv() []GenericOption {
}),
envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
envconfig.WithClientCert(
"CLIENT_CERTIFICATE",
"CLIENT_KEY",
func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
),
envconfig.WithClientCert(
"METRICS_CLIENT_CERTIFICATE",
"METRICS_CLIENT_KEY",
func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
),
envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
@ -91,8 +99,14 @@ func getOptionsFromEnv() []GenericOption {
WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
withEnvTemporalityPreference(
"METRICS_TEMPORALITY_PREFERENCE",
func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) },
),
withEnvAggPreference(
"METRICS_DEFAULT_HISTOGRAM_AGGREGATION",
func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) },
),
)

return opts
@ -157,7 +171,11 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector))
case "lowmemory":
fn(lowMemory)
default:
global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
global.Warn(
"OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.",
"value",
s,
)
}
}
}
@ -203,7 +221,11 @@ func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e
return metric.DefaultAggregationSelector(kind)
})
default:
global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
global.Warn(
"OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.",
"value",
s,
)
}
}
}

@ -1,9 +1,10 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package oconf provides configuration for the otlpmetric exporters.
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"

import (
@ -56,13 +57,15 @@ type (
Timeout time.Duration
URLPath string

// gRPC configurations
GRPCCredentials credentials.TransportCredentials

TemporalitySelector metric.TemporalitySelector
AggregationSelector metric.AggregationSelector

Proxy HTTPTransportProxyFunc
// gRPC configurations
GRPCCredentials credentials.TransportCredentials

// HTTP configurations
Proxy HTTPTransportProxyFunc
HTTPClient *http.Client
}

Config struct {
@ -102,12 +105,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config {
return cfg
}

// cleanPath returns a path with all spaces trimmed and all redundancies
// removed. If urlPath is empty or cleaning it results in an empty string,
// cleanPath returns a path with all spaces trimmed. If urlPath is empty,
// defaultPath is returned instead.
func cleanPath(urlPath string, defaultPath string) string {
tmp := path.Clean(strings.TrimSpace(urlPath))
if tmp == "." {
tmp := strings.TrimSpace(urlPath)
if tmp == "" || tmp == "." {
return defaultPath
}
if !path.IsAbs(tmp) {
@ -372,3 +374,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
return cfg
})
}

func WithHTTPClient(c *http.Client) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Metrics.HTTPClient = c
return cfg
})
}

@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl

// Copyright The OpenTelemetry Authors

@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl

// Copyright The OpenTelemetry Authors

@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/partialsuccess.go

// Copyright The OpenTelemetry Authors

@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/retry/retry.go.tmpl

// Copyright The OpenTelemetry Authors
@ -14,7 +14,7 @@ import (
"fmt"
"time"

"github.com/cenkalti/backoff/v4"
"github.com/cenkalti/backoff/v5"
)

// DefaultConfig are the recommended defaults to use.
@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: c.MaxInterval,
MaxElapsedTime: c.MaxElapsedTime,
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}
b.Reset()

maxElapsedTime := c.MaxElapsedTime
startTime := time.Now()

for {
err := fn(ctx)
if err == nil {
@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
return err
}

bOff := b.NextBackOff()
if bOff == backoff.Stop {
if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
return fmt.Errorf("max retry time elapsed: %w", err)
}

// Wait for the greater of the backoff or throttle delay.
var delay time.Duration
if bOff > throttle {
delay = bOff
} else {
elapsed := b.GetElapsedTime()
if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
return fmt.Errorf("max retry time would elapse: %w", err)
}
delay = throttle
bOff := b.NextBackOff()
delay := max(throttle, bOff)

elapsed := time.Since(startTime)
if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime {
return fmt.Errorf("max retry time would elapse: %w", err)
}

if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
@ -136,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error {
select {
case <-timer.C:
default:
return ctx.Err()
return context.Cause(ctx)
}
case <-timer.C:
}

@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl

// Copyright The OpenTelemetry Authors

@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl

// Copyright The OpenTelemetry Authors

@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl

// Copyright The OpenTelemetry Authors
@ -203,7 +203,9 @@ func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint

// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
// returned if the temporality of h is unknown.
func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
func ExponentialHistogram[N int64 | float64](
h metricdata.ExponentialHistogram[N],
) (*mpb.Metric_ExponentialHistogram, error) {
t, err := Temporality(h.Temporality)
if err != nil {
return nil, err
@ -218,7 +220,9 @@ func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N

// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
// from dPts.
func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
func ExponentialHistogramDataPoints[N int64 | float64](
dPts []metricdata.ExponentialHistogramDataPoint[N],
) []*mpb.ExponentialHistogramDataPoint {
out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
for _, dPt := range dPts {
sum := float64(dPt.Sum)
@ -250,7 +254,9 @@ func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.Exponen

// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
// from bucket.
func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
func ExponentialHistogramDataPointBuckets(
bucket metricdata.ExponentialBucket,
) *mpb.ExponentialHistogramDataPoint_Buckets {
return &mpb.ExponentialHistogramDataPoint_Buckets{
Offset: bucket.Offset,
BucketCounts: bucket.Counts,
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
generated
vendored
@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme

// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
func Version() string {
return "1.35.0"
return "1.38.0"
}
30
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
generated
vendored
@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

--------------------------------------------------------------------------------

Copyright 2009 The Go Authors.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
generated
vendored
@ -94,7 +94,7 @@ func NewUnstarted(client Client) *Exporter {
}

// MarshalLog is the marshaling function used by the logging system to represent this Exporter.
func (e *Exporter) MarshalLog() interface{} {
func (e *Exporter) MarshalLog() any {
return struct {
Type string
Client Client

@ -1,12 +1,15 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package tracetransform provides conversion functionality for the otlptrace
// exporters.
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"

import (
commonpb "go.opentelemetry.io/proto/otlp/common/v1"

"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/resource"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
)

// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.

@ -4,8 +4,9 @@
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"

import (
"go.opentelemetry.io/otel/sdk/instrumentation"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"

"go.opentelemetry.io/otel/sdk/instrumentation"
)

func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope {

@ -4,8 +4,9 @@
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"

import (
"go.opentelemetry.io/otel/sdk/resource"
resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"

"go.opentelemetry.io/otel/sdk/resource"
)

// Resource transforms a Resource into an OTLP Resource.

@ -6,12 +6,13 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptr
import (
"math"

tracepb "go.opentelemetry.io/proto/otlp/trace/v1"

"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/sdk/instrumentation"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/trace"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)

// Spans transforms a slice of OpenTelemetry spans into a slice of OTLP
@ -154,7 +155,6 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link {
for _, otLink := range links {
// This redefinition is necessary to prevent otLink.*ID[:] copies
// being reused -- in short we need a new otLink per iteration.
otLink := otLink

tid := otLink.SpanContext.TraceID()
sid := otLink.SpanContext.SpanID()
@ -189,7 +189,7 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {

events := make([]*tracepb.Span_Event, len(es))
// Transform message events
for i := 0; i < len(es); i++ {
for i := range es {
events[i] = &tracepb.Span_Event{
Name: es[i].Name,
TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked.
30
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
generated
vendored
@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

--------------------------------------------------------------------------------

Copyright 2009 The Go Authors.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
8
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
generated
vendored
@ -9,6 +9,8 @@ import (
"sync"
"time"

coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@ -20,8 +22,6 @@ import (
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)

type client struct {
@ -223,7 +223,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
)

if c.exportTimeout > 0 {
ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout"))
} else {
ctx, cancel = context.WithCancel(parent)
}
@ -289,7 +289,7 @@ func throttleDelay(s *status.Status) (bool, time.Duration) {
}

// MarshalLog is the marshaling function used by the logging system to represent this Client.
func (c *client) MarshalLog() interface{} {
func (c *client) MarshalLog() any {
return struct {
Type string
Endpoint string

@ -1,9 +1,11 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package envconfig provides functionality to parse configuration from
// environment variables.
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"

import (

@ -1,6 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package internal provides internal functionally for the otlptracegrpc package.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"

//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go

@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl

// Copyright The OpenTelemetry Authors
@ -77,8 +77,16 @@ func getOptionsFromEnv() []GenericOption {
}),
envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
envconfig.WithClientCert(
"CLIENT_CERTIFICATE",
"CLIENT_KEY",
func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
),
envconfig.WithClientCert(
"TRACES_CLIENT_CERTIFICATE",
"TRACES_CLIENT_KEY",
func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
),
withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),

@ -1,9 +1,10 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package otlpconfig provides configuration for the otlptrace exporters.
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"

import (
@ -52,7 +53,9 @@ type (
// gRPC configurations
GRPCCredentials credentials.TransportCredentials

Proxy HTTPTransportProxyFunc
// HTTP configurations
Proxy HTTPTransportProxyFunc
HTTPClient *http.Client
}

Config struct {
@ -89,12 +92,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config {
return cfg
}

// cleanPath returns a path with all spaces trimmed and all redundancies
// removed. If urlPath is empty or cleaning it results in an empty string,
// cleanPath returns a path with all spaces trimmed. If urlPath is empty,
// defaultPath is returned instead.
func cleanPath(urlPath string, defaultPath string) string {
tmp := path.Clean(strings.TrimSpace(urlPath))
if tmp == "." {
tmp := strings.TrimSpace(urlPath)
if tmp == "" || tmp == "." {
return defaultPath
}
if !path.IsAbs(tmp) {
@ -349,3 +351,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
return cfg
})
}

func WithHTTPClient(c *http.Client) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.HTTPClient = c
return cfg
})
}

@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl

// Copyright The OpenTelemetry Authors

@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl

// Copyright The OpenTelemetry Authors

@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/partialsuccess.go

// Copyright The OpenTelemetry Authors

@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/retry/retry.go.tmpl

// Copyright The OpenTelemetry Authors
@ -14,7 +14,7 @@ import (
"fmt"
"time"

"github.com/cenkalti/backoff/v4"
"github.com/cenkalti/backoff/v5"
)

// DefaultConfig are the recommended defaults to use.
@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: c.MaxInterval,
MaxElapsedTime: c.MaxElapsedTime,
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}
b.Reset()

maxElapsedTime := c.MaxElapsedTime
startTime := time.Now()

for {
err := fn(ctx)
if err == nil {
@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
return err
}

bOff := b.NextBackOff()
if bOff == backoff.Stop {
if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
return fmt.Errorf("max retry time elapsed: %w", err)
}

// Wait for the greater of the backoff or throttle delay.
var delay time.Duration
if bOff > throttle {
delay = bOff
} else {
elapsed := b.GetElapsedTime()
if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
return fmt.Errorf("max retry time would elapse: %w", err)
}
delay = throttle
bOff := b.NextBackOff()
delay := max(throttle, bOff)

elapsed := time.Since(startTime)
if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime {
return fmt.Errorf("max retry time would elapse: %w", err)
}

if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
@ -136,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error {
select {
case <-timer.C:
default:
return ctx.Err()
return context.Cause(ctx)
}
case <-timer.C:
}
5
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
generated
vendored
@ -199,8 +199,9 @@ func WithTimeout(duration time.Duration) Option {
// explicitly returns a backoff time in the response. That time will take
// precedence over these settings.
//
// These settings do not define any network retry strategy. That is entirely
// handled by the gRPC ClientConn.
// These settings define the retry strategy implemented by the exporter.
// These settings do not define any network retry strategy.
// That is handled by the gRPC ClientConn.
//
// If unset, the default retry policy will be used. It will retry the export
// 5 seconds after receiving a retryable error and increase exponentially
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
generated
vendored
@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"

// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
func Version() string {
return "1.35.0"
return "1.38.0"
}
30
vendor/go.opentelemetry.io/otel/sdk/LICENSE
generated
vendored
@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

--------------------------------------------------------------------------------

Copyright 2009 The Go Authors.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2
vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
generated
vendored
@ -1,6 +1,8 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package env provides types and functionality for environment variable support
// in the OpenTelemetry SDK.
package env // import "go.opentelemetry.io/otel/sdk/internal/env"

import (
4
vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
generated
vendored
@ -19,7 +19,7 @@ import (
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
// will also enable this).
var Resource = newFeature("RESOURCE", func(v string) (string, bool) {
if strings.ToLower(v) == "true" {
if strings.EqualFold(v, "true") {
return v, true
}
return "", false
@ -59,7 +59,7 @@ func (f Feature[T]) Lookup() (v T, ok bool) {
return f.parse(vRaw)
}

// Enabled returns if the feature is enabled.
// Enabled reports whether the feature is enabled.
func (f Feature[T]) Enabled() bool {
_, ok := f.Lookup()
return ok
30
vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE
generated
vendored
@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

--------------------------------------------------------------------------------

Copyright 2009 The Go Authors.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46
vendor/go.opentelemetry.io/otel/sdk/metric/config.go
generated
vendored
@ -7,6 +7,7 @@ import (
"context"
"errors"
"os"
"strconv"
"strings"
"sync"

@ -17,12 +18,15 @@ import (

// config contains configuration options for a MeterProvider.
type config struct {
res *resource.Resource
readers []Reader
views []View
exemplarFilter exemplar.Filter
res *resource.Resource
readers []Reader
views []View
exemplarFilter exemplar.Filter
cardinalityLimit int
}

const defaultCardinalityLimit = 0

// readerSignals returns a force-flush and shutdown function for a
// MeterProvider to call in their respective options. All Readers c contains
// will have their force-flush and shutdown methods unified into returned
@ -69,8 +73,9 @@ func unifyShutdown(funcs []func(context.Context) error) func(context.Context) er
// newConfig returns a config configured with options.
func newConfig(options []Option) config {
conf := config{
res: resource.Default(),
exemplarFilter: exemplar.TraceBasedFilter,
res: resource.Default(),
exemplarFilter: exemplar.TraceBasedFilter,
cardinalityLimit: cardinalityLimitFromEnv(),
}
for _, o := range meterProviderOptionsFromEnv() {
conf = o.apply(conf)
@ -155,6 +160,21 @@ func WithExemplarFilter(filter exemplar.Filter) Option {
})
}

// WithCardinalityLimit sets the cardinality limit for the MeterProvider.
//
// The cardinality limit is the hard limit on the number of metric datapoints
// that can be collected for a single instrument in a single collect cycle.
//
// Setting this to a zero or negative value means no limit is applied.
func WithCardinalityLimit(limit int) Option {
// For backward compatibility, the environment variable `OTEL_GO_X_CARDINALITY_LIMIT`
// can also be used to set this value.
return optionFunc(func(cfg config) config {
cfg.cardinalityLimit = limit
return cfg
})
}

func meterProviderOptionsFromEnv() []Option {
var opts []Option
// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar
@ -170,3 +190,17 @@ func meterProviderOptionsFromEnv() []Option {
}
return opts
}

func cardinalityLimitFromEnv() int {
const cardinalityLimitKey = "OTEL_GO_X_CARDINALITY_LIMIT"
v := strings.TrimSpace(os.Getenv(cardinalityLimitKey))
if v == "" {
return defaultCardinalityLimit
}
n, err := strconv.Atoi(v)
if err != nil {
otel.Handle(err)
return defaultCardinalityLimit
}
return n
}
24
vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
generated
vendored
@ -39,6 +39,30 @@
// Meter.RegisterCallback and Registration.Unregister to add and remove
// callbacks without leaking memory.
//
// # Cardinality Limits
//
// Cardinality refers to the number of unique attributes collected. High cardinality can lead to
// excessive memory usage, increased storage costs, and backend performance issues.
//
// Currently, the OpenTelemetry Go Metric SDK does not enforce a cardinality limit by default
// (note that this may change in a future release). Use [WithCardinalityLimit] to set the
// cardinality limit as desired.
//
// New attribute sets are dropped when the cardinality limit is reached. The measurement of
// these sets are aggregated into
// a special attribute set containing attribute.Bool("otel.metric.overflow", true).
// This ensures total metric values (e.g., Sum, Count) remain correct for the
// collection cycle, but information about the specific dropped sets
// is not preserved.
//
// Recommendations:
//
// - Set the limit based on the theoretical maximum combinations or expected
// active combinations. The OpenTelemetry Specification recommends a default of 2000.
// - A too high of a limit increases worst-case memory overhead in the SDK and may cause downstream
// issues for databases that cannot handle high cardinality.
// - A too low of a limit causes loss of attribute detail as more data falls into overflow.
//
// See [go.opentelemetry.io/otel/metric] for more information about
// the metric API.
//
20
vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
generated
vendored
@ -18,7 +18,10 @@ type ExemplarReservoirProviderSelector func(Aggregation) exemplar.ReservoirProvi

// reservoirFunc returns the appropriately configured exemplar reservoir
// creation func based on the passed InstrumentKind and filter configuration.
func reservoirFunc[N int64 | float64](provider exemplar.ReservoirProvider, filter exemplar.Filter) func(attribute.Set) aggregate.FilteredExemplarReservoir[N] {
func reservoirFunc[N int64 | float64](
provider exemplar.ReservoirProvider,
filter exemplar.Filter,
) func(attribute.Set) aggregate.FilteredExemplarReservoir[N] {
return func(attrs attribute.Set) aggregate.FilteredExemplarReservoir[N] {
return aggregate.NewFilteredExemplarReservoir[N](filter, provider(attrs))
}
@ -55,10 +58,7 @@ func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.Reservoi
// SimpleFixedSizeExemplarReservoir with a reservoir equal to the
// smaller of the maximum number of buckets configured on the
// aggregation or twenty (e.g. min(20, max_buckets)).
n = int(a.MaxSize)
if n > 20 {
n = 20
}
n = min(int(a.MaxSize), 20)
} else {
// https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir
// This Exemplar reservoir MAY take a configuration parameter for
@ -66,11 +66,11 @@ func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.Reservoi
// provided, the default size MAY be the number of possible
// concurrent threads (e.g. number of CPUs) to help reduce
// contention. Otherwise, a default size of 1 SHOULD be used.
n = runtime.NumCPU()
if n < 1 {
// Should never be the case, but be defensive.
n = 1
}
//
// Use runtime.GOMAXPROCS instead of runtime.NumCPU to support
// containerized environments that may have less than the total number
// of logical CPUs available on the local machine allocated to it.
n = max(runtime.GOMAXPROCS(0), 1)
}

return exemplar.FixedSizeReservoirProvider(n)
4
vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go
generated
vendored
@ -24,11 +24,11 @@ func TraceBasedFilter(ctx context.Context) bool {
}

// AlwaysOnFilter is a [Filter] that always offers measurements.
func AlwaysOnFilter(ctx context.Context) bool {
func AlwaysOnFilter(context.Context) bool {
return true
}

// AlwaysOffFilter is a [Filter] that never offers measurements.
func AlwaysOffFilter(ctx context.Context) bool {
func AlwaysOffFilter(context.Context) bool {
return false
}
50
vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go
generated
vendored
@ -6,7 +6,7 @@ package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
import (
"context"
"math"
"math/rand"
"math/rand/v2"
"time"

"go.opentelemetry.io/otel/attribute"
@ -14,7 +14,7 @@ import (

// FixedSizeReservoirProvider returns a provider of [FixedSizeReservoir].
func FixedSizeReservoirProvider(k int) ReservoirProvider {
return func(_ attribute.Set) Reservoir {
return func(attribute.Set) Reservoir {
return NewFixedSizeReservoir(k)
}
}
@ -44,18 +44,11 @@ type FixedSizeReservoir struct {
// w is the largest random number in a distribution that is used to compute
// the next next.
w float64

// rng is used to make sampling decisions.
//
// Do not use crypto/rand. There is no reason for the decrease in performance
// given this is not a security sensitive decision.
rng *rand.Rand
}

func newFixedSizeReservoir(s *storage) *FixedSizeReservoir {
r := &FixedSizeReservoir{
storage: s,
rng: rand.New(rand.NewSource(time.Now().UnixNano())),
}
r.reset()
return r
@ -63,27 +56,16 @@ func newFixedSizeReservoir(s *storage) *FixedSizeReservoir {

// randomFloat64 returns, as a float64, a uniform pseudo-random number in the
// open interval (0.0,1.0).
func (r *FixedSizeReservoir) randomFloat64() float64 {
// TODO: This does not return a uniform number. rng.Float64 returns a
// uniformly random int in [0,2^53) that is divided by 2^53. Meaning it
// returns multiples of 2^-53, and not all floating point numbers between 0
// and 1 (i.e. for values less than 2^-4 the 4 last bits of the significand
// are always going to be 0).
func (*FixedSizeReservoir) randomFloat64() float64 {
// TODO: Use an algorithm that avoids rejection sampling. For example:
//
// An alternative algorithm should be considered that will actually return
// a uniform number in the interval (0,1). For example, since the default
// rand source provides a uniform distribution for Int63, this can be
// converted following the prototypical code of Mersenne Twister 64 (Takuji
// Nishimura and Makoto Matsumoto:
// http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c)
//
// (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0)
//
// There are likely many other methods to explore here as well.

f := r.rng.Float64()
// const precision = 1 << 53 // 2^53
// // Generate an integer in [1, 2^53 - 1]
// v := rand.Uint64() % (precision - 1) + 1
// return float64(v) / float64(precision)
f := rand.Float64()
for f == 0 {
f = r.rng.Float64()
f = rand.Float64()
}
return f
}
@ -143,13 +125,11 @@ func (r *FixedSizeReservoir) Offer(ctx context.Context, t time.Time, n Value, a

if int(r.count) < cap(r.store) {
r.store[r.count] = newMeasurement(ctx, t, n, a)
} else {
if r.count == r.next {
// Overwrite a random existing measurement with the one offered.
idx := int(r.rng.Int63n(int64(cap(r.store))))
r.store[idx] = newMeasurement(ctx, t, n, a)
r.advance()
}
} else if r.count == r.next {
// Overwrite a random existing measurement with the one offered.
idx := int(rand.Int64N(int64(cap(r.store))))
r.store[idx] = newMeasurement(ctx, t, n, a)
r.advance()
}
r.count++
}
2
vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go
generated
vendored
@ -16,7 +16,7 @@ import (
func HistogramReservoirProvider(bounds []float64) ReservoirProvider {
cp := slices.Clone(bounds)
slices.Sort(cp)
return func(_ attribute.Set) Reservoir {
return func(attribute.Set) Reservoir {
return NewHistogramReservoir(cp)
}
}
14
vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
generated
vendored
@ -28,7 +28,7 @@ type InstrumentKind uint8
const (
// instrumentKindUndefined is an undefined instrument kind, it should not
// be used by any initialized type.
instrumentKindUndefined InstrumentKind = 0 // nolint:deadcode,varcheck,unused
instrumentKindUndefined InstrumentKind = 0 // nolint:unused
// InstrumentKindCounter identifies a group of instruments that record
// increasing values synchronously with the code path they are measuring.
InstrumentKindCounter InstrumentKind = 1
@ -75,7 +75,7 @@ type Instrument struct {
nonComparable // nolint: unused
}

// IsEmpty returns if all Instrument fields are their zero-value.
// IsEmpty reports whether all Instrument fields are their zero-value.
func (i Instrument) IsEmpty() bool {
return i.Name == "" &&
i.Description == "" &&
@ -204,11 +204,15 @@ func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.Record
i.aggregate(ctx, val, c.Attributes())
}

func (i *int64Inst) Enabled(_ context.Context) bool {
func (i *int64Inst) Enabled(context.Context) bool {
return len(i.measures) != 0
}

func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method.
func (i *int64Inst) aggregate(
ctx context.Context,
val int64,
s attribute.Set,
) { // nolint:revive // okay to shadow pkg with method.
for _, in := range i.measures {
in(ctx, val, s)
}
@ -241,7 +245,7 @@ func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.Re
i.aggregate(ctx, val, c.Attributes())
}

func (i *float64Inst) Enabled(_ context.Context) bool {
func (i *float64Inst) Enabled(context.Context) bool {
return len(i.measures) != 0
}
10
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
generated
vendored
@ -121,7 +121,10 @@ func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {

// ExplicitBucketHistogram returns a histogram aggregate function input and
// output.
func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
func (b Builder[N]) ExplicitBucketHistogram(
boundaries []float64,
noMinMax, noSum bool,
) (Measure[N], ComputeAggregation) {
h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
@ -133,7 +136,10 @@ func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSu

// ExponentialBucketHistogram returns a histogram aggregate function input and
// output.
func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
func (b Builder[N]) ExponentialBucketHistogram(
maxSize, maxScale int32,
noMinMax, noSum bool,
) (Measure[N], ComputeAggregation) {
h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
4
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go
generated
vendored
@ -18,10 +18,10 @@ func dropReservoir[N int64 | float64](attribute.Set) FilteredExemplarReservoir[N
type dropRes[N int64 | float64] struct{}

// Offer does nothing, all measurements offered will be dropped.
func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {}
func (*dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {}

// Collect resets dest. No exemplars will ever be returned.
func (r *dropRes[N]) Collect(dest *[]exemplar.Exemplar) {
func (*dropRes[N]) Collect(dest *[]exemplar.Exemplar) {
clear(*dest) // Erase elements to let GC collect objects
*dest = (*dest)[:0]
}
63
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
generated
vendored
63
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
generated
vendored
@ -48,7 +48,12 @@ type expoHistogramDataPoint[N int64 | float64] struct {
|
||||
zeroCount uint64
|
||||
}
|
||||
|
||||
func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] { // nolint:revive // we need this control flag
|
||||
func newExpoHistogramDataPoint[N int64 | float64](
|
||||
attrs attribute.Set,
|
||||
maxSize int,
|
||||
maxScale int32,
|
||||
noMinMax, noSum bool,
|
||||
) *expoHistogramDataPoint[N] { // nolint:revive // we need this control flag
|
||||
f := math.MaxFloat64
|
||||
ma := N(f) // if N is int64, max will overflow to -9223372036854775808
|
||||
mi := N(-f)
|
||||
@ -178,8 +183,8 @@ func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin int32, length int)
|
||||
|
||||
var count int32
|
||||
for high-low >= p.maxSize {
|
||||
low = low >> 1
|
||||
high = high >> 1
|
||||
low >>= 1
|
||||
high >>= 1
|
||||
count++
|
||||
if count > expoMaxScale-expoMinScale {
|
||||
return count
|
||||
@ -220,7 +225,7 @@ func (b *expoBuckets) record(bin int32) {
|
||||
b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
|
||||
}
|
||||
|
||||
copy(b.counts[shift:origLen+int(shift)], b.counts[:])
|
||||
copy(b.counts[shift:origLen+int(shift)], b.counts)
|
||||
b.counts = b.counts[:newLength]
|
||||
for i := 1; i < int(shift); i++ {
|
||||
b.counts[i] = 0
|
||||
@ -259,7 +264,7 @@ func (b *expoBuckets) downscale(delta int32) {
|
||||
// new Counts: [4, 14, 30, 10]
|
||||
|
||||
if len(b.counts) <= 1 || delta < 1 {
|
||||
b.startBin = b.startBin >> delta
|
||||
b.startBin >>= delta
|
||||
return
|
||||
}
|
||||
|
||||
@ -277,13 +282,18 @@ func (b *expoBuckets) downscale(delta int32) {
|
||||
|
||||
lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps)
|
||||
b.counts = b.counts[:lastIdx+1]
|
||||
b.startBin = b.startBin >> delta
|
||||
b.startBin >>= delta
|
||||
}
|
||||
|
||||
// newExponentialHistogram returns an Aggregator that summarizes a set of
|
||||
// measurements as an exponential histogram. Each histogram is scoped by attributes
|
||||
// and the aggregation cycle the measurements were made in.
|
||||
func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *expoHistogram[N] {
|
||||
func newExponentialHistogram[N int64 | float64](
|
||||
maxSize, maxScale int32,
|
||||
noMinMax, noSum bool,
|
||||
limit int,
|
||||
r func(attribute.Set) FilteredExemplarReservoir[N],
|
||||
) *expoHistogram[N] {
|
||||
return &expoHistogram[N]{
|
||||
noSum: noSum,
|
||||
noMinMax: noMinMax,
|
||||
@ -314,7 +324,12 @@ type expoHistogram[N int64 | float64] struct {
|
||||
start time.Time
|
||||
}
|
||||
|
||||
func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
|
||||
func (e *expoHistogram[N]) measure(
|
||||
ctx context.Context,
|
||||
value N,
|
||||
fltrAttr attribute.Set,
|
||||
droppedAttr []attribute.KeyValue,
|
||||
) {
|
||||
// Ignore NaN and infinity.
|
||||
if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) {
|
||||
return
|
||||
@ -335,7 +350,9 @@ func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attrib
|
||||
v.res.Offer(ctx, value, droppedAttr)
|
||||
}
|
||||
|
||||
func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
|
||||
func (e *expoHistogram[N]) delta(
|
||||
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
|
||||
) int {
|
||||
t := now()
|
||||
|
||||
// If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
|
||||
@ -360,11 +377,19 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
|
||||
hDPts[i].ZeroThreshold = 0.0
|
||||
|
||||
hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
|
||||
hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
|
||||
hDPts[i].PositiveBucket.Counts = reset(
|
||||
hDPts[i].PositiveBucket.Counts,
|
||||
len(val.posBuckets.counts),
|
||||
len(val.posBuckets.counts),
|
||||
)
|
||||
copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
|
||||
|
||||
hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
|
||||
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
|
||||
hDPts[i].NegativeBucket.Counts = reset(
|
||||
hDPts[i].NegativeBucket.Counts,
|
||||
len(val.negBuckets.counts),
|
||||
len(val.negBuckets.counts),
|
||||
)
|
||||
copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
|
||||
|
||||
if !e.noSum {
|
||||
@ -388,7 +413,9 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
|
||||
return n
|
||||
}
|
||||
|
||||
func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
|
||||
func (e *expoHistogram[N]) cumulative(
|
||||
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
|
||||
) int {
|
||||
t := now()
|
||||
|
||||
// If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
|
||||
@ -413,11 +440,19 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
|
||||
hDPts[i].ZeroThreshold = 0.0
|
||||
|
||||
hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
|
||||
hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
|
||||
hDPts[i].PositiveBucket.Counts = reset(
|
||||
hDPts[i].PositiveBucket.Counts,
|
||||
len(val.posBuckets.counts),
|
||||
len(val.posBuckets.counts),
|
||||
)
|
||||
copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
|
||||
|
||||
hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
|
||||
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
|
||||
hDPts[i].NegativeBucket.Counts = reset(
|
||||
hDPts[i].NegativeBucket.Counts,
|
||||
len(val.negBuckets.counts),
|
||||
len(val.negBuckets.counts),
|
||||
)
|
||||
copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
|
||||
|
||||
if !e.noSum {
|
||||
|
||||
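The downscale hunk above walks through the example that ends in `// new Counts: [4, 14, 30, 10]`. A minimal standalone sketch of that bucket-merging step (not the vendored implementation; it only illustrates that absolute bin `b` collapses into bin `b >> delta`):

```go
package main

import "fmt"

// downscale merges exponential-histogram buckets when the scale drops by
// delta: the count recorded in absolute bin b moves into bin b>>delta.
// Illustrative sketch only, not the SDK's expoBuckets implementation.
func downscale(startBin int32, counts []uint64, delta int32) (int32, []uint64) {
	if len(counts) <= 1 || delta < 1 {
		return startBin >> delta, counts
	}
	newStart := startBin >> delta
	lastBin := (startBin + int32(len(counts)) - 1) >> delta
	merged := make([]uint64, lastBin-newStart+1)
	for i, c := range counts {
		merged[((startBin+int32(i))>>delta)-newStart] += c
	}
	return newStart, merged
}

func main() {
	// Mirrors the comment in the hunk above: bins -6..4, delta 2.
	start, counts := downscale(-6, []uint64{3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 2)
	fmt.Println(start, counts) // -2 [4 14 30 10]
}
```

Running it with the commented input (offset -6, delta 2) reproduces the new offset -2 and counts [4 14 30 10] from the example above.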
5
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go
generated
vendored
@ -33,7 +33,10 @@ type filteredExemplarReservoir[N int64 | float64] struct {
|
||||
|
||||
// NewFilteredExemplarReservoir creates a [FilteredExemplarReservoir] which only offers values
|
||||
// that are allowed by the filter.
|
||||
func NewFilteredExemplarReservoir[N int64 | float64](f exemplar.Filter, r exemplar.Reservoir) FilteredExemplarReservoir[N] {
|
||||
func NewFilteredExemplarReservoir[N int64 | float64](
|
||||
f exemplar.Filter,
|
||||
r exemplar.Reservoir,
|
||||
) FilteredExemplarReservoir[N] {
|
||||
return &filteredExemplarReservoir[N]{
|
||||
filter: f,
|
||||
reservoir: r,
|
||||
|
||||
29
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
generated
vendored
@ -53,7 +53,12 @@ type histValues[N int64 | float64] struct {
|
||||
valuesMu sync.Mutex
|
||||
}
|
||||
|
||||
func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histValues[N] {
|
||||
func newHistValues[N int64 | float64](
|
||||
bounds []float64,
|
||||
noSum bool,
|
||||
limit int,
|
||||
r func(attribute.Set) FilteredExemplarReservoir[N],
|
||||
) *histValues[N] {
|
||||
// The responsibility of keeping all buckets correctly associated with the
|
||||
// passed boundaries is ultimately this type's responsibility. Make a copy
|
||||
// here so we can always guarantee this. Or, in the case of failure, have
|
||||
@ -71,7 +76,12 @@ func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r
|
||||
|
||||
// Aggregate records the measurement value, scoped by attr, and aggregates it
|
||||
// into a histogram.
|
||||
func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
|
||||
func (s *histValues[N]) measure(
|
||||
ctx context.Context,
|
||||
value N,
|
||||
fltrAttr attribute.Set,
|
||||
droppedAttr []attribute.KeyValue,
|
||||
) {
|
||||
// This search will return an index in the range [0, len(s.bounds)], where
|
||||
// it will return len(s.bounds) if value is greater than the last element
|
||||
// of s.bounds. This aligns with the buckets in that the length of buckets
|
||||
@ -108,7 +118,12 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute
|
||||
|
||||
// newHistogram returns an Aggregator that summarizes a set of measurements as
|
||||
// an histogram.
|
||||
func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histogram[N] {
|
||||
func newHistogram[N int64 | float64](
|
||||
boundaries []float64,
|
||||
noMinMax, noSum bool,
|
||||
limit int,
|
||||
r func(attribute.Set) FilteredExemplarReservoir[N],
|
||||
) *histogram[N] {
|
||||
return &histogram[N]{
|
||||
histValues: newHistValues[N](boundaries, noSum, limit, r),
|
||||
noMinMax: noMinMax,
|
||||
@ -125,7 +140,9 @@ type histogram[N int64 | float64] struct {
|
||||
start time.Time
|
||||
}
|
||||
|
||||
func (s *histogram[N]) delta(dest *metricdata.Aggregation) int {
|
||||
func (s *histogram[N]) delta(
|
||||
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
|
||||
) int {
|
||||
t := now()
|
||||
|
||||
// If *dest is not a metricdata.Histogram, memory reuse is missed. In that
|
||||
@ -175,7 +192,9 @@ func (s *histogram[N]) delta(dest *metricdata.Aggregation) int {
|
||||
return n
|
||||
}
|
||||
|
||||
func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int {
|
||||
func (s *histogram[N]) cumulative(
|
||||
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
|
||||
) int {
|
||||
t := now()
|
||||
|
||||
// If *dest is not a metricdata.Histogram, memory reuse is missed. In that
|
||||
|
||||
21
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
generated
vendored
@ -55,7 +55,9 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.
|
||||
s.values[attr.Equivalent()] = d
|
||||
}
|
||||
|
||||
func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int {
|
||||
func (s *lastValue[N]) delta(
|
||||
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
|
||||
) int {
|
||||
t := now()
|
||||
// Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
|
||||
// the DataPoints is missed (better luck next time).
|
||||
@ -75,7 +77,9 @@ func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int {
|
||||
return n
|
||||
}
|
||||
|
||||
func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int {
|
||||
func (s *lastValue[N]) cumulative(
|
||||
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
|
||||
) int {
|
||||
t := now()
|
||||
// Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
|
||||
// the DataPoints is missed (better luck next time).
|
||||
@ -114,7 +118,10 @@ func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) in
|
||||
|
||||
// newPrecomputedLastValue returns an aggregator that summarizes a set of
|
||||
// observations as the last one made.
|
||||
func newPrecomputedLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedLastValue[N] {
|
||||
func newPrecomputedLastValue[N int64 | float64](
|
||||
limit int,
|
||||
r func(attribute.Set) FilteredExemplarReservoir[N],
|
||||
) *precomputedLastValue[N] {
|
||||
return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)}
|
||||
}
|
||||
|
||||
@ -123,7 +130,9 @@ type precomputedLastValue[N int64 | float64] struct {
|
||||
*lastValue[N]
|
||||
}
|
||||
|
||||
func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int {
|
||||
func (s *precomputedLastValue[N]) delta(
|
||||
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
|
||||
) int {
|
||||
t := now()
|
||||
// Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
|
||||
// the DataPoints is missed (better luck next time).
|
||||
@ -143,7 +152,9 @@ func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int {
|
||||
return n
|
||||
}
|
||||
|
||||
func (s *precomputedLastValue[N]) cumulative(dest *metricdata.Aggregation) int {
|
||||
func (s *precomputedLastValue[N]) cumulative(
|
||||
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
|
||||
) int {
|
||||
t := now()
|
||||
// Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
|
||||
// the DataPoints is missed (better luck next time).
|
||||
|
||||
22
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
generated
vendored
@ -70,7 +70,9 @@ type sum[N int64 | float64] struct {
|
||||
start time.Time
|
||||
}
|
||||
|
||||
func (s *sum[N]) delta(dest *metricdata.Aggregation) int {
|
||||
func (s *sum[N]) delta(
|
||||
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
|
||||
) int {
|
||||
t := now()
|
||||
|
||||
// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
|
||||
@ -105,7 +107,9 @@ func (s *sum[N]) delta(dest *metricdata.Aggregation) int {
|
||||
return n
|
||||
}
|
||||
|
||||
func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
|
||||
func (s *sum[N]) cumulative(
|
||||
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
|
||||
) int {
|
||||
t := now()
|
||||
|
||||
// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
|
||||
@ -143,7 +147,11 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
|
||||
// newPrecomputedSum returns an aggregator that summarizes a set of
|
||||
// observations as their arithmetic sum. Each sum is scoped by attributes and
|
||||
// the aggregation cycle the measurements were made in.
|
||||
func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedSum[N] {
|
||||
func newPrecomputedSum[N int64 | float64](
|
||||
monotonic bool,
|
||||
limit int,
|
||||
r func(attribute.Set) FilteredExemplarReservoir[N],
|
||||
) *precomputedSum[N] {
|
||||
return &precomputedSum[N]{
|
||||
valueMap: newValueMap[N](limit, r),
|
||||
monotonic: monotonic,
|
||||
@ -161,7 +169,9 @@ type precomputedSum[N int64 | float64] struct {
|
||||
reported map[attribute.Distinct]N
|
||||
}
|
||||
|
||||
func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int {
|
||||
func (s *precomputedSum[N]) delta(
|
||||
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
|
||||
) int {
|
||||
t := now()
|
||||
newReported := make(map[attribute.Distinct]N)
|
||||
|
||||
@ -202,7 +212,9 @@ func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int {
|
||||
return n
|
||||
}
|
||||
|
||||
func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int {
|
||||
func (s *precomputedSum[N]) cumulative(
|
||||
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
|
||||
) int {
|
||||
t := now()
|
||||
|
||||
// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
|
||||
|
||||
1
vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
generated
vendored
@ -1,6 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package internal provides internal functionality for the metric package.
package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"

// ReuseSlice returns a zeroed view of slice if its capacity is greater than or
35
vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
generated
vendored
@ -1,47 +1,16 @@
# Experimental Features

The metric SDK contains features that have not yet stabilized in the OpenTelemetry specification.
These features are added to the OpenTelemetry Go metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
The Metric SDK contains features that have not yet stabilized in the OpenTelemetry specification.
These features are added to the OpenTelemetry Go Metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.

These feature may change in backwards incompatible ways as feedback is applied.
See the [Compatibility and Stability](#compatibility-and-stability) section for more information.

## Features

- [Cardinality Limit](#cardinality-limit)
- [Exemplars](#exemplars)
- [Instrument Enabled](#instrument-enabled)

### Cardinality Limit

The cardinality limit is the hard limit on the number of metric streams that can be collected for a single instrument.

This experimental feature can be enabled by setting the `OTEL_GO_X_CARDINALITY_LIMIT` environment value.
The value must be an integer value.
All other values are ignored.

If the value set is less than or equal to `0`, no limit will be applied.

#### Examples

Set the cardinality limit to 2000.

```console
export OTEL_GO_X_CARDINALITY_LIMIT=2000
```

Set an infinite cardinality limit (functionally equivalent to disabling the feature).

```console
export OTEL_GO_X_CARDINALITY_LIMIT=-1
```

Disable the cardinality limit.

```console
unset OTEL_GO_X_CARDINALITY_LIMIT
```

### Exemplars

A sample of measurements made may be exported directly as a set of exemplars.
22
vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
generated
vendored
@ -10,25 +10,8 @@ package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x"
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// CardinalityLimit is an experimental feature flag that defines if
|
||||
// cardinality limits should be applied to the recorded metric data-points.
|
||||
//
|
||||
// To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment
|
||||
// variable to the integer limit value you want to use.
|
||||
//
|
||||
// Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0
|
||||
// will disable the cardinality limits.
|
||||
var CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) {
|
||||
n, err := strconv.Atoi(v)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return n, true
|
||||
})
|
||||
|
||||
// Feature is an experimental feature control flag. It provides a uniform way
|
||||
// to interact with these feature flags and parse their values.
|
||||
type Feature[T any] struct {
|
||||
@ -36,6 +19,7 @@ type Feature[T any] struct {
|
||||
parse func(v string) (T, bool)
|
||||
}
|
||||
|
||||
//nolint:unused
|
||||
func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
|
||||
const envKeyRoot = "OTEL_GO_X_"
|
||||
return Feature[T]{
|
||||
@ -63,7 +47,7 @@ func (f Feature[T]) Lookup() (v T, ok bool) {
|
||||
return f.parse(vRaw)
|
||||
}
|
||||
|
||||
// Enabled returns if the feature is enabled.
|
||||
// Enabled reports whether the feature is enabled.
|
||||
func (f Feature[T]) Enabled() bool {
|
||||
_, ok := f.Lookup()
|
||||
return ok
|
||||
@ -73,7 +57,7 @@ func (f Feature[T]) Enabled() bool {
|
||||
//
|
||||
// EnabledInstrument interface is implemented by synchronous instruments.
|
||||
type EnabledInstrument interface {
|
||||
// Enabled returns whether the instrument will process measurements for the given context.
|
||||
// Enabled reports whether the instrument will process measurements for the given context.
|
||||
//
|
||||
// This function can be used in places where measuring an instrument
|
||||
// would result in computationally expensive operations.
|
||||
|
||||
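The removed `CardinalityLimit` block above shows the shape of these `OTEL_GO_X_*` feature flags: read a suffixed environment variable and parse it, reporting ok only for valid values. A minimal sketch of that lookup pattern, using a hypothetical flag name (the real flags are built through the unexported `newFeature` helper shown above):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// lookupIntFlag mirrors the lookup shape of the experimental OTEL_GO_X_*
// flags: read the suffixed environment variable and parse it, reporting ok
// only when a value is present and valid. Illustrative sketch only.
func lookupIntFlag(suffix string) (int, bool) {
	v, ok := os.LookupEnv("OTEL_GO_X_" + suffix)
	if !ok {
		return 0, false
	}
	n, err := strconv.Atoi(v)
	if err != nil {
		return 0, false
	}
	return n, true
}

func main() {
	os.Setenv("OTEL_GO_X_EXAMPLE_LIMIT", "2000") // hypothetical flag name
	if n, ok := lookupIntFlag("EXAMPLE_LIMIT"); ok {
		fmt.Println("flag enabled with value", n)
	}
}
```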
6
vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
generated
vendored
@ -58,7 +58,9 @@ func (mr *ManualReader) temporality(kind InstrumentKind) metricdata.Temporality
|
||||
}
|
||||
|
||||
// aggregation returns what Aggregation to use for kind.
|
||||
func (mr *ManualReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type.
|
||||
func (mr *ManualReader) aggregation(
|
||||
kind InstrumentKind,
|
||||
) Aggregation { // nolint:revive // import-shadow for method scoped by type.
|
||||
return mr.aggregationSelector(kind)
|
||||
}
|
||||
|
||||
@ -127,7 +129,7 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr
|
||||
}
|
||||
|
||||
// MarshalLog returns logging data about the ManualReader.
|
||||
func (r *ManualReader) MarshalLog() interface{} {
|
||||
func (r *ManualReader) MarshalLog() any {
|
||||
r.mu.Lock()
|
||||
down := r.isShutdown
|
||||
r.mu.Unlock()
|
||||
|
||||
77
vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
generated
vendored
@ -12,7 +12,6 @@ import (
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/embedded"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
|
||||
)
|
||||
|
||||
@ -82,7 +81,10 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption)
|
||||
// Int64UpDownCounter returns a new instrument identified by name and
|
||||
// configured with options. The instrument is used to synchronously record
|
||||
// int64 measurements during a computational operation.
|
||||
func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
|
||||
func (m *meter) Int64UpDownCounter(
|
||||
name string,
|
||||
options ...metric.Int64UpDownCounterOption,
|
||||
) (metric.Int64UpDownCounter, error) {
|
||||
cfg := metric.NewInt64UpDownCounterConfig(options...)
|
||||
const kind = InstrumentKindUpDownCounter
|
||||
p := int64InstProvider{m}
|
||||
@ -174,7 +176,10 @@ func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int6
|
||||
// Description, and Unit, only the first set of callbacks provided are used.
|
||||
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
|
||||
// if instrumentation can be created multiple times with different callbacks.
|
||||
func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
|
||||
func (m *meter) Int64ObservableCounter(
|
||||
name string,
|
||||
options ...metric.Int64ObservableCounterOption,
|
||||
) (metric.Int64ObservableCounter, error) {
|
||||
cfg := metric.NewInt64ObservableCounterConfig(options...)
|
||||
id := Instrument{
|
||||
Name: name,
|
||||
@ -195,7 +200,10 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser
|
||||
// Description, and Unit, only the first set of callbacks provided are used.
|
||||
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
|
||||
// if instrumentation can be created multiple times with different callbacks.
|
||||
func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
|
||||
func (m *meter) Int64ObservableUpDownCounter(
|
||||
name string,
|
||||
options ...metric.Int64ObservableUpDownCounterOption,
|
||||
) (metric.Int64ObservableUpDownCounter, error) {
|
||||
cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
|
||||
id := Instrument{
|
||||
Name: name,
|
||||
@ -216,7 +224,10 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6
|
||||
// Description, and Unit, only the first set of callbacks provided are used.
|
||||
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
|
||||
// if instrumentation can be created multiple times with different callbacks.
|
||||
func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
|
||||
func (m *meter) Int64ObservableGauge(
|
||||
name string,
|
||||
options ...metric.Int64ObservableGaugeOption,
|
||||
) (metric.Int64ObservableGauge, error) {
|
||||
cfg := metric.NewInt64ObservableGaugeConfig(options...)
|
||||
id := Instrument{
|
||||
Name: name,
|
||||
@ -246,7 +257,10 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti
|
||||
// Float64UpDownCounter returns a new instrument identified by name and
|
||||
// configured with options. The instrument is used to synchronously record
|
||||
// float64 measurements during a computational operation.
|
||||
func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
|
||||
func (m *meter) Float64UpDownCounter(
|
||||
name string,
|
||||
options ...metric.Float64UpDownCounterOption,
|
||||
) (metric.Float64UpDownCounter, error) {
|
||||
cfg := metric.NewFloat64UpDownCounterConfig(options...)
|
||||
const kind = InstrumentKindUpDownCounter
|
||||
p := float64InstProvider{m}
|
||||
@ -261,7 +275,10 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow
|
||||
// Float64Histogram returns a new instrument identified by name and configured
|
||||
// with options. The instrument is used to synchronously record the
|
||||
// distribution of float64 measurements during a computational operation.
|
||||
func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
|
||||
func (m *meter) Float64Histogram(
|
||||
name string,
|
||||
options ...metric.Float64HistogramOption,
|
||||
) (metric.Float64Histogram, error) {
|
||||
cfg := metric.NewFloat64HistogramConfig(options...)
|
||||
p := float64InstProvider{m}
|
||||
i, err := p.lookupHistogram(name, cfg)
|
||||
@ -289,7 +306,10 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption)
|
||||
|
||||
// float64ObservableInstrument returns a new observable identified by the Instrument.
|
||||
// It registers callbacks for each reader's pipeline.
|
||||
func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Float64Callback) (float64Observable, error) {
|
||||
func (m *meter) float64ObservableInstrument(
|
||||
id Instrument,
|
||||
callbacks []metric.Float64Callback,
|
||||
) (float64Observable, error) {
|
||||
key := instID{
|
||||
Name: id.Name,
|
||||
Description: id.Description,
|
||||
@ -338,7 +358,10 @@ func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Fl
|
||||
// Description, and Unit, only the first set of callbacks provided are used.
|
||||
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
|
||||
// if instrumentation can be created multiple times with different callbacks.
|
||||
func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
|
||||
func (m *meter) Float64ObservableCounter(
|
||||
name string,
|
||||
options ...metric.Float64ObservableCounterOption,
|
||||
) (metric.Float64ObservableCounter, error) {
|
||||
cfg := metric.NewFloat64ObservableCounterConfig(options...)
|
||||
id := Instrument{
|
||||
Name: name,
|
||||
@ -359,7 +382,10 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O
|
||||
// Description, and Unit, only the first set of callbacks provided are used.
|
||||
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
|
||||
// if instrumentation can be created multiple times with different callbacks.
|
||||
func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
|
||||
func (m *meter) Float64ObservableUpDownCounter(
|
||||
name string,
|
||||
options ...metric.Float64ObservableUpDownCounterOption,
|
||||
) (metric.Float64ObservableUpDownCounter, error) {
|
||||
cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
|
||||
id := Instrument{
|
||||
Name: name,
|
||||
@ -380,7 +406,10 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl
|
||||
// Description, and Unit, only the first set of callbacks provided are used.
|
||||
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
|
||||
// if instrumentation can be created multiple times with different callbacks.
|
||||
func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
|
||||
func (m *meter) Float64ObservableGauge(
|
||||
name string,
|
||||
options ...metric.Float64ObservableGaugeOption,
|
||||
) (metric.Float64ObservableGauge, error) {
|
||||
cfg := metric.NewFloat64ObservableGaugeConfig(options...)
|
||||
id := Instrument{
|
||||
Name: name,
|
||||
@ -393,7 +422,7 @@ func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64Obs
|
||||
}
|
||||
|
||||
func validateInstrumentName(name string) error {
|
||||
if len(name) == 0 {
|
||||
if name == "" {
|
||||
return fmt.Errorf("%w: %s: is empty", ErrInstrumentName, name)
|
||||
}
|
||||
if len(name) > 255 {
|
||||
@ -426,8 +455,10 @@ func warnRepeatedObservableCallbacks(id Instrument) {
|
||||
"Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
|
||||
id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
|
||||
)
|
||||
global.Warn("Repeated observable instrument creation with callbacks. Ignoring new callbacks. Use meter.RegisterCallback and Registration.Unregister to manage callbacks.",
|
||||
"instrument", inst,
|
||||
global.Warn(
|
||||
"Repeated observable instrument creation with callbacks. Ignoring new callbacks. Use meter.RegisterCallback and Registration.Unregister to manage callbacks.",
|
||||
"instrument",
|
||||
inst,
|
||||
)
|
||||
}
|
||||
|
||||
@ -613,7 +644,10 @@ func (p int64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]ag
|
||||
return p.int64Resolver.Aggregators(inst)
|
||||
}
|
||||
|
||||
func (p int64InstProvider) histogramAggs(name string, cfg metric.Int64HistogramConfig) ([]aggregate.Measure[int64], error) {
|
||||
func (p int64InstProvider) histogramAggs(
|
||||
name string,
|
||||
cfg metric.Int64HistogramConfig,
|
||||
) ([]aggregate.Measure[int64], error) {
|
||||
boundaries := cfg.ExplicitBucketBoundaries()
|
||||
aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err()
|
||||
if aggError != nil {
|
||||
@ -633,7 +667,7 @@ func (p int64InstProvider) histogramAggs(name string, cfg metric.Int64HistogramC
|
||||
|
||||
// lookup returns the resolved instrumentImpl.
|
||||
func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*int64Inst, error) {
|
||||
return p.meter.int64Insts.Lookup(instID{
|
||||
return p.int64Insts.Lookup(instID{
|
||||
Name: name,
|
||||
Description: desc,
|
||||
Unit: u,
|
||||
@ -646,7 +680,7 @@ func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*i
|
||||
|
||||
// lookupHistogram returns the resolved instrumentImpl.
|
||||
func (p int64InstProvider) lookupHistogram(name string, cfg metric.Int64HistogramConfig) (*int64Inst, error) {
|
||||
return p.meter.int64Insts.Lookup(instID{
|
||||
return p.int64Insts.Lookup(instID{
|
||||
Name: name,
|
||||
Description: cfg.Description(),
|
||||
Unit: cfg.Unit(),
|
||||
@ -671,7 +705,10 @@ func (p float64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]
|
||||
return p.float64Resolver.Aggregators(inst)
|
||||
}
|
||||
|
||||
func (p float64InstProvider) histogramAggs(name string, cfg metric.Float64HistogramConfig) ([]aggregate.Measure[float64], error) {
|
||||
func (p float64InstProvider) histogramAggs(
|
||||
name string,
|
||||
cfg metric.Float64HistogramConfig,
|
||||
) ([]aggregate.Measure[float64], error) {
|
||||
boundaries := cfg.ExplicitBucketBoundaries()
|
||||
aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err()
|
||||
if aggError != nil {
|
||||
@ -691,7 +728,7 @@ func (p float64InstProvider) histogramAggs(name string, cfg metric.Float64Histog
|
||||
|
||||
// lookup returns the resolved instrumentImpl.
|
||||
func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*float64Inst, error) {
|
||||
return p.meter.float64Insts.Lookup(instID{
|
||||
return p.float64Insts.Lookup(instID{
|
||||
Name: name,
|
||||
Description: desc,
|
||||
Unit: u,
|
||||
@ -704,7 +741,7 @@ func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (
|
||||
|
||||
// lookupHistogram returns the resolved instrumentImpl.
|
||||
func (p float64InstProvider) lookupHistogram(name string, cfg metric.Float64HistogramConfig) (*float64Inst, error) {
|
||||
return p.meter.float64Insts.Lookup(instID{
|
||||
return p.float64Insts.Lookup(instID{
|
||||
Name: name,
|
||||
Description: cfg.Description(),
|
||||
Unit: cfg.Unit(),
|
||||
|
||||
1
vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
generated
vendored
@ -1,6 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package metricdata provides types for the metric SDK data model.
package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"

import (
2
vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
generated
vendored
@ -10,7 +10,7 @@ type Temporality uint8

const (
// undefinedTemporality represents an unset Temporality.
//nolint:deadcode,unused,varcheck
//nolint:unused
undefinedTemporality Temporality = iota

// CumulativeTemporality defines a measurement interval that continues to
16
vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
generated
vendored
@ -114,7 +114,7 @@ func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *Peri
|
||||
cancel: cancel,
|
||||
done: make(chan struct{}),
|
||||
rmPool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
New: func() any {
|
||||
return &metricdata.ResourceMetrics{}
|
||||
},
|
||||
},
|
||||
@ -193,14 +193,16 @@ func (r *PeriodicReader) temporality(kind InstrumentKind) metricdata.Temporality
|
||||
}
|
||||
|
||||
// aggregation returns what Aggregation to use for kind.
|
||||
func (r *PeriodicReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type.
|
||||
func (r *PeriodicReader) aggregation(
|
||||
kind InstrumentKind,
|
||||
) Aggregation { // nolint:revive // import-shadow for method scoped by type.
|
||||
return r.exporter.Aggregation(kind)
|
||||
}
|
||||
|
||||
// collectAndExport gather all metric data related to the periodicReader r from
|
||||
// the SDK and exports it with r's exporter.
|
||||
func (r *PeriodicReader) collectAndExport(ctx context.Context) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, r.timeout)
|
||||
ctx, cancel := context.WithTimeoutCause(ctx, r.timeout, errors.New("reader collect and export timeout"))
|
||||
defer cancel()
|
||||
|
||||
// TODO (#3047): Use a sync.Pool or persistent pointer instead of allocating rm every Collect.
|
||||
@ -232,7 +234,7 @@ func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMet
|
||||
}
|
||||
|
||||
// collect unwraps p as a produceHolder and returns its produce results.
|
||||
func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricdata.ResourceMetrics) error {
|
||||
func (r *PeriodicReader) collect(ctx context.Context, p any, rm *metricdata.ResourceMetrics) error {
|
||||
if p == nil {
|
||||
return ErrReaderNotRegistered
|
||||
}
|
||||
@ -276,7 +278,7 @@ func (r *PeriodicReader) ForceFlush(ctx context.Context) error {
|
||||
// Prioritize the ctx timeout if it is set.
|
||||
if _, ok := ctx.Deadline(); !ok {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, r.timeout)
|
||||
ctx, cancel = context.WithTimeoutCause(ctx, r.timeout, errors.New("reader force flush timeout"))
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
@ -309,7 +311,7 @@ func (r *PeriodicReader) Shutdown(ctx context.Context) error {
|
||||
// Prioritize the ctx timeout if it is set.
|
||||
if _, ok := ctx.Deadline(); !ok {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, r.timeout)
|
||||
ctx, cancel = context.WithTimeoutCause(ctx, r.timeout, errors.New("reader shutdown timeout"))
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
@ -347,7 +349,7 @@ func (r *PeriodicReader) Shutdown(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// MarshalLog returns logging data about the PeriodicReader.
|
||||
func (r *PeriodicReader) MarshalLog() interface{} {
|
||||
func (r *PeriodicReader) MarshalLog() any {
|
||||
r.mu.Lock()
|
||||
down := r.isShutdown
|
||||
r.mu.Unlock()
|
||||
|
||||
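The periodic_reader.go hunks above swap context.WithTimeout for context.WithTimeoutCause, so a timed-out collect, force flush, or shutdown reports a descriptive cause. A minimal sketch of that Go 1.21+ API outside the SDK:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func main() {
	// context.WithTimeoutCause attaches an error that context.Cause returns
	// once the deadline expires; ctx.Err() still reports DeadlineExceeded.
	// Minimal sketch, not SDK code.
	ctx, cancel := context.WithTimeoutCause(
		context.Background(), 10*time.Millisecond,
		errors.New("reader collect and export timeout"),
	)
	defer cancel()

	<-ctx.Done()
	fmt.Println(ctx.Err())          // context deadline exceeded
	fmt.Println(context.Cause(ctx)) // reader collect and export timeout
}
```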
95
vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
generated
vendored
@ -17,7 +17,6 @@ import (
|
||||
"go.opentelemetry.io/otel/sdk/metric/exemplar"
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal"
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal/x"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
@ -37,17 +36,24 @@ type instrumentSync struct {
|
||||
compAgg aggregate.ComputeAggregation
|
||||
}
|
||||
|
||||
func newPipeline(res *resource.Resource, reader Reader, views []View, exemplarFilter exemplar.Filter) *pipeline {
|
||||
func newPipeline(
|
||||
res *resource.Resource,
|
||||
reader Reader,
|
||||
views []View,
|
||||
exemplarFilter exemplar.Filter,
|
||||
cardinalityLimit int,
|
||||
) *pipeline {
|
||||
if res == nil {
|
||||
res = resource.Empty()
|
||||
}
|
||||
return &pipeline{
|
||||
resource: res,
|
||||
reader: reader,
|
||||
views: views,
|
||||
int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{},
|
||||
float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{},
|
||||
exemplarFilter: exemplarFilter,
|
||||
resource: res,
|
||||
reader: reader,
|
||||
views: views,
|
||||
int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{},
|
||||
float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{},
|
||||
exemplarFilter: exemplarFilter,
|
||||
cardinalityLimit: cardinalityLimit,
|
||||
// aggregations is lazy allocated when needed.
|
||||
}
|
||||
}
|
||||
@ -65,12 +71,13 @@ type pipeline struct {
|
||||
views []View
|
||||
|
||||
sync.Mutex
|
||||
int64Measures map[observableID[int64]][]aggregate.Measure[int64]
|
||||
float64Measures map[observableID[float64]][]aggregate.Measure[float64]
|
||||
aggregations map[instrumentation.Scope][]instrumentSync
|
||||
callbacks []func(context.Context) error
|
||||
multiCallbacks list.List
|
||||
exemplarFilter exemplar.Filter
|
||||
int64Measures map[observableID[int64]][]aggregate.Measure[int64]
|
||||
float64Measures map[observableID[float64]][]aggregate.Measure[float64]
|
||||
aggregations map[instrumentation.Scope][]instrumentSync
|
||||
callbacks []func(context.Context) error
|
||||
multiCallbacks list.List
|
||||
exemplarFilter exemplar.Filter
|
||||
cardinalityLimit int
|
||||
}
|
||||
|
||||
// addInt64Measure adds a new int64 measure to the pipeline for each observer.
|
||||
@ -121,6 +128,14 @@ func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) {
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error {
|
||||
// Only check if context is already cancelled before starting, not inside or after callback loops.
|
||||
// If this method returns after executing some callbacks but before running all aggregations,
|
||||
// internal aggregation state can be corrupted and result in incorrect data returned
|
||||
// by future produce calls.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
@ -130,12 +145,6 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics)
|
||||
if e := c(ctx); e != nil {
|
||||
err = errors.Join(err, e)
|
||||
}
|
||||
if err := ctx.Err(); err != nil {
|
||||
rm.Resource = nil
|
||||
clear(rm.ScopeMetrics) // Erase elements to let GC collect objects.
|
||||
rm.ScopeMetrics = rm.ScopeMetrics[:0]
|
||||
return err
|
||||
}
|
||||
}
|
||||
for e := p.multiCallbacks.Front(); e != nil; e = e.Next() {
|
||||
// TODO make the callbacks parallel. ( #3034 )
|
||||
@ -143,13 +152,6 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics)
|
||||
if e := f(ctx); e != nil {
|
||||
err = errors.Join(err, e)
|
||||
}
|
||||
if err := ctx.Err(); err != nil {
|
||||
// This means the context expired before we finished running callbacks.
|
||||
rm.Resource = nil
|
||||
clear(rm.ScopeMetrics) // Erase elements to let GC collect objects.
|
||||
rm.ScopeMetrics = rm.ScopeMetrics[:0]
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
rm.Resource = p.resource
|
||||
@ -347,7 +349,12 @@ func (i *inserter[N]) readerDefaultAggregation(kind InstrumentKind) Aggregation
|
||||
//
|
||||
// If the instrument defines an unknown or incompatible aggregation, an error
|
||||
// is returned.
|
||||
func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind InstrumentKind, stream Stream, readerAggregation Aggregation) (meas aggregate.Measure[N], aggID uint64, err error) {
|
||||
func (i *inserter[N]) cachedAggregator(
|
||||
scope instrumentation.Scope,
|
||||
kind InstrumentKind,
|
||||
stream Stream,
|
||||
readerAggregation Aggregation,
|
||||
) (meas aggregate.Measure[N], aggID uint64, err error) {
|
||||
switch stream.Aggregation.(type) {
|
||||
case nil:
|
||||
// The aggregation was not overridden with a view. Use the aggregation
|
||||
@ -379,16 +386,18 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum
|
||||
normID := id.normalize()
|
||||
cv := i.aggregators.Lookup(normID, func() aggVal[N] {
|
||||
b := aggregate.Builder[N]{
|
||||
Temporality: i.pipeline.reader.temporality(kind),
|
||||
ReservoirFunc: reservoirFunc[N](stream.ExemplarReservoirProviderSelector(stream.Aggregation), i.pipeline.exemplarFilter),
|
||||
Temporality: i.pipeline.reader.temporality(kind),
|
||||
ReservoirFunc: reservoirFunc[N](
|
||||
stream.ExemplarReservoirProviderSelector(stream.Aggregation),
|
||||
i.pipeline.exemplarFilter,
|
||||
),
|
||||
}
|
||||
b.Filter = stream.AttributeFilter
|
||||
// A value less than or equal to zero will disable the aggregation
|
||||
// limits for the builder (an all the created aggregates).
|
||||
// CardinalityLimit.Lookup returns 0 by default if unset (or
|
||||
// cardinalityLimit will be 0 by default if unset (or
|
||||
// unrecognized input). Use that value directly.
|
||||
b.AggregationLimit, _ = x.CardinalityLimit.Lookup()
|
||||
|
||||
b.AggregationLimit = i.pipeline.cardinalityLimit
|
||||
in, out, err := i.aggregateFunc(b, stream.Aggregation, kind)
|
||||
if err != nil {
|
||||
return aggVal[N]{0, nil, err}
|
||||
@ -423,7 +432,7 @@ func (i *inserter[N]) logConflict(id instID) {
|
||||
}
|
||||
|
||||
const msg = "duplicate metric stream definitions"
|
||||
args := []interface{}{
|
||||
args := []any{
|
||||
"names", fmt.Sprintf("%q, %q", existing.Name, id.Name),
|
||||
"descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description),
|
||||
"kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind),
|
||||
@ -457,7 +466,7 @@ func (i *inserter[N]) logConflict(id instID) {
|
||||
global.Warn(msg, args...)
|
||||
}
|
||||
|
||||
func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID {
|
||||
func (*inserter[N]) instID(kind InstrumentKind, stream Stream) instID {
|
||||
var zero N
|
||||
return instID{
|
||||
Name: stream.Name,
|
||||
@ -471,7 +480,11 @@ func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID {
|
||||
// aggregateFunc returns new aggregate functions matching agg, kind, and
|
||||
// monotonic. If the agg is unknown or temporality is invalid, an error is
|
||||
// returned.
|
||||
func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kind InstrumentKind) (meas aggregate.Measure[N], comp aggregate.ComputeAggregation, err error) {
|
||||
func (i *inserter[N]) aggregateFunc(
|
||||
b aggregate.Builder[N],
|
||||
agg Aggregation,
|
||||
kind InstrumentKind,
|
||||
) (meas aggregate.Measure[N], comp aggregate.ComputeAggregation, err error) {
|
||||
switch a := agg.(type) {
|
||||
case AggregationDefault:
|
||||
return i.aggregateFunc(b, DefaultAggregationSelector(kind), kind)
|
||||
@ -583,10 +596,16 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error {
|
||||
// measurement.
|
||||
type pipelines []*pipeline
|
||||
|
||||
func newPipelines(res *resource.Resource, readers []Reader, views []View, exemplarFilter exemplar.Filter) pipelines {
|
||||
func newPipelines(
|
||||
res *resource.Resource,
|
||||
readers []Reader,
|
||||
views []View,
|
||||
exemplarFilter exemplar.Filter,
|
||||
cardinalityLimit int,
|
||||
) pipelines {
|
||||
pipes := make([]*pipeline, 0, len(readers))
|
||||
for _, r := range readers {
|
||||
p := newPipeline(res, r, views, exemplarFilter)
|
||||
p := newPipeline(res, r, views, exemplarFilter, cardinalityLimit)
|
||||
r.register(p)
|
||||
pipes = append(pipes, p)
|
||||
}
|
||||
|
||||
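With the pipeline.go changes above, the cardinality limit is carried in the pipeline configuration (newPipeline/newPipelines) rather than read from the OTEL_GO_X_CARDINALITY_LIMIT flag. Conceptually the limit caps the number of attribute sets an instrument can produce; a rough sketch of that behaviour, assuming the spec's otel.metric.overflow convention for the overflow series (not the SDK's actual aggregation code):

```go
package main

import "fmt"

// series maps an attribute-set key to an aggregated value.
type series map[string]int64

// record folds measurements for new attribute sets into a single overflow
// series once the limit is reached. Illustrative sketch only; the overflow
// key name follows the otel.metric.overflow convention.
func record(s series, limit int, key string, v int64) {
	if _, ok := s[key]; !ok && limit > 0 && len(s) >= limit {
		key = "otel.metric.overflow=true"
	}
	s[key] += v
}

func main() {
	s := series{}
	for _, k := range []string{"a", "b", "c", "d"} {
		record(s, 2, "host="+k, 1)
	}
	fmt.Println(s) // map[host=a:1 host=b:1 otel.metric.overflow=true:2]
}
```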
2
vendor/go.opentelemetry.io/otel/sdk/metric/provider.go
generated
vendored
@ -42,7 +42,7 @@ func NewMeterProvider(options ...Option) *MeterProvider {
flush, sdown := conf.readerSignals()

mp := &MeterProvider{
pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter),
pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter, conf.cardinalityLimit),
forceFlush: flush,
shutdown: sdown,
}
7
vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
generated
vendored
@ -117,7 +117,7 @@ type produceHolder struct {
|
||||
type shutdownProducer struct{}
|
||||
|
||||
// produce returns an ErrReaderShutdown error.
|
||||
func (p shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error {
|
||||
func (shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error {
|
||||
return ErrReaderShutdown
|
||||
}
|
||||
|
||||
@ -146,7 +146,10 @@ type AggregationSelector func(InstrumentKind) Aggregation
|
||||
// Histogram ⇨ ExplicitBucketHistogram.
|
||||
func DefaultAggregationSelector(ik InstrumentKind) Aggregation {
|
||||
switch ik {
|
||||
case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter:
|
||||
case InstrumentKindCounter,
|
||||
InstrumentKindUpDownCounter,
|
||||
InstrumentKindObservableCounter,
|
||||
InstrumentKindObservableUpDownCounter:
|
||||
return AggregationSum{}
|
||||
case InstrumentKindObservableGauge, InstrumentKindGauge:
|
||||
return AggregationLastValue{}
|
||||
|
||||
2
vendor/go.opentelemetry.io/otel/sdk/metric/version.go
generated
vendored
@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"

// version is the current release version of the metric SDK in use.
func version() string {
return "1.35.0"
return "1.38.0"
}
4
vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
generated
vendored
@ -13,7 +13,7 @@ import (
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
|
||||
)
|
||||
|
||||
type (
|
||||
@ -72,7 +72,7 @@ func StringDetector(schemaURL string, k attribute.Key, f func() (string, error))
|
||||
|
||||
// Detect returns a *Resource that describes the string as a value
|
||||
// corresponding to attribute.Key as well as the specific schemaURL.
|
||||
func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
func (sd stringDetector) Detect(context.Context) (*Resource, error) {
|
||||
value, err := sd.F()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: %w", string(sd.K), err)
|
||||
|
||||
4
vendor/go.opentelemetry.io/otel/sdk/resource/container.go
generated
vendored
@ -11,7 +11,7 @@ import (
|
||||
"os"
|
||||
"regexp"
|
||||
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
|
||||
)
|
||||
|
||||
type containerIDProvider func() (string, error)
|
||||
@ -27,7 +27,7 @@ const cgroupPath = "/proc/self/cgroup"
|
||||
|
||||
// Detect returns a *Resource that describes the id of the container.
|
||||
// If no container id found, an empty resource will be returned.
|
||||
func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
func (cgroupContainerIDDetector) Detect(context.Context) (*Resource, error) {
|
||||
containerID, err := containerID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
2
vendor/go.opentelemetry.io/otel/sdk/resource/env.go
generated
vendored
@ -12,7 +12,7 @@ import (
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
4
vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
generated
vendored
@ -8,7 +8,7 @@ import (
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
|
||||
)
|
||||
|
||||
type hostIDProvider func() (string, error)
|
||||
@ -96,7 +96,7 @@ func (r *hostIDReaderLinux) read() (string, error) {
|
||||
type hostIDDetector struct{}
|
||||
|
||||
// Detect returns a *Resource containing the platform specific host id.
|
||||
func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
func (hostIDDetector) Detect(context.Context) (*Resource, error) {
|
||||
hostID, err := hostID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
6
vendor/go.opentelemetry.io/otel/sdk/resource/os.go
generated
vendored
@ -8,7 +8,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
|
||||
)
|
||||
|
||||
type osDescriptionProvider func() (string, error)
|
||||
@ -32,7 +32,7 @@ type (
|
||||
|
||||
// Detect returns a *Resource that describes the operating system type the
|
||||
// service is running on.
|
||||
func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
func (osTypeDetector) Detect(context.Context) (*Resource, error) {
|
||||
osType := runtimeOS()
|
||||
|
||||
osTypeAttribute := mapRuntimeOSToSemconvOSType(osType)
|
||||
@ -45,7 +45,7 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
|
||||
// Detect returns a *Resource that describes the operating system the
|
||||
// service is running on.
|
||||
func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
func (osDescriptionDetector) Detect(context.Context) (*Resource, error) {
|
||||
description, err := osDescription()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
6
vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
generated
vendored
@ -63,12 +63,12 @@ func parseOSReleaseFile(file io.Reader) map[string]string {
|
||||
return values
|
||||
}
|
||||
|
||||
// skip returns true if the line is blank or starts with a '#' character, and
|
||||
// skip reports whether the line is blank or starts with a '#' character, and
|
||||
// therefore should be skipped from processing.
|
||||
func skip(line string) bool {
|
||||
line = strings.TrimSpace(line)
|
||||
|
||||
return len(line) == 0 || strings.HasPrefix(line, "#")
|
||||
return line == "" || strings.HasPrefix(line, "#")
|
||||
}
|
||||
|
||||
// parse attempts to split the provided line on the first '=' character, and then
|
||||
@ -76,7 +76,7 @@ func skip(line string) bool {
|
||||
func parse(line string) (string, string, bool) {
|
||||
k, v, found := strings.Cut(line, "=")
|
||||
|
||||
if !found || len(k) == 0 {
|
||||
if !found || k == "" {
|
||||
return "", "", false
|
||||
}
|
||||
|
||||
|
||||
18
vendor/go.opentelemetry.io/otel/sdk/resource/process.go
generated
vendored
@ -11,7 +11,7 @@ import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
|
||||
)
|
||||
|
||||
type (
|
||||
@ -112,19 +112,19 @@ type (
|
||||
|
||||
// Detect returns a *Resource that describes the process identifier (PID) of the
|
||||
// executing process.
|
||||
func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
func (processPIDDetector) Detect(context.Context) (*Resource, error) {
|
||||
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil
|
||||
}
|
||||
|
||||
// Detect returns a *Resource that describes the name of the process executable.
|
||||
func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
func (processExecutableNameDetector) Detect(context.Context) (*Resource, error) {
|
||||
executableName := filepath.Base(commandArgs()[0])
|
||||
|
||||
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil
|
||||
}
|
||||
|
||||
// Detect returns a *Resource that describes the full path of the process executable.
|
||||
func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
func (processExecutablePathDetector) Detect(context.Context) (*Resource, error) {
|
||||
executablePath, err := executablePath()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -135,13 +135,13 @@ func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, err
|
||||
|
||||
// Detect returns a *Resource that describes all the command arguments as received
|
||||
// by the process.
|
||||
func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
func (processCommandArgsDetector) Detect(context.Context) (*Resource, error) {
|
||||
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil
|
||||
}
|
||||
|
||||
// Detect returns a *Resource that describes the username of the user that owns the
|
||||
// process.
|
||||
func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
func (processOwnerDetector) Detect(context.Context) (*Resource, error) {
|
||||
owner, err := owner()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -152,17 +152,17 @@ func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
|
||||
// Detect returns a *Resource that describes the name of the compiler used to compile
|
||||
// this process image.
|
||||
func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
func (processRuntimeNameDetector) Detect(context.Context) (*Resource, error) {
|
||||
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil
|
||||
}
|
||||
|
||||
// Detect returns a *Resource that describes the version of the runtime of this process.
|
||||
func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
func (processRuntimeVersionDetector) Detect(context.Context) (*Resource, error) {
|
||||
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil
|
||||
}
|
||||
|
||||
// Detect returns a *Resource that describes the runtime of this process.
|
||||
func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
func (processRuntimeDescriptionDetector) Detect(context.Context) (*Resource, error) {
|
||||
runtimeDescription := fmt.Sprintf(
|
||||
"go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch())
|
||||
|
||||
|
||||
4
vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
generated
vendored
@ -112,7 +112,7 @@ func (r *Resource) String() string {
|
||||
}
|
||||
|
||||
// MarshalLog is the marshaling function used by the logging system to represent this Resource.
|
||||
func (r *Resource) MarshalLog() interface{} {
|
||||
func (r *Resource) MarshalLog() any {
|
||||
return struct {
|
||||
Attributes attribute.Set
|
||||
SchemaURL string
|
||||
@ -148,7 +148,7 @@ func (r *Resource) Iter() attribute.Iterator {
|
||||
return r.attrs.Iter()
|
||||
}
|
||||
|
||||
// Equal returns whether r and o represent the same resource. Two resources can
|
||||
// Equal reports whether r and o represent the same resource. Two resources can
|
||||
// be equal even if they have different schema URLs.
|
||||
//
|
||||
// See the documentation on the [Resource] type for the pitfalls of using ==
|
||||
|
||||
125
vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
generated
vendored
@ -5,24 +5,36 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/sdk"
|
||||
"go.opentelemetry.io/otel/sdk/internal/env"
|
||||
"go.opentelemetry.io/otel/sdk/trace/internal/x"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
|
||||
"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
// Defaults for BatchSpanProcessorOptions.
|
||||
const (
|
||||
DefaultMaxQueueSize = 2048
|
||||
DefaultScheduleDelay = 5000
|
||||
DefaultMaxQueueSize = 2048
|
||||
// DefaultScheduleDelay is the delay interval between two consecutive exports, in milliseconds.
|
||||
DefaultScheduleDelay = 5000
|
||||
// DefaultExportTimeout is the duration after which an export is cancelled, in milliseconds.
|
||||
DefaultExportTimeout = 30000
|
||||
DefaultMaxExportBatchSize = 512
|
||||
)
|
||||
|
||||
var queueFull = otelconv.ErrorTypeAttr("queue_full")
|
||||
|
||||
// BatchSpanProcessorOption configures a BatchSpanProcessor.
|
||||
type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
|
||||
|
||||
@ -66,6 +78,11 @@ type batchSpanProcessor struct {
|
||||
queue chan ReadOnlySpan
|
||||
dropped uint32
|
||||
|
||||
selfObservabilityEnabled bool
|
||||
callbackRegistration metric.Registration
|
||||
spansProcessedCounter otelconv.SDKProcessorSpanProcessed
|
||||
componentNameAttr attribute.KeyValue
|
||||
|
||||
batch []ReadOnlySpan
|
||||
batchMutex sync.Mutex
|
||||
timer *time.Timer
|
||||
@ -86,11 +103,7 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
|
||||
maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
|
||||
|
||||
if maxExportBatchSize > maxQueueSize {
|
||||
if DefaultMaxExportBatchSize > maxQueueSize {
|
||||
maxExportBatchSize = maxQueueSize
|
||||
} else {
|
||||
maxExportBatchSize = DefaultMaxExportBatchSize
|
||||
}
|
||||
maxExportBatchSize = min(DefaultMaxExportBatchSize, maxQueueSize)
|
||||
}
|
||||
|
||||
o := BatchSpanProcessorOptions{
|
||||
@ -111,6 +124,21 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
if x.SelfObservability.Enabled() {
|
||||
bsp.selfObservabilityEnabled = true
|
||||
bsp.componentNameAttr = componentName()
|
||||
|
||||
var err error
|
||||
bsp.spansProcessedCounter, bsp.callbackRegistration, err = newBSPObs(
|
||||
bsp.componentNameAttr,
|
||||
func() int64 { return int64(len(bsp.queue)) },
|
||||
int64(bsp.o.MaxQueueSize),
|
||||
)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
}
|
||||
|
||||
bsp.stopWait.Add(1)
|
||||
go func() {
|
||||
defer bsp.stopWait.Done()
|
||||
@ -121,8 +149,61 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
|
||||
return bsp
|
||||
}
|
||||
|
||||
var processorIDCounter atomic.Int64
|
||||
|
||||
// nextProcessorID returns an identifier for this batch span processor,
|
||||
// starting with 0 and incrementing by 1 each time it is called.
|
||||
func nextProcessorID() int64 {
|
||||
return processorIDCounter.Add(1) - 1
|
||||
}
|
||||
|
||||
func componentName() attribute.KeyValue {
|
||||
id := nextProcessorID()
|
||||
name := fmt.Sprintf("%s/%d", otelconv.ComponentTypeBatchingSpanProcessor, id)
|
||||
return semconv.OTelComponentName(name)
|
||||
}
|
||||
|
||||
// newBSPObs creates and returns a new set of metrics instruments and a
|
||||
// registration for a BatchSpanProcessor. It is the caller's responsibility
|
||||
// to unregister the registration when it is no longer needed.
|
||||
func newBSPObs(
|
||||
cmpnt attribute.KeyValue,
|
||||
qLen func() int64,
|
||||
qMax int64,
|
||||
) (otelconv.SDKProcessorSpanProcessed, metric.Registration, error) {
|
||||
meter := otel.GetMeterProvider().Meter(
|
||||
selfObsScopeName,
|
||||
metric.WithInstrumentationVersion(sdk.Version()),
|
||||
metric.WithSchemaURL(semconv.SchemaURL),
|
||||
)
|
||||
|
||||
qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter)
|
||||
|
||||
qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter)
|
||||
err = errors.Join(err, e)
|
||||
|
||||
spansProcessed, e := otelconv.NewSDKProcessorSpanProcessed(meter)
|
||||
err = errors.Join(err, e)
|
||||
|
||||
cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor
|
||||
attrs := metric.WithAttributes(cmpnt, cmpntT)
|
||||
|
||||
reg, e := meter.RegisterCallback(
|
||||
func(_ context.Context, o metric.Observer) error {
|
||||
o.ObserveInt64(qSize.Inst(), qLen(), attrs)
|
||||
o.ObserveInt64(qCap.Inst(), qMax, attrs)
|
||||
return nil
|
||||
},
|
||||
qSize.Inst(),
|
||||
qCap.Inst(),
|
||||
)
|
||||
err = errors.Join(err, e)
|
||||
|
||||
return spansProcessed, reg, err
|
||||
}
|
||||
|
||||
// OnStart method does nothing.
|
||||
func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {}
|
||||
func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
|
||||
|
||||
// OnEnd method enqueues a ReadOnlySpan for later processing.
|
||||
func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
|
||||
@ -161,6 +242,9 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
}
|
||||
if bsp.selfObservabilityEnabled {
|
||||
err = errors.Join(err, bsp.callbackRegistration.Unregister())
|
||||
}
|
||||
})
|
||||
return err
|
||||
}
|
||||
@ -170,7 +254,7 @@ type forceFlushSpan struct {
|
||||
flushed chan struct{}
|
||||
}
|
||||
|
||||
func (f forceFlushSpan) SpanContext() trace.SpanContext {
|
||||
func (forceFlushSpan) SpanContext() trace.SpanContext {
|
||||
return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled})
|
||||
}
|
||||
|
||||
@ -267,12 +351,17 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
|
||||
|
||||
if bsp.o.ExportTimeout > 0 {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout)
|
||||
ctx, cancel = context.WithTimeoutCause(ctx, bsp.o.ExportTimeout, errors.New("processor export timeout"))
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
if l := len(bsp.batch); l > 0 {
|
||||
global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped))
|
||||
if bsp.selfObservabilityEnabled {
|
||||
bsp.spansProcessedCounter.Add(ctx, int64(l),
|
||||
bsp.componentNameAttr,
|
||||
bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor))
|
||||
}
|
||||
err := bsp.e.ExportSpans(ctx, bsp.batch)
|
||||
|
||||
// A new batch is always created after exporting, even if the batch failed to be exported.
|
||||
@ -381,11 +470,17 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R
|
||||
case bsp.queue <- sd:
|
||||
return true
|
||||
case <-ctx.Done():
|
||||
if bsp.selfObservabilityEnabled {
|
||||
bsp.spansProcessedCounter.Add(ctx, 1,
|
||||
bsp.componentNameAttr,
|
||||
bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor),
|
||||
bsp.spansProcessedCounter.AttrErrorType(queueFull))
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool {
|
||||
func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool {
|
||||
if !sd.SpanContext().IsSampled() {
|
||||
return false
|
||||
}
|
||||
@ -395,12 +490,18 @@ func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) b
|
||||
return true
|
||||
default:
|
||||
atomic.AddUint32(&bsp.dropped, 1)
|
||||
if bsp.selfObservabilityEnabled {
|
||||
bsp.spansProcessedCounter.Add(ctx, 1,
|
||||
bsp.componentNameAttr,
|
||||
bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor),
|
||||
bsp.spansProcessedCounter.AttrErrorType(queueFull))
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
|
||||
func (bsp *batchSpanProcessor) MarshalLog() interface{} {
|
||||
func (bsp *batchSpanProcessor) MarshalLog() any {
|
||||
return struct {
|
||||
Type string
|
||||
SpanExporter SpanExporter
|
||||
|
||||
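The exportSpans hunk above swaps context.WithTimeout for context.WithTimeoutCause so a timed-out export carries a processor-specific cause. A standalone standard-library sketch of that mechanism; the timeout value and message are placeholders:

```go
// When the deadline fires, context.Cause returns the supplied error instead
// of only the generic context.DeadlineExceeded.
package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

func main() {
    ctx, cancel := context.WithTimeoutCause(context.Background(),
        10*time.Millisecond, errors.New("processor export timeout"))
    defer cancel()

    <-ctx.Done()
    fmt.Println(ctx.Err())          // context deadline exceeded
    fmt.Println(context.Cause(ctx)) // processor export timeout
}
```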
3 vendor/go.opentelemetry.io/otel/sdk/trace/doc.go generated vendored
@ -6,5 +6,8 @@ Package trace contains support for OpenTelemetry distributed tracing.

The following assumes a basic familiarity with OpenTelemetry concepts.
See https://opentelemetry.io.

See [go.opentelemetry.io/otel/sdk/trace/internal/x] for information about
the experimental features.
*/
package trace // import "go.opentelemetry.io/otel/sdk/trace"
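As a reading aid for the package documented above, a minimal setup sketch assuming the public SDK constructors and the stdouttrace exporter; none of this code appears in the diff itself:

```go
package main

import (
    "context"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
    exp, err := stdouttrace.New()
    if err != nil {
        panic(err)
    }
    tp := sdktrace.NewTracerProvider(
        sdktrace.WithBatcher(exp), // wraps exp in the BatchSpanProcessor shown earlier
    )
    defer func() { _ = tp.Shutdown(context.Background()) }()
    otel.SetTracerProvider(tp)

    _, span := tp.Tracer("example").Start(context.Background(), "demo")
    span.End()
}
```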
30 vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go generated vendored
@ -5,10 +5,8 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"

import (
    "context"
    crand "crypto/rand"
    "encoding/binary"
    "math/rand"
    "sync"
    "math/rand/v2"

    "go.opentelemetry.io/otel/trace"
)
@ -29,20 +27,15 @@ type IDGenerator interface {
    // must never be done outside of a new major release.
}

type randomIDGenerator struct {
    sync.Mutex
    randSource *rand.Rand
}
type randomIDGenerator struct{}

var _ IDGenerator = &randomIDGenerator{}

// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
    gen.Lock()
    defer gen.Unlock()
func (*randomIDGenerator) NewSpanID(context.Context, trace.TraceID) trace.SpanID {
    sid := trace.SpanID{}
    for {
        _, _ = gen.randSource.Read(sid[:])
        binary.NativeEndian.PutUint64(sid[:], rand.Uint64())
        if sid.IsValid() {
            break
        }
@ -52,19 +45,18 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace

// NewIDs returns a non-zero trace ID and a non-zero span ID from a
// randomly-chosen sequence.
func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
    gen.Lock()
    defer gen.Unlock()
func (*randomIDGenerator) NewIDs(context.Context) (trace.TraceID, trace.SpanID) {
    tid := trace.TraceID{}
    sid := trace.SpanID{}
    for {
        _, _ = gen.randSource.Read(tid[:])
        binary.NativeEndian.PutUint64(tid[:8], rand.Uint64())
        binary.NativeEndian.PutUint64(tid[8:], rand.Uint64())
        if tid.IsValid() {
            break
        }
    }
    for {
        _, _ = gen.randSource.Read(sid[:])
        binary.NativeEndian.PutUint64(sid[:], rand.Uint64())
        if sid.IsValid() {
            break
        }
@ -73,9 +65,5 @@ func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.
}

func defaultIDGenerator() IDGenerator {
    gen := &randomIDGenerator{}
    var rngSeed int64
    _ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed)
    gen.randSource = rand.New(rand.NewSource(rngSeed))
    return gen
    return &randomIDGenerator{}
}
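The rewritten generator above drops the mutex-guarded *rand.Rand in favor of math/rand/v2, whose top-level functions are safe for concurrent use. A standalone sketch of the same ID-filling technique (the SDK additionally retries until the IDs are non-zero):

```go
package main

import (
    "encoding/binary"
    "fmt"
    "math/rand/v2"
)

func main() {
    var tid [16]byte // trace ID
    var sid [8]byte  // span ID

    // No lock or seeded source needed: rand/v2's package-level functions
    // are concurrency-safe.
    binary.NativeEndian.PutUint64(tid[:8], rand.Uint64())
    binary.NativeEndian.PutUint64(tid[8:], rand.Uint64())
    binary.NativeEndian.PutUint64(sid[:], rand.Uint64())

    fmt.Printf("trace=%x span=%x\n", tid, sid)
}
```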
35 vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md generated vendored Normal file
@ -0,0 +1,35 @@
# Experimental Features

The Trace SDK contains features that have not yet stabilized in the OpenTelemetry specification.
These features are added to the OpenTelemetry Go Trace SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.

These features may change in backwards incompatible ways as feedback is applied.
See the [Compatibility and Stability](#compatibility-and-stability) section for more information.

## Features

- [Self-Observability](#self-observability)

### Self-Observability

The SDK provides a self-observability feature that allows you to monitor the SDK itself.

To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`.

When enabled, the SDK will create the following metrics using the global `MeterProvider`:

- `otel.sdk.span.live`
- `otel.sdk.span.started`

Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics.

[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md

## Compatibility and Stability

Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
These features may be removed or modified in successive version releases, including patch versions.

When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
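The new README above documents the `OTEL_GO_X_SELF_OBSERVABILITY` opt-in. An illustrative sketch of enabling it, assuming the public SDK and tracetest packages; in practice the variable is usually exported in the process environment rather than set from code:

```go
package main

import (
    "context"
    "os"

    sdktrace "go.opentelemetry.io/otel/sdk/trace"
    "go.opentelemetry.io/otel/sdk/trace/tracetest"
)

func main() {
    // Must be set before the TracerProvider and its tracers are created.
    _ = os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true")

    tp := sdktrace.NewTracerProvider(
        sdktrace.WithSyncer(tracetest.NewInMemoryExporter()),
    )
    defer func() { _ = tp.Shutdown(context.Background()) }()

    // otel.sdk.span.started / otel.sdk.span.live are now recorded on the
    // global MeterProvider for spans created by this provider's tracers.
    _, span := tp.Tracer("demo").Start(context.Background(), "op")
    span.End()
}
```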
63 vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go generated vendored Normal file
@ -0,0 +1,63 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/trace].
package x // import "go.opentelemetry.io/otel/sdk/trace/internal/x"

import (
    "os"
    "strings"
)

// SelfObservability is an experimental feature flag that determines if SDK
// self-observability metrics are enabled.
//
// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
// will also enable this).
var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) {
    if strings.EqualFold(v, "true") {
        return v, true
    }
    return "", false
})

// Feature is an experimental feature control flag. It provides a uniform way
// to interact with these feature flags and parse their values.
type Feature[T any] struct {
    key string
    parse func(v string) (T, bool)
}

func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
    const envKeyRoot = "OTEL_GO_X_"
    return Feature[T]{
        key: envKeyRoot + suffix,
        parse: parse,
    }
}

// Key returns the environment variable key that needs to be set to enable the
// feature.
func (f Feature[T]) Key() string { return f.key }

// Lookup returns the user configured value for the feature and true if the
// user has enabled the feature. Otherwise, if the feature is not enabled, a
// zero-value and false are returned.
func (f Feature[T]) Lookup() (v T, ok bool) {
    // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
    //
    // > The SDK MUST interpret an empty value of an environment variable the
    // > same way as when the variable is unset.
    vRaw := os.Getenv(f.key)
    if vRaw == "" {
        return v, ok
    }
    return f.parse(vRaw)
}

// Enabled reports whether the feature is enabled.
func (f Feature[T]) Enabled() bool {
    _, ok := f.Lookup()
    return ok
}
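The x package above is internal to the SDK and cannot be imported by user code. A standalone sketch that mirrors the same generic, environment-gated flag pattern under that assumption:

```go
package main

import (
    "fmt"
    "os"
    "strings"
)

// feature is an environment-gated flag whose value is parsed lazily on lookup.
type feature[T any] struct {
    key   string
    parse func(string) (T, bool)
}

func (f feature[T]) lookup() (v T, ok bool) {
    raw := os.Getenv(f.key)
    if raw == "" { // empty is treated the same as unset, per the SDK spec
        return v, false
    }
    return f.parse(raw)
}

func (f feature[T]) enabled() bool { _, ok := f.lookup(); return ok }

func main() {
    selfObs := feature[string]{
        key:   "OTEL_GO_X_SELF_OBSERVABILITY",
        parse: func(v string) (string, bool) { return v, strings.EqualFold(v, "true") },
    }
    fmt.Println(selfObs.enabled())
}
```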
52 vendor/go.opentelemetry.io/otel/sdk/trace/provider.go generated vendored
@ -5,14 +5,20 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"

import (
    "context"
    "errors"
    "fmt"
    "sync"
    "sync/atomic"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/internal/global"
    "go.opentelemetry.io/otel/metric"
    "go.opentelemetry.io/otel/sdk"
    "go.opentelemetry.io/otel/sdk/instrumentation"
    "go.opentelemetry.io/otel/sdk/resource"
    "go.opentelemetry.io/otel/sdk/trace/internal/x"
    semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
    "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
    "go.opentelemetry.io/otel/trace"
    "go.opentelemetry.io/otel/trace/embedded"
    "go.opentelemetry.io/otel/trace/noop"
@ -20,6 +26,7 @@ import (

const (
    defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
    selfObsScopeName = "go.opentelemetry.io/otel/sdk/trace"
)

// tracerProviderConfig.
@ -45,7 +52,7 @@ type tracerProviderConfig struct {
}

// MarshalLog is the marshaling function used by the logging system to represent this Provider.
func (cfg tracerProviderConfig) MarshalLog() interface{} {
func (cfg tracerProviderConfig) MarshalLog() any {
    return struct {
        SpanProcessors []SpanProcessor
        SamplerType string
@ -156,8 +163,18 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
    t, ok := p.namedTracer[is]
    if !ok {
        t = &tracer{
            provider: p,
            instrumentationScope: is,
            provider: p,
            instrumentationScope: is,
            selfObservabilityEnabled: x.SelfObservability.Enabled(),
        }
        if t.selfObservabilityEnabled {
            var err error
            t.spanLiveMetric, t.spanStartedMetric, err = newInst()
            if err != nil {
                msg := "failed to create self-observability metrics for tracer: %w"
                err := fmt.Errorf(msg, err)
                otel.Handle(err)
            }
        }
        p.namedTracer[is] = t
    }
@ -169,11 +186,38 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
    // slowing down all tracing consumers.
    // - Logging code may be instrumented with tracing and deadlock because it could try
    // acquiring the same non-reentrant mutex.
    global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes)
    global.Info(
        "Tracer created",
        "name",
        name,
        "version",
        is.Version,
        "schemaURL",
        is.SchemaURL,
        "attributes",
        is.Attributes,
    )
    }
    return t
}

func newInst() (otelconv.SDKSpanLive, otelconv.SDKSpanStarted, error) {
    m := otel.GetMeterProvider().Meter(
        selfObsScopeName,
        metric.WithInstrumentationVersion(sdk.Version()),
        metric.WithSchemaURL(semconv.SchemaURL),
    )

    var err error
    spanLiveMetric, e := otelconv.NewSDKSpanLive(m)
    err = errors.Join(err, e)

    spanStartedMetric, e := otelconv.NewSDKSpanStarted(m)
    err = errors.Join(err, e)

    return spanLiveMetric, spanStartedMetric, err
}

// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
    // This check prevents calls during a shutdown.
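A usage sketch for the Tracer method shown above: tracers are cached per instrumentation scope in namedTracer, so repeated calls with the same name and options return the same instance. The module path and version are placeholders:

```go
package main

import (
    "fmt"

    sdktrace "go.opentelemetry.io/otel/sdk/trace"
    "go.opentelemetry.io/otel/trace"
)

func main() {
    tp := sdktrace.NewTracerProvider()

    t1 := tp.Tracer("github.com/acme/app", trace.WithInstrumentationVersion("1.2.3"))
    t2 := tp.Tracer("github.com/acme/app", trace.WithInstrumentationVersion("1.2.3"))

    fmt.Println(t1 == t2) // true: same scope, same cached tracer
}
```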
8 vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go generated vendored
@ -110,14 +110,14 @@ func TraceIDRatioBased(fraction float64) Sampler {

type alwaysOnSampler struct{}

func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
func (alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
    return SamplingResult{
        Decision: RecordAndSample,
        Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
    }
}

func (as alwaysOnSampler) Description() string {
func (alwaysOnSampler) Description() string {
    return "AlwaysOnSampler"
}

@ -131,14 +131,14 @@ func AlwaysSample() Sampler {

type alwaysOffSampler struct{}

func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
func (alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
    return SamplingResult{
        Decision: Drop,
        Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
    }
}

func (as alwaysOffSampler) Description() string {
func (alwaysOffSampler) Description() string {
    return "AlwaysOffSampler"
}
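A configuration sketch for the samplers above, using their public constructors; the 0.25 ratio is an arbitrary example value:

```go
package main

import (
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
    always := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
    never := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.NeverSample()))
    // ParentBased defers to the parent's decision and falls back to the root sampler.
    ratio := sdktrace.NewTracerProvider(
        sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.25))),
    )
    _, _, _ = always, never, ratio
}
```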
6 vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go generated vendored
@ -39,7 +39,7 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
}

// OnStart does nothing.
func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}

// OnEnd immediately exports a ReadOnlySpan.
func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
@ -104,13 +104,13 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
}

// ForceFlush does nothing as there is no data to flush.
func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
func (*simpleSpanProcessor) ForceFlush(context.Context) error {
    return nil
}

// MarshalLog is the marshaling function used by the logging system to represent
// this Span Processor.
func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
func (ssp *simpleSpanProcessor) MarshalLog() any {
    return struct {
        Type string
        Exporter SpanExporter
2 vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go generated vendored
@ -35,7 +35,7 @@ type snapshot struct {

var _ ReadOnlySpan = snapshot{}

func (s snapshot) private() {}
func (snapshot) private() {}

// Name returns the name of the span.
func (s snapshot) Name() string {
19 vendor/go.opentelemetry.io/otel/sdk/trace/span.go generated vendored
@ -20,7 +20,7 @@ import (
    "go.opentelemetry.io/otel/internal/global"
    "go.opentelemetry.io/otel/sdk/instrumentation"
    "go.opentelemetry.io/otel/sdk/resource"
    semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
    "go.opentelemetry.io/otel/trace"
    "go.opentelemetry.io/otel/trace/embedded"
)
@ -61,6 +61,7 @@ type ReadOnlySpan interface {
    InstrumentationScope() instrumentation.Scope
    // InstrumentationLibrary returns information about the instrumentation
    // library that created the span.
    //
    // Deprecated: please use InstrumentationScope instead.
    InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility
    // Resource returns information about the entity that produced the span.
@ -165,7 +166,7 @@ func (s *recordingSpan) SpanContext() trace.SpanContext {
    return s.spanContext
}

// IsRecording returns if this span is being recorded. If this span has ended
// IsRecording reports whether this span is being recorded. If this span has ended
// this will return false.
func (s *recordingSpan) IsRecording() bool {
    if s == nil {
@ -177,7 +178,7 @@ func (s *recordingSpan) IsRecording() bool {
    return s.isRecording()
}

// isRecording returns if this span is being recorded. If this span has ended
// isRecording reports whether this span is being recorded. If this span has ended
// this will return false.
//
// This method assumes s.mu.Lock is held by the caller.
@ -495,6 +496,16 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
    }
    s.mu.Unlock()

    if s.tracer.selfObservabilityEnabled {
        defer func() {
            // Add the span to the context to ensure the metric is recorded
            // with the correct span context.
            ctx := trace.ContextWithSpan(context.Background(), s)
            set := spanLiveSet(s.spanContext.IsSampled())
            s.tracer.spanLiveMetric.AddSet(ctx, -1, set)
        }()
    }

    sps := s.tracer.provider.getSpanProcessors()
    if len(sps) == 0 {
        return
@ -545,7 +556,7 @@ func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
    s.addEvent(semconv.ExceptionEventName, opts...)
}

func typeStr(i interface{}) string {
func typeStr(i any) string {
    t := reflect.TypeOf(i)
    if t.PkgPath() == "" && t.Name() == "" {
        // Likely a builtin type.
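A usage sketch for the span surface touched above: RecordError attaches an exception event (semconv.ExceptionEventName, as in the RecordError hunk) and SetStatus marks the span as errored. The operation name and error are placeholders:

```go
package main

import (
    "context"
    "errors"

    "go.opentelemetry.io/otel/codes"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
    "go.opentelemetry.io/otel/trace"
)

func main() {
    tp := sdktrace.NewTracerProvider()
    defer func() { _ = tp.Shutdown(context.Background()) }()

    _, span := tp.Tracer("demo").Start(context.Background(), "charge-card")
    err := errors.New("card declined")
    span.RecordError(err, trace.WithStackTrace(true))
    span.SetStatus(codes.Error, err.Error())
    span.End()
}
```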
Some files were not shown because too many files have changed in this diff.